1 /**************************************************************************
3 * Copyright 2013-2014 RAD Game Tools and Valve Software
4 * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 **************************************************************************/
27 // File: vogl_ktx_texture.cpp
28 #include "vogl_core.h"
29 #include "vogl_ktx_texture.h"
30 #include "vogl_console.h"
31 #include "vogl_strutils.h"
33 // Set #if VOGL_KTX_PVRTEX_WORKAROUNDS to 1 to enable various workarounds for oddball KTX files written by PVRTexTool.
34 #define VOGL_KTX_PVRTEX_WORKAROUNDS 1
// KTX 1.x file identifier (per the KTX spec): 0xAB 'K' 'T' 'X' ' ' '1' '1' 0xBB '\r' '\n' 0x1A '\n'.
// The non-ASCII sentinel bytes catch transfer corruption (7-bit strip, CR/LF translation, EOF char).
38 const uint8 s_ktx_file_id[12] = { 0xAB, 0x4B, 0x54, 0x58, 0x20, 0x31, 0x31, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A };
40 // true if the specified internal pixel format is compressed
// Recognizes the block-compressed GL internal formats this reader supports:
// S3TC/DXT, RGTC/LATC, ETC1, ETC2/EAC, and BPTC. Any other format returns false.
// The VOGL_ASSERTs cross-check this classification against
// ktx_get_ogl_compressed_base_internal_fmt() so the two tables can't drift apart.
41 bool ktx_is_compressed_ogl_fmt(uint32 ogl_fmt)
// First group of compressed formats (RGTC1/LATC1/ETC1 and 4bpp DXT1/EAC/ETC2 family).
45 case KTX_COMPRESSED_RED_RGTC1:
46 case KTX_COMPRESSED_SIGNED_RED_RGTC1_EXT:
47 case KTX_COMPRESSED_LUMINANCE_LATC1_EXT:
48 case KTX_COMPRESSED_SIGNED_LUMINANCE_LATC1_EXT:
49 case KTX_ETC1_RGB8_OES:
52 case KTX_COMPRESSED_RGB_S3TC_DXT1_EXT:
53 case KTX_COMPRESSED_RGBA_S3TC_DXT1_EXT:
54 case KTX_COMPRESSED_SRGB_S3TC_DXT1_EXT:
55 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
56 case KTX_COMPRESSED_R11_EAC:
57 case KTX_COMPRESSED_SIGNED_R11_EAC:
58 case KTX_COMPRESSED_RGB8_ETC2:
59 case KTX_COMPRESSED_SRGB8_ETC2:
60 case KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
61 case KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
// Second group (LATC2/RGTC2, DXT3/DXT5, ETC2-EAC RGBA, RG11 EAC, BPTC).
62 case KTX_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT:
63 case KTX_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2_EXT:
64 case KTX_COMPRESSED_RED_GREEN_RGTC2_EXT:
65 case KTX_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT:
68 case KTX_COMPRESSED_RGBA_S3TC_DXT3_EXT:
69 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
70 case KTX_COMPRESSED_RGBA_S3TC_DXT5_EXT:
71 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
72 case KTX_RGBA_DXT5_S3TC:
73 case KTX_RGBA4_DXT5_S3TC:
74 case KTX_COMPRESSED_RGBA8_ETC2_EAC:
75 case KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
76 case KTX_COMPRESSED_RG11_EAC:
77 case KTX_COMPRESSED_SIGNED_RG11_EAC:
78 case KTX_COMPRESSED_RGBA_BPTC_UNORM_ARB:
79 case KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB:
80 case KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB:
81 case KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB:
// Sanity: every compressed format must map to a nonzero base internal format.
82 VOGL_ASSERT(ktx_get_ogl_compressed_base_internal_fmt(ogl_fmt) != 0);
// Sanity for the non-compressed path: no base-format mapping should exist.
85 VOGL_ASSERT(ktx_get_ogl_compressed_base_internal_fmt(ogl_fmt) == 0);
// Returns true if ogl_type is a "packed pixel" GL type, i.e. a single
// byte/short/int unit that packs *all* of a pixel's components (e.g.
// GL_UNSIGNED_SHORT_5_6_5), as opposed to one type unit per component.
// Callers use this to decide whether bytes-per-pixel is the type size
// itself rather than (component count * type size).
90 bool ktx_is_packed_pixel_ogl_type(uint32 ogl_type)
94 case KTX_UNSIGNED_BYTE_3_3_2:
95 case KTX_UNSIGNED_BYTE_2_3_3_REV:
96 case KTX_UNSIGNED_SHORT_5_6_5:
97 case KTX_UNSIGNED_SHORT_5_6_5_REV:
98 case KTX_UNSIGNED_SHORT_4_4_4_4:
99 case KTX_UNSIGNED_SHORT_4_4_4_4_REV:
100 case KTX_UNSIGNED_SHORT_5_5_5_1:
101 case KTX_UNSIGNED_SHORT_1_5_5_5_REV:
102 case KTX_UNSIGNED_INT_8_8_8_8:
103 case KTX_UNSIGNED_INT_8_8_8_8_REV:
104 case KTX_UNSIGNED_INT_10_10_10_2:
105 case KTX_UNSIGNED_INT_2_10_10_10_REV:
106 case KTX_UNSIGNED_INT_24_8:
107 case KTX_UNSIGNED_INT_10F_11F_11F_REV:
108 case KTX_UNSIGNED_INT_5_9_9_9_REV:
109 case KTX_FLOAT_32_UNSIGNED_INT_24_8_REV:
// Returns the size in bytes of one unit of the given GL pixel type
// (plain byte/short/int, or a packed-pixel type).
115 uint ktx_get_ogl_type_size(uint32 ogl_type)
119 case KTX_UNSIGNED_BYTE:
123 case KTX_UNSIGNED_SHORT:
127 case KTX_UNSIGNED_INT:
// Packed 1-byte types.
130 case KTX_UNSIGNED_BYTE_3_3_2:
131 case KTX_UNSIGNED_BYTE_2_3_3_REV:
// Packed 2-byte types.
133 case KTX_UNSIGNED_SHORT_5_6_5:
134 case KTX_UNSIGNED_SHORT_5_6_5_REV:
135 case KTX_UNSIGNED_SHORT_4_4_4_4:
136 case KTX_UNSIGNED_SHORT_4_4_4_4_REV:
137 case KTX_UNSIGNED_SHORT_5_5_5_1:
138 case KTX_UNSIGNED_SHORT_1_5_5_5_REV:
// Packed 4-byte types.
140 case KTX_UNSIGNED_INT_8_8_8_8:
141 case KTX_UNSIGNED_INT_8_8_8_8_REV:
142 case KTX_UNSIGNED_INT_10_10_10_2:
143 case KTX_UNSIGNED_INT_2_10_10_10_REV:
144 case KTX_UNSIGNED_INT_24_8:
145 case KTX_UNSIGNED_INT_10F_11F_11F_REV:
146 case KTX_UNSIGNED_INT_5_9_9_9_REV:
147 case KTX_FLOAT_32_UNSIGNED_INT_24_8_REV: // this was 8, probably a mistake -- GL treats this as a 64-bit (float32 + packed 24/8) unit; TODO confirm intended size here
// Maps a compressed GL internal format to its uncompressed "base internal
// format" (KTX_LUMINANCE_ALPHA / KTX_RGB / KTX_RGBA / etc.), which is what the
// KTX header's glBaseInternalFormat field must contain. Returns 0 for formats
// that are not recognized as compressed (see the matching asserts in
// ktx_is_compressed_ogl_fmt()).
153 uint32 ktx_get_ogl_compressed_base_internal_fmt(uint32 ogl_fmt)
157 case KTX_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT:
158 case KTX_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2_EXT:
159 // Should this be RG? I dunno. (LATC2 is the luminance-alpha analogue of RGTC2.)
160 return KTX_LUMINANCE_ALPHA;
// Single-channel compressed formats.
162 case KTX_COMPRESSED_LUMINANCE_LATC1_EXT:
163 case KTX_COMPRESSED_SIGNED_LUMINANCE_LATC1_EXT:
164 case KTX_COMPRESSED_R11_EAC:
165 case KTX_COMPRESSED_SIGNED_R11_EAC:
166 case KTX_COMPRESSED_RED_RGTC1:
167 case KTX_COMPRESSED_SIGNED_RED_RGTC1:
// Two-channel compressed formats.
170 case KTX_COMPRESSED_RG11_EAC:
171 case KTX_COMPRESSED_SIGNED_RG11_EAC:
172 case KTX_COMPRESSED_RG_RGTC2:
173 case KTX_COMPRESSED_SIGNED_RG_RGTC2:
// RGB (no alpha) compressed formats.
176 case KTX_ETC1_RGB8_OES:
179 case KTX_COMPRESSED_RGB_S3TC_DXT1_EXT:
180 case KTX_COMPRESSED_SRGB_S3TC_DXT1_EXT:
181 case KTX_COMPRESSED_RGB8_ETC2:
182 case KTX_COMPRESSED_SRGB8_ETC2:
183 case KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB:
184 case KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB:
// RGBA compressed formats (incl. DXT1 variants that carry 1-bit alpha).
187 case KTX_COMPRESSED_RGBA_S3TC_DXT1_EXT:
188 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
191 case KTX_COMPRESSED_RGBA_S3TC_DXT3_EXT:
192 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
193 case KTX_COMPRESSED_RGBA_S3TC_DXT5_EXT:
194 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
195 case KTX_RGBA_DXT5_S3TC:
196 case KTX_RGBA4_DXT5_S3TC:
197 case KTX_COMPRESSED_RGBA_BPTC_UNORM_ARB:
198 case KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB:
199 case KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
200 case KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
201 case KTX_COMPRESSED_RGBA8_ETC2_EAC:
202 case KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
// Computes the storage geometry of a GL format/type pair:
//   block_dim       - block edge length in pixels (4 for block-compressed formats,
//                     1 for uncompressed formats)
//   bytes_per_block - bytes per block (compressed) or bytes per pixel (uncompressed)
// ogl_fmt may be either a compressed internal format or an uncompressed
// client format; ogl_type is the per-component GL type (ignored for
// compressed formats). Returns false if the pair is not recognized.
208 bool ktx_get_ogl_fmt_desc(uint32 ogl_fmt, uint32 ogl_type, uint &block_dim, uint &bytes_per_block)
210 uint ogl_type_size = ktx_get_ogl_type_size(ogl_type);
// --- Compressed formats with 8-byte (64-bit) blocks ---
217 case KTX_COMPRESSED_RED_RGTC1:
218 case KTX_COMPRESSED_SIGNED_RED_RGTC1_EXT:
219 case KTX_COMPRESSED_LUMINANCE_LATC1_EXT:
220 case KTX_COMPRESSED_SIGNED_LUMINANCE_LATC1_EXT:
221 case KTX_ETC1_RGB8_OES:
224 case KTX_COMPRESSED_RGB_S3TC_DXT1_EXT:
225 case KTX_COMPRESSED_RGBA_S3TC_DXT1_EXT:
226 case KTX_COMPRESSED_SRGB_S3TC_DXT1_EXT:
227 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
228 case KTX_COMPRESSED_R11_EAC:
229 case KTX_COMPRESSED_SIGNED_R11_EAC:
230 case KTX_COMPRESSED_RGB8_ETC2:
231 case KTX_COMPRESSED_SRGB8_ETC2:
232 case KTX_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
233 case KTX_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
// --- Compressed formats with 16-byte (128-bit) blocks ---
239 case KTX_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT:
240 case KTX_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2_EXT:
241 case KTX_COMPRESSED_RED_GREEN_RGTC2_EXT:
242 case KTX_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT:
245 case KTX_COMPRESSED_RGBA_S3TC_DXT3_EXT:
246 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
247 case KTX_COMPRESSED_RGBA_S3TC_DXT5_EXT:
248 case KTX_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
249 case KTX_RGBA_DXT5_S3TC:
250 case KTX_RGBA4_DXT5_S3TC:
251 case KTX_COMPRESSED_RGBA8_ETC2_EAC:
252 case KTX_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
253 case KTX_COMPRESSED_RG11_EAC:
254 case KTX_COMPRESSED_SIGNED_RG11_EAC:
255 case KTX_COMPRESSED_RGBA_BPTC_UNORM_ARB:
256 case KTX_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB:
257 case KTX_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB:
258 case KTX_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB:
261 bytes_per_block = 16;
// --- Uncompressed single-component formats: one type unit per pixel ---
269 case KTX_RED_INTEGER:
270 case KTX_GREEN_INTEGER:
271 case KTX_BLUE_INTEGER:
272 case KTX_ALPHA_INTEGER:
274 case KTX_DEPTH_COMPONENT:
275 case KTX_LUMINANCE_INTEGER_EXT:
277 bytes_per_block = ogl_type_size;
// Sized 8-bit single-channel internal formats imply a 1-byte type.
285 VOGL_ASSERT(ogl_type_size == 1);
// --- Uncompressed two-component formats ---
292 case KTX_LUMINANCE_ALPHA:
293 case KTX_LUMINANCE_ALPHA_INTEGER_EXT:
295 bytes_per_block = 2 * ogl_type_size;
299 case KTX_LUMINANCE8_ALPHA8:
301 VOGL_ASSERT(ogl_type_size == 1);
// --- Uncompressed three-component formats ---
// Packed types (e.g. 5_6_5) hold the whole pixel in one type unit.
309 case KTX_RGB_INTEGER:
310 case KTX_BGR_INTEGER:
312 bytes_per_block = ktx_is_packed_pixel_ogl_type(ogl_type) ? ogl_type_size : (3 * ogl_type_size);
318 VOGL_ASSERT(ogl_type_size == 1);
// --- Uncompressed four-component formats ---
325 case KTX_RGBA_INTEGER:
326 case KTX_BGRA_INTEGER:
329 bytes_per_block = ktx_is_packed_pixel_ogl_type(ogl_type) ? ogl_type_size : (4 * ogl_type_size);
332 case KTX_SRGB8_ALPHA8:
335 VOGL_ASSERT(ogl_type_size == 1);
// --- Depth-stencil: packed type (e.g. 24_8) or two units per pixel ---
339 case KTX_DEPTH_STENCIL:
341 bytes_per_block = ktx_is_packed_pixel_ogl_type(ogl_type) ? ogl_type_size : (2 * ogl_type_size);
// Derives m_block_dim and m_bytes_per_block from the header's format fields,
// and validates that the header's glType/glFormat are consistent with whether
// glInternalFormat is compressed (KTX requires glType == glFormat == 0 for
// compressed textures, and nonzero for uncompressed ones).
350 bool ktx_texture::compute_pixel_info()
// glType==0 or glFormat==0 signals a compressed texture per the KTX spec.
352 if ((!m_header.m_glType) || (!m_header.m_glFormat))
354 if (!ktx_is_compressed_ogl_fmt(m_header.m_glInternalFormat))
357 // Must be a compressed format.
// Compressed textures must have BOTH glType and glFormat zero.
358 if ((m_header.m_glType) || (m_header.m_glFormat))
361 if (!ktx_get_ogl_fmt_desc(m_header.m_glInternalFormat, m_header.m_glType, m_block_dim, m_bytes_per_block))
// A compressed format with a 1-pixel block would be nonsensical.
364 if (m_block_dim == 1)
369 // Must be an uncompressed format.
370 if (ktx_is_compressed_ogl_fmt(m_header.m_glInternalFormat))
// For uncompressed data, geometry comes from glFormat/glType, not glInternalFormat.
373 if (!ktx_get_ogl_fmt_desc(m_header.m_glFormat, m_header.m_glType, m_block_dim, m_bytes_per_block))
// Parses an entire KTX file from the stream: header (with optional endian
// swap), key/value metadata, then every mip level's image data into
// m_image_data. Returns false on any malformed or truncated input.
// Contains workarounds (VOGL_KTX_PVRTEX_WORKAROUNDS) for broken files
// written by PVRTexTool.
382 bool ktx_texture::read_from_stream(data_stream_serializer &serializer)
// --- Header ---
387 if (serializer.read(&m_header, 1, sizeof(m_header)) != sizeof(ktx_header))
// Reject files without the 12-byte KTX identifier.
391 if (memcmp(s_ktx_file_id, m_header.m_identifier, sizeof(m_header.m_identifier)))
394 if ((m_header.m_endianness != KTX_OPPOSITE_ENDIAN) && (m_header.m_endianness != KTX_ENDIAN))
// If the file was written on an opposite-endian machine, swap every header
// field now; image data is swapped later, per-element, using glTypeSize.
397 m_opposite_endianness = (m_header.m_endianness == KTX_OPPOSITE_ENDIAN);
398 if (m_opposite_endianness)
400 m_header.endian_swap();
402 if ((m_header.m_glTypeSize != sizeof(uint8)) && (m_header.m_glTypeSize != sizeof(uint16)) && (m_header.m_glTypeSize != sizeof(uint32)))
409 if (!compute_pixel_info())
411 #if VOGL_KTX_PVRTEX_WORKAROUNDS
412 // rg [9/10/13] - moved this check into here, instead of in compute_pixel_info(), but need to retest it.
413 if ((!m_header.m_glInternalFormat) && (!m_header.m_glType) && (!m_header.m_glTypeSize) && (!m_header.m_glBaseInternalFormat))
415 // PVRTexTool writes bogus headers when outputting ETC1.
416 console::warning("ktx_texture::compute_pixel_info: Header doesn't specify any format, assuming ETC1 and hoping for the best\n");
417 m_header.m_glBaseInternalFormat = KTX_RGB;
418 m_header.m_glInternalFormat = KTX_ETC1_RGB8_OES;
419 m_header.m_glTypeSize = 1;
// ETC1 uses 4x4 blocks of 8 bytes.
421 m_bytes_per_block = 8;
430 // Read the key value entries
431 uint num_key_value_bytes_remaining = m_header.m_bytesOfKeyValueData;
432 while (num_key_value_bytes_remaining)
// Each entry is a uint32 byte count followed by that many bytes + padding.
434 if (num_key_value_bytes_remaining < sizeof(uint32))
437 uint32 key_value_byte_size;
438 if (serializer.read(&key_value_byte_size, 1, sizeof(uint32)) != sizeof(uint32))
441 num_key_value_bytes_remaining -= sizeof(uint32);
443 if (m_opposite_endianness)
444 key_value_byte_size = utils::swap32(key_value_byte_size);
// Guard against a size field that overruns the declared metadata region.
446 if (key_value_byte_size > num_key_value_bytes_remaining)
449 uint8_vec key_value_data;
450 if (key_value_byte_size)
452 key_value_data.resize(key_value_byte_size);
453 if (serializer.read(&key_value_data[0], 1, key_value_byte_size) != key_value_byte_size)
457 m_key_values.push_back(key_value_data);
// Entries are padded to the next 4-byte boundary.
459 uint padding = 3 - ((key_value_byte_size + 3) % 4);
462 if (serializer.read(pad_bytes, 1, padding) != padding)
466 num_key_value_bytes_remaining -= key_value_byte_size;
467 if (num_key_value_bytes_remaining < padding)
469 num_key_value_bytes_remaining -= padding;
472 // Now read the mip levels
// Sanity cap on total image count to avoid absurd allocations from hostile files.
473 uint total_faces = get_num_mips() * get_array_size() * get_num_faces() * get_depth();
474 if ((!total_faces) || (total_faces > 65535))
477 // See Section 2.8 of KTX file format: No rounding to block sizes should be applied for block compressed textures.
478 // OK, I'm going to break that rule otherwise KTX can only store a subset of textures that DDS can handle for no good reason.
480 const uint mip0_row_blocks = m_header.m_pixelWidth / m_block_dim;
481 const uint mip0_col_blocks = VOGL_MAX(1, m_header.m_pixelHeight) / m_block_dim;
// Round dimensions UP to whole blocks (deliberate deviation from the spec, see above).
483 const uint mip0_row_blocks = (m_header.m_pixelWidth + m_block_dim - 1) / m_block_dim;
484 const uint mip0_col_blocks = (VOGL_MAX(1, m_header.m_pixelHeight) + m_block_dim - 1) / m_block_dim;
486 if ((!mip0_row_blocks) || (!mip0_col_blocks))
489 const uint mip0_depth = VOGL_MAX(1, m_header.m_pixelDepth);
490 VOGL_NOTE_UNUSED(mip0_depth);
492 bool has_valid_image_size_fields = true;
493 bool disable_mip_and_cubemap_padding = false;
495 #if VOGL_KTX_PVRTEX_WORKAROUNDS
497 // PVRTexTool has a bogus KTX writer that doesn't write any imageSize fields. Nice.
// Pre-compute how many bytes a well-formed file would still contain; if the
// stream is shorter, assume the PVRTexTool bug and read without imageSize fields.
498 size_t expected_bytes_remaining = 0;
499 for (uint mip_level = 0; mip_level < get_num_mips(); mip_level++)
501 uint mip_width, mip_height, mip_depth;
502 get_mip_dim(mip_level, mip_width, mip_height, mip_depth);
504 const uint mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
505 const uint mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
506 if ((!mip_row_blocks) || (!mip_col_blocks))
// One imageSize field per mip level.
509 expected_bytes_remaining += sizeof(uint32);
// Plain (non-array) cubemaps store each face separately, each padded to 4 bytes.
511 if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
513 for (uint face = 0; face < get_num_faces(); face++)
515 uint slice_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
516 expected_bytes_remaining += slice_size;
518 uint num_cube_pad_bytes = 3 - ((slice_size + 3) % 4);
519 expected_bytes_remaining += num_cube_pad_bytes;
524 uint total_mip_size = 0;
525 for (uint array_element = 0; array_element < get_array_size(); array_element++)
527 for (uint face = 0; face < get_num_faces(); face++)
529 for (uint zslice = 0; zslice < mip_depth; zslice++)
531 uint slice_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
532 total_mip_size += slice_size;
536 expected_bytes_remaining += total_mip_size;
538 uint num_mip_pad_bytes = 3 - ((total_mip_size + 3) % 4);
539 expected_bytes_remaining += num_mip_pad_bytes;
543 if (serializer.get_stream()->get_remaining() < expected_bytes_remaining)
545 has_valid_image_size_fields = false;
546 disable_mip_and_cubemap_padding = true;
547 console::warning("ktx_texture::read_from_stream: KTX file size is smaller than expected - trying to read anyway without imageSize fields\n");
// --- Image data: one pass per mip level ---
552 for (uint mip_level = 0; mip_level < get_num_mips(); mip_level++)
554 uint mip_width, mip_height, mip_depth;
555 get_mip_dim(mip_level, mip_width, mip_height, mip_depth);
557 const uint mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
558 const uint mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
559 if ((!mip_row_blocks) || (!mip_col_blocks))
562 uint32 image_size = 0;
// If the file lacks imageSize fields (PVRTexTool workaround), synthesize them.
563 if (!has_valid_image_size_fields)
565 if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
567 // The KTX file format has an exception for plain cubemap textures, argh.
568 image_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
572 image_size = mip_depth * mip_row_blocks * mip_col_blocks * m_bytes_per_block * get_array_size() * get_num_faces();
577 if (serializer.read(&image_size, 1, sizeof(image_size)) != sizeof(image_size))
580 if (m_opposite_endianness)
581 image_size = utils::swap32(image_size);
587 uint total_mip_size = 0;
589 // The KTX file format has an exception for plain cubemap textures, argh.
590 if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
592 // plain non-array cubemap
// For plain cubemaps, imageSize is the size of ONE face; each face is
// read and padded independently.
593 for (uint face = 0; face < get_num_faces(); face++)
595 VOGL_ASSERT(m_image_data.size() == get_image_index(mip_level, 0, face, 0));
597 m_image_data.push_back(uint8_vec());
598 uint8_vec &image_data = m_image_data.back();
600 image_data.resize(image_size);
601 if (serializer.read(&image_data[0], 1, image_size) != image_size)
604 if (m_opposite_endianness)
605 utils::endian_swap_mem(&image_data[0], image_size, m_header.m_glTypeSize);
607 uint num_cube_pad_bytes = disable_mip_and_cubemap_padding ? 0 : (3 - ((image_size + 3) % 4));
608 if (serializer.read(pad_bytes, 1, num_cube_pad_bytes) != num_cube_pad_bytes)
611 total_mip_size += image_size + num_cube_pad_bytes;
// Tracks how much of this mip's declared imageSize is still unread.
616 uint num_image_bytes_remaining = image_size;
618 // 1D, 2D, 3D (normal or array texture), or array cubemap
619 for (uint array_element = 0; array_element < get_array_size(); array_element++)
621 for (uint face = 0; face < get_num_faces(); face++)
623 for (uint zslice = 0; zslice < mip_depth; zslice++)
625 uint slice_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
// A slice larger than the declared imageSize means a corrupt file.
626 if ((!slice_size) || (slice_size > num_image_bytes_remaining))
629 uint image_index = get_image_index(mip_level, array_element, face, zslice);
630 m_image_data.ensure_element_is_valid(image_index);
632 uint8_vec &image_data = m_image_data[image_index];
634 image_data.resize(slice_size);
635 if (serializer.read(&image_data[0], 1, slice_size) != slice_size)
638 if (m_opposite_endianness)
639 utils::endian_swap_mem(&image_data[0], slice_size, m_header.m_glTypeSize);
641 num_image_bytes_remaining -= slice_size;
643 total_mip_size += slice_size;
// Leftover bytes mean imageSize disagreed with the computed geometry.
648 if (num_image_bytes_remaining)
// Each mip level is padded to a 4-byte boundary (unless disabled by the workaround).
655 uint num_mip_pad_bytes = disable_mip_and_cubemap_padding ? 0 : (3 - ((total_mip_size + 3) % 4));
656 if (serializer.read(pad_bytes, 1, num_mip_pad_bytes) != num_mip_pad_bytes)
// Serializes this texture to a KTX file: header, optional key/value metadata
// (suppressed when no_keyvalue_data is true), then all mip-level image data
// with the padding the KTX format requires. When m_opposite_endianness is set,
// the header, size fields and image data are byte-swapped on the way out.
// Returns false if the texture is inconsistent or any write fails.
662 bool ktx_texture::write_to_stream(data_stream_serializer &serializer, bool no_keyvalue_data) const
664 if (!consistency_check())
// --- Finalize and write the header ---
670 memcpy(m_header.m_identifier, s_ktx_file_id, sizeof(m_header.m_identifier));
671 m_header.m_endianness = m_opposite_endianness ? KTX_OPPOSITE_ENDIAN : KTX_ENDIAN;
// block_dim 1 => uncompressed: derive glTypeSize/base format from glType/glFormat.
673 if (m_block_dim == 1)
675 m_header.m_glTypeSize = ktx_get_ogl_type_size(m_header.m_glType);
676 m_header.m_glBaseInternalFormat = m_header.m_glFormat;
// Compressed: base internal format comes from the compressed-format table.
680 m_header.m_glBaseInternalFormat = ktx_get_ogl_compressed_base_internal_fmt(m_header.m_glInternalFormat);
683 m_header.m_bytesOfKeyValueData = 0;
684 if (!no_keyvalue_data)
// Each entry contributes its uint32 size field plus its data rounded up to 4 bytes.
686 for (uint i = 0; i < m_key_values.size(); i++)
687 m_header.m_bytesOfKeyValueData += sizeof(uint32) + ((m_key_values[i].size() + 3) & ~3);
// Swap the header only for the duration of the write, then restore it.
690 if (m_opposite_endianness)
691 m_header.endian_swap();
693 bool success = (serializer.write(&m_header, sizeof(m_header), 1) == 1);
695 if (m_opposite_endianness)
696 m_header.endian_swap();
// --- Key/value metadata ---
701 uint total_key_value_bytes = 0;
702 const uint8 padding[3] = { 0, 0, 0 };
704 if (!no_keyvalue_data)
706 for (uint i = 0; i < m_key_values.size(); i++)
708 uint32 key_value_size = m_key_values[i].size();
// Swap the size field just for the write, then swap it back for local use.
710 if (m_opposite_endianness)
711 key_value_size = utils::swap32(key_value_size);
713 success = (serializer.write(&key_value_size, sizeof(key_value_size), 1) == 1);
714 total_key_value_bytes += sizeof(key_value_size);
716 if (m_opposite_endianness)
717 key_value_size = utils::swap32(key_value_size);
724 if (serializer.write(&m_key_values[i][0], key_value_size, 1) != 1)
726 total_key_value_bytes += key_value_size;
// Pad each entry to the next 4-byte boundary.
728 uint num_padding = 3 - ((key_value_size + 3) % 4);
729 if ((num_padding) && (serializer.write(padding, num_padding, 1) != 1))
731 total_key_value_bytes += num_padding;
734 (void)total_key_value_bytes;
// Cross-check the bytes actually written against the header field computed above.
737 VOGL_ASSERT(total_key_value_bytes == m_header.m_bytesOfKeyValueData);
// --- Image data, one imageSize + payload per mip level ---
739 for (uint mip_level = 0; mip_level < get_num_mips(); mip_level++)
741 uint mip_width, mip_height, mip_depth;
742 get_mip_dim(mip_level, mip_width, mip_height, mip_depth);
744 const uint mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
745 const uint mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
746 if ((!mip_row_blocks) || (!mip_col_blocks))
749 uint32 image_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
// For everything except plain non-array cubemaps, imageSize covers the whole
// mip level; for plain cubemaps it is the size of a single face (KTX quirk).
750 if ((m_header.m_numberOfArrayElements) || (get_num_faces() == 1))
751 image_size *= (get_array_size() * get_num_faces() * mip_depth);
759 if (m_opposite_endianness)
760 image_size = utils::swap32(image_size);
762 success = (serializer.write(&image_size, sizeof(image_size), 1) == 1);
764 if (m_opposite_endianness)
765 image_size = utils::swap32(image_size);
770 uint total_mip_size = 0;
771 uint total_image_data_size = 0;
773 if ((!m_header.m_numberOfArrayElements) && (get_num_faces() == 6))
775 // plain non-array cubemap
776 for (uint face = 0; face < get_num_faces(); face++)
778 const uint8_vec &image_data = get_image_data(get_image_index(mip_level, 0, face, 0));
779 if ((!image_data.size()) || (image_data.size() != image_size))
// Opposite endianness: swap a temporary copy so the stored data is untouched.
782 if (m_opposite_endianness)
784 uint8_vec tmp_image_data(image_data);
785 utils::endian_swap_mem(&tmp_image_data[0], tmp_image_data.size(), m_header.m_glTypeSize);
786 if (serializer.write(&tmp_image_data[0], tmp_image_data.size(), 1) != 1)
789 else if (serializer.write(&image_data[0], image_data.size(), 1) != 1)
792 // Not +=, but =, because of the silly image_size plain cubemap exception in the KTX file format
793 total_image_data_size = image_data.size();
// Each cubemap face is padded to a 4-byte boundary.
795 uint num_cube_pad_bytes = 3 - ((image_data.size() + 3) % 4);
796 if ((num_cube_pad_bytes) && (serializer.write(padding, num_cube_pad_bytes, 1) != 1))
799 total_mip_size += image_size + num_cube_pad_bytes;
804 // 1D, 2D, 3D (normal or array texture), or array cubemap
805 for (uint array_element = 0; array_element < get_array_size(); array_element++)
807 for (uint face = 0; face < get_num_faces(); face++)
809 for (uint zslice = 0; zslice < mip_depth; zslice++)
811 const uint8_vec &image_data = get_image_data(get_image_index(mip_level, array_element, face, zslice));
812 if (!image_data.size())
815 if (m_opposite_endianness)
817 uint8_vec tmp_image_data(image_data);
818 utils::endian_swap_mem(&tmp_image_data[0], tmp_image_data.size(), m_header.m_glTypeSize);
819 if (serializer.write(&tmp_image_data[0], tmp_image_data.size(), 1) != 1)
822 else if (serializer.write(&image_data[0], image_data.size(), 1) != 1)
825 total_image_data_size += image_data.size();
827 total_mip_size += image_data.size();
// Pad the whole mip level to a 4-byte boundary.
832 uint num_mip_pad_bytes = 3 - ((total_mip_size + 3) % 4);
833 if ((num_mip_pad_bytes) && (serializer.write(padding, num_mip_pad_bytes, 1) != 1))
835 total_mip_size += num_mip_pad_bytes;
838 VOGL_ASSERT((total_mip_size & 3) == 0);
839 VOGL_ASSERT(total_image_data_size == image_size);
// Initializes the header for a 1D texture (single face, no array elements);
// derives block/pixel info from the given GL format triplet.
845 bool ktx_texture::init_1D(uint width, uint num_mips, uint32 ogl_internal_fmt, uint32 ogl_fmt, uint32 ogl_type)
849 m_header.m_pixelWidth = width;
850 m_header.m_numberOfMipmapLevels = num_mips;
851 m_header.m_glInternalFormat = ogl_internal_fmt;
852 m_header.m_glFormat = ogl_fmt;
853 m_header.m_glType = ogl_type;
854 m_header.m_numberOfFaces = 1;
856 if (!compute_pixel_info())
// Initializes the header for a 1D array texture with array_size layers.
862 bool ktx_texture::init_1D_array(uint width, uint num_mips, uint array_size, uint32 ogl_internal_fmt, uint32 ogl_fmt, uint32 ogl_type)
866 m_header.m_pixelWidth = width;
867 m_header.m_numberOfMipmapLevels = num_mips;
868 m_header.m_numberOfArrayElements = array_size;
869 m_header.m_glInternalFormat = ogl_internal_fmt;
870 m_header.m_glFormat = ogl_fmt;
871 m_header.m_glType = ogl_type;
872 m_header.m_numberOfFaces = 1;
874 if (!compute_pixel_info())
// Initializes the header for a plain 2D texture.
880 bool ktx_texture::init_2D(uint width, uint height, uint num_mips, uint32 ogl_internal_fmt, uint32 ogl_fmt, uint32 ogl_type)
884 m_header.m_pixelWidth = width;
885 m_header.m_pixelHeight = height;
886 m_header.m_numberOfMipmapLevels = num_mips;
887 m_header.m_glInternalFormat = ogl_internal_fmt;
888 m_header.m_glFormat = ogl_fmt;
889 m_header.m_glType = ogl_type;
890 m_header.m_numberOfFaces = 1;
892 if (!compute_pixel_info())
// Initializes the header for a 2D array texture with array_size layers.
898 bool ktx_texture::init_2D_array(uint width, uint height, uint num_mips, uint array_size, uint32 ogl_internal_fmt, uint32 ogl_fmt, uint32 ogl_type)
902 m_header.m_pixelWidth = width;
903 m_header.m_pixelHeight = height;
904 m_header.m_numberOfMipmapLevels = num_mips;
905 m_header.m_numberOfArrayElements = array_size;
906 m_header.m_glInternalFormat = ogl_internal_fmt;
907 m_header.m_glFormat = ogl_fmt;
908 m_header.m_glType = ogl_type;
909 m_header.m_numberOfFaces = 1;
911 if (!compute_pixel_info())
// Initializes the header for a 3D (volume) texture.
917 bool ktx_texture::init_3D(uint width, uint height, uint depth, uint num_mips, uint32 ogl_internal_fmt, uint32 ogl_fmt, uint32 ogl_type)
921 m_header.m_pixelWidth = width;
922 m_header.m_pixelHeight = height;
923 m_header.m_pixelDepth = depth;
924 m_header.m_numberOfMipmapLevels = num_mips;
925 m_header.m_glInternalFormat = ogl_internal_fmt;
926 m_header.m_glFormat = ogl_fmt;
927 m_header.m_glType = ogl_type;
928 m_header.m_numberOfFaces = 1;
930 if (!compute_pixel_info())
// Initializes the header for a (square) cubemap texture: 6 faces, width == height == dim.
936 bool ktx_texture::init_cubemap(uint dim, uint num_mips, uint32 ogl_internal_fmt, uint32 ogl_fmt, uint32 ogl_type)
940 m_header.m_pixelWidth = dim;
941 m_header.m_pixelHeight = dim;
942 m_header.m_numberOfMipmapLevels = num_mips;
943 m_header.m_glInternalFormat = ogl_internal_fmt;
944 m_header.m_glFormat = ogl_fmt;
945 m_header.m_glType = ogl_type;
946 m_header.m_numberOfFaces = 6;
948 if (!compute_pixel_info())
// Validates structural invariants of the header: face count must be 1 or 6,
// width must be nonzero, the dimension fields must be consistent with the
// texture kind (cubemaps are 2D, depth implies height), and the declared mip
// count must not exceed what the largest dimension allows.
954 bool ktx_texture::check_header() const
956 if (((get_num_faces() != 1) && (get_num_faces() != 6)) || (!m_header.m_pixelWidth))
// A 3D texture (depth set) must also have a height; height 0 + depth set is invalid.
959 if ((!m_header.m_pixelHeight) && (m_header.m_pixelDepth))
// Cubemaps must be 2D: nonzero height, zero depth.
962 if ((get_num_faces() == 6) && ((m_header.m_pixelDepth) || (!m_header.m_pixelHeight)))
966 if (m_header.m_numberOfMipmapLevels)
// The smallest mip of an N-level chain halves the size N-1 times, so the
// largest dimension must be at least 2^(N-1).
968 const uint max_mipmap_dimension = 1U << (m_header.m_numberOfMipmapLevels - 1U);
969 if (max_mipmap_dimension > (VOGL_MAX(VOGL_MAX(m_header.m_pixelWidth, m_header.m_pixelHeight), m_header.m_pixelDepth)))
// Returns the expected byte size of one image (one face/zslice) at the given
// mip level: blocks-across * blocks-down * bytes-per-block, with dimensions
// rounded up to whole blocks.
977 uint ktx_texture::get_expected_image_size(uint mip_level) const
979 uint mip_width, mip_height, mip_depth;
980 get_mip_dim(mip_level, mip_width, mip_height, mip_depth);
982 const uint mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
983 const uint mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
984 if ((!mip_row_blocks) || (!mip_col_blocks))
990 return mip_row_blocks * mip_col_blocks * m_bytes_per_block;
// Returns the total number of images (across all mips, array elements, faces
// and z-slices) by finding the highest image index actually addressable.
// Returns 0 for an invalid or mip-less texture.
993 uint ktx_texture::get_total_images() const
995 if (!is_valid() || !get_num_mips())
999 //return get_num_mips() * (get_depth() * get_num_faces() * get_array_size());
1001 // Naive algorithm, could just compute based off the # of mips
1003 for (uint mip_level = 0; mip_level < get_num_mips(); mip_level++)
// Depth shrinks per mip level; width/height don't matter for the index.
1005 uint total_zslices = math::maximum<uint>(get_depth() >> mip_level, 1U);
1006 uint index = get_image_index(mip_level, get_array_size() - 1, get_num_faces() - 1, total_zslices - 1);
1007 max_index = math::maximum<uint>(max_index, index);
1010 return max_index + 1;
// Full self-check used before serialization: validates the header, re-derives
// block_dim/bytes_per_block from the format fields and compares against the
// cached members, then verifies that every expected image exists and has
// exactly the expected byte size.
1013 bool ktx_texture::consistency_check() const
1015 if (!check_header())
1018 uint block_dim = 0, bytes_per_block = 0;
// Compressed path (glType/glFormat zero) mirrors compute_pixel_info().
1019 if ((!m_header.m_glType) || (!m_header.m_glFormat))
1021 if ((m_header.m_glType) || (m_header.m_glFormat))
1023 if (!ktx_get_ogl_fmt_desc(m_header.m_glInternalFormat, m_header.m_glType, block_dim, bytes_per_block))
1027 //if ((get_width() % block_dim) || (get_height() % block_dim))
1032 if (!ktx_get_ogl_fmt_desc(m_header.m_glFormat, m_header.m_glType, block_dim, bytes_per_block))
// The freshly derived values must match what compute_pixel_info() cached.
1037 if ((m_block_dim != block_dim) || (m_bytes_per_block != bytes_per_block))
1040 uint total_expected_images = get_total_images();
1041 if (m_image_data.size() != total_expected_images)
1044 for (uint mip_level = 0; mip_level < get_num_mips(); mip_level++)
1046 uint mip_width, mip_height, mip_depth;
1047 get_mip_dim(mip_level, mip_width, mip_height, mip_depth);
1049 const uint mip_row_blocks = (mip_width + m_block_dim - 1) / m_block_dim;
1050 const uint mip_col_blocks = (mip_height + m_block_dim - 1) / m_block_dim;
1051 if ((!mip_row_blocks) || (!mip_col_blocks))
1054 for (uint array_element = 0; array_element < get_array_size(); array_element++)
1056 for (uint face = 0; face < get_num_faces(); face++)
1058 for (uint zslice = 0; zslice < mip_depth; zslice++)
1060 const uint8_vec &image_data = get_image_data(get_image_index(mip_level, array_element, face, zslice));
// Every stored image must be exactly the computed size for its mip level.
1062 uint expected_image_size = mip_row_blocks * mip_col_blocks * m_bytes_per_block;
1063 if (image_data.size() != expected_image_size)
// Appends the key strings of all key/value entries to 'keys'. Each entry's
// key is its leading NUL-terminated string (the value bytes follow the NUL).
1073 void ktx_texture::get_keys(dynamic_string_array &keys) const
1076 keys.reserve(m_key_values.size());
1078 for (uint i = 0; i < m_key_values.size(); i++)
1080 const uint8_vec &v = m_key_values[i];
// The key is the C string at the start of the raw entry buffer.
1082 keys.enlarge(1)->set(reinterpret_cast<const char *>(v.get_ptr()));
// Returns the raw key/value entry whose leading NUL-terminated key matches
// pKey exactly (case-sensitive, NUL included in the comparison), or NULL if
// no entry matches.
1086 const uint8_vec *ktx_texture::find_key(const char *pKey) const
// +1 so the terminating NUL participates in the memcmp (avoids prefix matches).
1088 const uint n = vogl_strlen(pKey) + 1;
1089 for (uint i = 0; i < m_key_values.size(); i++)
1091 const uint8_vec &v = m_key_values[i];
1092 if ((v.size() >= n) && (!memcmp(&v[0], pKey, n)))
// Copies the value bytes (everything after the key's NUL terminator) of the
// entry matching pKey into 'data'. Returns false if the key is not present.
1099 bool ktx_texture::get_key_value_data(const char *pKey, uint8_vec &data) const
1101 const uint8_vec *p = find_key(pKey);
// Value starts right after the key string and its NUL.
1108 const uint ofs = vogl_strlen(pKey) + 1;
1109 const uint8 *pValue = p->get_ptr() + ofs;
1110 const uint n = p->size() - ofs;
1114 memcpy(data.get_ptr(), pValue, n);
// Retrieves the value of the entry matching pKey as a string: the value bytes
// up to (not including) the first NUL, or all of them if none is found.
// Returns false if the key is not present.
1118 bool ktx_texture::get_key_value_as_string(const char *pKey, dynamic_string &str) const
1120 const uint8_vec *p = find_key(pKey);
1127 const uint ofs = vogl_strlen(pKey) + 1;
1128 const uint8 *pValue = p->get_ptr() + ofs;
1129 const uint n = p->size() - ofs;
// Scan for an embedded NUL so the string length excludes the terminator.
1132 for (i = 0; i < n; i++)
1136 str.set_from_buf(pValue, i);
// Appends a new key/value entry stored as [key bytes + NUL][value bytes] and
// returns the index it was added at. No de-duplication: an existing entry
// with the same key is not replaced.
1140 uint ktx_texture::add_key_value(const char *pKey, const void *pVal, uint val_size)
1142 const uint idx = m_key_values.size();
1143 m_key_values.resize(idx + 1);
1144 uint8_vec &v = m_key_values.back();
// Key including its NUL terminator, then the raw value bytes.
1145 v.append(reinterpret_cast<const uint8 *>(pKey), vogl_strlen(pKey) + 1);
1146 v.append(static_cast<const uint8 *>(pVal), val_size);
1150 bool ktx_texture::operator==(const ktx_texture &rhs) const
1155 // This is not super deep because I want to avoid poking around into internal state (such as the header)
1160 CMP(get_ogl_internal_fmt());
1164 CMP(get_num_mips());
1165 CMP(get_array_size());
1166 CMP(get_num_faces());
1167 CMP(is_compressed());
1168 CMP(get_block_dim());
1170 // The image fmt/type shouldn't matter with compressed textures.
1171 if (!is_compressed())
1174 CMP(get_ogl_type());
1177 CMP(get_total_images());
1179 CMP(get_opposite_endianness());
1181 // Do an order insensitive key/value comparison.
1182 dynamic_string_array lhs_keys;
1185 dynamic_string_array rhs_keys;
1186 rhs.get_keys(rhs_keys);
1188 if (lhs_keys.size() != rhs_keys.size())
1191 lhs_keys.sort(dynamic_string_less_than_case_sensitive());
1192 rhs_keys.sort(dynamic_string_less_than_case_sensitive());
1194 for (uint i = 0; i < lhs_keys.size(); i++)
1195 if (lhs_keys[i].compare(rhs_keys[i], true) != 0)
1198 for (uint i = 0; i < lhs_keys.size(); i++)
1200 uint8_vec lhs_data, rhs_data;
1201 if (!get_key_value_data(lhs_keys[i].get_ptr(), lhs_data))
1203 if (!get_key_value_data(lhs_keys[i].get_ptr(), rhs_data))
1205 if (lhs_data != rhs_data)
1210 for (uint l = 0; l < get_num_mips(); l++)
1212 for (uint a = 0; a < get_array_size(); a++)
1214 for (uint f = 0; f < get_num_faces(); f++)
1216 for (uint z = 0; z < get_depth(); z++)
1218 const uint8_vec &lhs_img = get_image_data(l, a, f, z);
1219 const uint8_vec &rhs_img = rhs.get_image_data(l, a, f, z);
1221 if (lhs_img != rhs_img)