
DXT Texture Compression

Introduction to compressed textures

When loading an image into memory from a conventional image format such as PNG, JPEG or TIFF, the size of the resulting texture is calculated as follows:

size = number of bytes per pixel * width * height

If we take an image encoded in RGBA, the number of bytes per pixel is 4, so:

size = 4 * width * height
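For example, a 1024 × 1024 RGBA image occupies 4 * 1024 * 1024 = 4,194,304 bytes, i.e. 4 MiB of memory for a single texture.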

Encoding the image with a compressed texture format reduces this memory footprint; several such formats exist (S3TC/DXT, ETC, PVRTC, ASTC, ...).

In this tutorial we will deal with the S3 Texture Compression (S3TC) format, better known as DXT, but there is one important detail.

When talking about DXT textures there are five formats:

FOURCC   DirectX equivalent   Description          Alpha premultiplied   Compression ratio
DXT1     BC1                  No transparency      Yes                   6:1 (for a 24-bit image)
DXT2     BC2                  Explicit alpha       Yes                   4:1
DXT3     BC2                  Explicit alpha       No                    4:1
DXT4     BC3                  Interpolated alpha   Yes                   4:1
DXT5     BC3                  Interpolated alpha   No                    4:1

In general we use DXT1 when the image has no transparency, and DXT3 or DXT5 when it does (DXT3 suits sharp alpha transitions, DXT5 smooth alpha gradients).
DXT2 and DXT4 use the same block layout as DXT3 and DXT5 (they map to the same DirectX formats, BC2 and BC3) and are generally not used.
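DXT works on 4 × 4 pixel blocks: DXT1 stores each block in 8 bytes and DXT2-DXT5 in 16 bytes, which is where the 6:1 and 4:1 ratios come from. As an illustration, here is a small helper that computes the size in bytes of one mip level (the function dxt_level_size is our own, it is not part of any library):

int dxt_level_size(int width, int height, bool dxt1) {
	// Width and height are rounded up to a whole number of 4x4 blocks.
	int blocksX = (width + 3) / 4;
	int blocksY = (height + 3) / 4;
	return blocksX * blocksY * (dxt1 ? 8 : 16); // 8 bytes per DXT1 block, 16 for DXT2-DXT5
}

For a 1024 × 1024 image this gives 256 * 256 * 8 = 524,288 bytes (512 KiB) in DXT1, against 4 MiB uncompressed.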

How to compress your textures?

Simply use a utility such as Compressonator, which can convert your images to DDS files in the DXT formats.

Reading compressed texture formats

To decode compressed formats we will use the library OpenGL Image (GLI)

You can read the image this way:

gli::texture Texture = gli::load_dds(Filename);
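Before going further it is a good idea to check that the file was actually loaded: gli::texture::empty() returns true when the load failed. A minimal check (the early return assumes this code lives in a function returning a pointer, like the create_texture helper used at the end of this tutorial):

if (Texture.empty()) {
	// Loading failed: wrong path or unreadable DDS file.
	return NULL;
}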

Then we create the texture in OpenGL by hand to get the texture handle.
We start by determining the format of the texture:
gli::gl GL(gli::gl::PROFILE_GL33);
gli::gl::format const Format = GL.translate(Texture.format(), Texture.swizzles());

We use the format to determine the target; with OpenGL the possible targets are GL_TEXTURE_1D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_3D, GL_TEXTURE_CUBE_MAP and GL_TEXTURE_CUBE_MAP_ARRAY.

In our case we will use the target GL_TEXTURE_2D, but we can generalize a little bit with gli:
GLenum Target = GL.translate(Texture.target());

We initialize our texture handle:
GLuint TextureName = 0;

We create the identifier with OpenGL:
glGenTextures(1, &TextureName);

We bind the texture to the target:
glBindTexture(Target, TextureName);

We continue the initialization of the target:

glTexParameteri(Target, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(Target, GL_TEXTURE_MAX_LEVEL, static_cast<GLint>(Texture.levels() - 1));
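The GLI sample code also applies the channel swizzle at this point; if you load formats whose channels need to be reordered you can add the following line (GL_TEXTURE_SWIZZLE_RGBA requires OpenGL 3.3, which matches the PROFILE_GL33 profile used above):

glTexParameteriv(Target, GL_TEXTURE_SWIZZLE_RGBA, &Format.Swizzles[0]);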

We will need the size of the texture.
glm::tvec3<GLsizei> const Extent(Texture.extent());
GLsizei const FaceTotal = static_cast<GLsizei>(Texture.layers() * Texture.faces());

GLsizei width = Extent.x;
GLsizei height = (Texture.target() == gli::TARGET_2D ? Extent.y : FaceTotal);

We allocate the texture storage according to the target type (the glTexStorage* calls are left commented out because they require OpenGL 4.2 or the ARB_texture_storage extension; the glTexImage2D loop works with a plain 3.3 context).
switch (Texture.target())
	{
	case gli::TARGET_1D:
		//glTexStorage1D(
		//	Target, static_cast<GLsizei>(Texture.levels()), Format.Internal, Extent.x);
		break;
	case gli::TARGET_1D_ARRAY:
	case gli::TARGET_2D:
	case gli::TARGET_CUBE:
		for (std::size_t i = 0; i < Texture.levels(); i++) {
			glTexImage2D(Target, static_cast<GLint>(i), Format.Internal, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
			// Each mip level is half the size of the previous one, but never smaller than 1 pixel.
			width = (width > 1) ? width / 2 : 1;
			height = (height > 1) ? height / 2 : 1;
		}

		/*GLenum err;
		while ((err = glGetError()) != GL_NO_ERROR)
		{
		}*/
		/*glTexStorage2D(
			Target, static_cast<GLsizei>(Texture.levels()), Format.Internal,
			Extent.x, Texture.target() == gli::TARGET_2D ? Extent.y : FaceTotal);*/
		break;
		/*case gli::TARGET_2D_ARRAY:
		case gli::TARGET_3D:
		case gli::TARGET_CUBE_ARRAY:
			//glTexStorage3D(
			//	Target, static_cast<GLsizei>(Texture.levels()), Format.Internal,
			//	Extent.x, Extent.y,
			//	Texture.target() == gli::TARGET_3D ? Extent.z : FaceTotal);
			break;*/
	default:
		assert(0);
		break;
	}

Then we update the contents of the texture according to the type.
for (std::size_t Layer = 0; Layer < Texture.layers(); ++Layer)
	for (std::size_t Face = 0; Face < Texture.faces(); ++Face)
		for (std::size_t Level = 0; Level < Texture.levels(); ++Level)
		{
			GLsizei const LayerGL = static_cast<GLsizei>(Layer);
			glm::tvec3<GLsizei> Extent(Texture.extent(Level));
			Target = gli::is_target_cube(Texture.target())
				? static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + Face)
				: Target;

			switch (Texture.target())
			{
			case gli::TARGET_1D:
				if (gli::is_compressed(Texture.format()))
					glCompressedTexSubImage1D(
						Target, static_cast<GLint>(Level), 0, Extent.x,
						Format.Internal, static_cast<GLsizei>(Texture.size(Level)),
						Texture.data(Layer, Face, Level));
				else
					glTexSubImage1D(
						Target, static_cast<GLint>(Level), 0, Extent.x,
						Format.External, Format.Type,
						Texture.data(Layer, Face, Level));
				break;
			case gli::TARGET_1D_ARRAY:
			case gli::TARGET_2D:
			case gli::TARGET_CUBE:
				if (gli::is_compressed(Texture.format()))
					glCompressedTexSubImage2D(
						Target, static_cast<GLint>(Level),
						0, 0,
						Extent.x,
						Texture.target() == gli::TARGET_1D_ARRAY ? LayerGL : Extent.y,
						Format.Internal, static_cast<GLsizei>(Texture.size(Level)),
						Texture.data(Layer, Face, Level));
				else
					glTexSubImage2D(
						Target, static_cast<GLint>(Level),
						0, 0,
						Extent.x,
						Texture.target() == gli::TARGET_1D_ARRAY ? LayerGL : Extent.y,
						Format.External, Format.Type,
						Texture.data(Layer, Face, Level));
				break;
			case gli::TARGET_2D_ARRAY:
			case gli::TARGET_3D:
			case gli::TARGET_CUBE_ARRAY:
				if (gli::is_compressed(Texture.format()))
					glCompressedTexSubImage3D(
						Target, static_cast<GLint>(Level),
						0, 0, 0,
						Extent.x, Extent.y,
						Texture.target() == gli::TARGET_3D ? Extent.z : LayerGL,
						Format.Internal, static_cast<GLsizei>(Texture.size(Level)),
						Texture.data(Layer, Face, Level));
				else
					glTexSubImage3D(
						Target, static_cast<GLint>(Level),
						0, 0, 0,
						Extent.x, Extent.y,
						Texture.target() == gli::TARGET_3D ? Extent.z : LayerGL,
						Format.External, Format.Type,
						Texture.data(Layer, Face, Level));
				break;
			default: assert(0); break;
			}
		}

Now that the texture has been created and filled on the GPU, we can create an object of type GPU_Image.
GPU_Image* result;

// We create the texture data for GPU_Image
GPU_IMAGE_DATA* data;
data = (GPU_IMAGE_DATA*)SDL_malloc(sizeof(GPU_IMAGE_DATA));
data->refcount = 1;
data->handle = (GLuint)TextureName;
data->owns_handle = true;
data->format = Format.Internal;

GPU_Renderer *renderer = GPU_GetCurrentRenderer();

result = (GPU_Image*)SDL_malloc(sizeof(GPU_Image));
result->refcount = 1;
result->target = NULL;
result->renderer = renderer;
result->context_target = renderer->current_context_target;

if (Format.Internal == gli::gl::internal_format::INTERNAL_RGBA_DXT5) {
	result->format = GPU_FORMAT_COMPRESSED_RGBA_DXT5;
}
else if (Format.Internal == gli::gl::internal_format::INTERNAL_RGBA_DXT1) {
	result->format = GPU_FORMAT_COMPRESSED_RGBA_DXT1;
}

result->num_layers = 1;
result->bytes_per_pixel = 4;
result->has_mipmaps = GPU_FALSE;

result->anchor_x = renderer->default_image_anchor_x;
result->anchor_y = renderer->default_image_anchor_y;

result->color = SDL_Color{ 255, 255, 255, 255 };
result->use_blending = GPU_TRUE;
result->blend_mode = GPU_GetBlendModeFromPreset(GPU_BLEND_NORMAL);
result->snap_mode = GPU_SNAP_POSITION_AND_DIMENSIONS;
result->filter_mode = GPU_FILTER_LINEAR;
result->wrap_mode_x = GPU_WRAP_NONE;
result->wrap_mode_y = GPU_WRAP_NONE;

result->data = data;
result->is_alias = GPU_FALSE;

result->using_virtual_resolution = GPU_FALSE;
result->w = (Uint16)Extent.x;
result->h = (Uint16)Extent.y;

result->base_w = (Uint16)Extent.x;
result->base_h = (Uint16)Extent.y;
result->texture_w = (Uint16)Extent.x;
result->texture_h = (Uint16)Extent.y;

GPU_SetImageFilter(result, GPU_FILTER_LINEAR);
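
All the code above (from gli::load_dds to GPU_SetImageFilter) can be wrapped in a helper returning the GPU_Image; the main function below calls it create_texture. A minimal sketch of its shape (the exact signature is an assumption, only the name comes from the code below):

GPU_Image* create_texture(const char *Filename) {
	gli::texture Texture = gli::load_dds(Filename);
	if (Texture.empty())
		return NULL;

	// ... translate the format, create and fill the OpenGL texture,
	// then fill the GPU_Image structure as shown above ...

	GPU_SetImageFilter(result, GPU_FILTER_LINEAR);
	return result;
}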

For this code to work, SDL_gpu needs to be modified to support compressed texture formats.

So in the file SDL_gpu.h we have the enum:
/*! \ingroup ImageControls
 * Image format enum
 * \see GPU_CreateImage()
 */
typedef enum {
    GPU_FORMAT_LUMINANCE = 1,
    GPU_FORMAT_LUMINANCE_ALPHA = 2,
    GPU_FORMAT_RGB = 3,
    GPU_FORMAT_RGBA = 4,
    GPU_FORMAT_ALPHA = 5,
    GPU_FORMAT_RG = 6,
    GPU_FORMAT_YCbCr422 = 7,
    GPU_FORMAT_YCbCr420P = 8,
    GPU_FORMAT_BGR = 9,
    GPU_FORMAT_BGRA = 10,
    GPU_FORMAT_ABGR = 11
} GPU_FormatEnum;

We add the two new formats GPU_FORMAT_COMPRESSED_RGBA_DXT1 and GPU_FORMAT_COMPRESSED_RGBA_DXT5:
/*! \ingroup ImageControls
 * Image format enum
 * \see GPU_CreateImage()
 */
typedef enum {
    GPU_FORMAT_LUMINANCE = 1,
    GPU_FORMAT_LUMINANCE_ALPHA = 2,
    GPU_FORMAT_RGB = 3,
    GPU_FORMAT_RGBA = 4,
    GPU_FORMAT_ALPHA = 5,
    GPU_FORMAT_RG = 6,
    GPU_FORMAT_YCbCr422 = 7,
    GPU_FORMAT_YCbCr420P = 8,
    GPU_FORMAT_BGR = 9,
    GPU_FORMAT_BGRA = 10,
    GPU_FORMAT_ABGR = 11,
    GPU_FORMAT_COMPRESSED_RGBA_DXT1 = 12,
    GPU_FORMAT_COMPRESSED_RGBA_DXT5 = 13
} GPU_FormatEnum;

Normally you can now display DXT textures; we still need to create the SDL window and OpenGL context, so here is the main function.
int main(int argc, char *argv[]) {
	SDL_Init(SDL_INIT_VIDEO);

	GPU_Target *window = GPU_InitRenderer(GPU_RENDERER_OPENGL_3, 1024, 512, GPU_DEFAULT_INIT_FLAGS);

	if (window == NULL || ogl_LoadFunctions() == ogl_LOAD_FAILED) {
		std::cout << "Error initializing OpenGL\n";
		return 1;
	}

	GPU_Image *dxt1Image = create_texture("dirt.dds");
	GPU_Image *dxt5Image = create_texture("spritesheet.dds");

	SDL_Event event;

	bool done = false;

	while (!done) {
		while (SDL_PollEvent(&event)) {
			if (event.type == SDL_QUIT)
				done = true;
			else if (event.type == SDL_KEYDOWN) {
				if (event.key.keysym.sym == SDLK_ESCAPE)
					done = true;
			}
		}

		GPU_Clear(window);
		GPU_BlitTransformX(dxt1Image, NULL, window, 0, 0, 0, 0, 0, 1, 1);
		GPU_BlitTransformX(dxt5Image, NULL, window, 512, 0, 0, 0, 0, 1, 1);
		GPU_Flip(window);
	}

	GPU_FreeImage(dxt1Image);
	GPU_FreeImage(dxt5Image);

	GPU_Quit();

	return 0;
}
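
Note that decoding of S3TC/DXT textures on the GPU is provided by the GL_EXT_texture_compression_s3tc extension. It is available on virtually every desktop driver, but if you want to be safe you can check it after creating the context, for example (SDL_GL_ExtensionSupported is a standard SDL2 function; where you place the check is up to you):

if (!SDL_GL_ExtensionSupported("GL_EXT_texture_compression_s3tc")) {
	std::cout << "S3TC/DXT texture compression is not supported by this driver\n";
}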

For those who want to download the entire project with the libraries: TextureDXT.7z
