
DXT Texture Compression with SDL

In this tutorial we use DXT texture compression with SDL, using the SDL_gpu and GLI libraries.

Introduction to compressed textures

First, consider loading an image into memory from a conventional format such as PNG, JPEG, or TIFF.
The size of the decoded texture in memory is computed as follows:

size = number of bytes per pixel * width * height

If the image is decoded as RGBA, the number of bytes per pixel is 4,
so we obtain size = 4 * width * height.
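
For example, a 1024×1024 RGBA image occupies 4 MiB uncompressed, while DXT1 stores each 4×4 pixel block in 8 bytes and DXT5 in 16 bytes. A minimal sketch of the arithmetic (the block sizes are the standard S3TC figures):

#include <cstdio>

int main() {
	const long width = 1024, height = 1024;
	const long rgba = 4 * width * height;               // 4 bytes per pixel, uncompressed
	const long dxt1 = (width / 4) * (height / 4) * 8;   // 8 bytes per 4x4 block
	const long dxt5 = (width / 4) * (height / 4) * 16;  // 16 bytes per 4x4 block
	std::printf("RGBA: %ld  DXT1: %ld  DXT5: %ld bytes\n", rgba, dxt1, dxt5);
	return 0;
}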

If we instead encode the image in a compressed texture format, the memory footprint shrinks considerably. Several such formats exist.

In this tutorial, we will deal with the S3 Texture Compression (S3TC) family, better known as DXT, but there is one small important detail.

When talking about DXT textures there are a total of five formats:

FOURCC | DirectX equivalent | Description        | Alpha premultiplied | Compression ratio
DXT1   | BC1                | No transparency    | Yes                 | 6:1 (for a 24-bit image)
DXT2   | BC2                | Explicit alpha     | Yes                 | 4:1
DXT3   | BC2                | Explicit alpha     | No                  | 4:1
DXT4   | BC3                | Interpolated alpha | Yes                 | 4:1
DXT5   | BC3                | Interpolated alpha | No                  | 4:1

In general, we use DXT1 when the image has no transparency, and DXT3 or DXT5 depending on how the alpha channel behaves: DXT3 suits sharp alpha transitions, DXT5 suits smooth gradients.
DXT2 and DXT4 share their block layout with DXT3 and DXT5 (the same BC2 and BC3 formats under DirectX) and in general are not used.

How to compress your textures?

Use a utility such as Compressonator.
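
Compressonator also ships with a command-line tool, which is convenient for batch conversion. For example (the exact executable name and flags may vary by version, so check its help output):

CompressonatorCLI -fd DXT1 dirt.png dirt.dds
CompressonatorCLI -fd DXT5 spritesheet.png spritesheet.dds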

Reading compressed texture formats

To decode compressed formats we will use the OpenGL Image (GLI) library.

You can read the image this way, after including gli/gli.hpp:

gli::texture Texture = gli::load_dds(Filename);
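
gli::load_dds returns an empty texture when loading fails, so it is worth checking before going further. A minimal sketch, assuming this code lives in a function returning a pointer (like the create_texture helper used later):

if (Texture.empty()) {
	// the file is missing or is not a valid DDS file
	return NULL;
}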

Next we will create the texture in OpenGL by hand to get the texture handle.

gli::gl GL(gli::gl::PROFILE_GL33);
gli::gl::format const Format = GL.translate(Texture.format(), Texture.swizzles());

We use the format to determine the target. With OpenGL the possible targets are GL_TEXTURE_1D, GL_TEXTURE_1D_ARRAY, GL_TEXTURE_2D, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_3D, GL_TEXTURE_CUBE_MAP and GL_TEXTURE_CUBE_MAP_ARRAY.

In this tutorial we only need the target GL_TEXTURE_2D, but we can generalize a little bit with gli:

GLenum Target = GL.translate(Texture.target());
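
For a plain 2D DDS file we can verify that the translation gives what we expect; this assertion mirrors the one in GLI's own sample code (requires <cassert>):

assert(gli::is_compressed(Texture.format()) && Texture.target() == gli::TARGET_2D);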

We initialize our texture handle:

GLuint TextureName = 0;

Next we create the identifier with OpenGL:

glGenTextures(1, &TextureName);

Then we associate the target with the texture:

glBindTexture(Target, TextureName);

We continue the initialization of the target:

glTexParameteri(Target, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(Target, GL_TEXTURE_MAX_LEVEL, static_cast<GLint>(Texture.levels() - 1));
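
GLI's sample also forwards the swizzle mask computed by GL.translate; on an OpenGL 3.3 context this is optional for plain DXT data but keeps channel ordering correct for formats that need it:

glTexParameteriv(Target, GL_TEXTURE_SWIZZLE_RGBA, &Format.Swizzles[0]);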

We will need the size of the texture.

glm::tvec3<GLsizei> const Extent(Texture.extent());
GLsizei const FaceTotal = static_cast<GLsizei>(Texture.layers() * Texture.faces());

GLsizei width = Extent.x;
GLsizei height = (Texture.target() == gli::TARGET_2D ? Extent.y : FaceTotal);

We allocate the texture according to the type.

switch (Texture.target())
	{
	case gli::TARGET_1D:
		//glTexStorage1D(
		//	Target, static_cast<GLint>(Texture.levels()), Format.Internal, Extent.x);
		break;
	case gli::TARGET_1D_ARRAY:
	case gli::TARGET_2D:
	case gli::TARGET_CUBE:
		// Allocate every mipmap level by hand; glTexStorage2D would do the
		// same in one call but requires OpenGL 4.2 or ARB_texture_storage.
		for (size_t i = 0; i < Texture.levels(); i++) {
			glTexImage2D(Target, static_cast<GLint>(i), Format.Internal, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
			// each mip level is half the previous one, never smaller than 1
			width = width > 1 ? width / 2 : 1;
			height = height > 1 ? height / 2 : 1;
		}
		/*glTexStorage2D(
			Target, static_cast<GLint>(Texture.levels()), Format.Internal,
			Extent.x, Texture.target() == gli::TARGET_2D ? Extent.y : FaceTotal);*/
		break;
		/*case gli::TARGET_2D_ARRAY:
		case gli::TARGET_3D:
		case gli::TARGET_CUBE_ARRAY:
			//glTexStorage3D(
			//	Target, static_cast<GLint>(Texture.levels()), Format.Internal,
			//	Extent.x, Extent.y,
			//	Texture.target() == gli::TARGET_3D ? Extent.z : FaceTotal);
			break;*/
	default:
		assert(0);
		break;
	}
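
If allocation fails on a given driver, a quick check right after the switch makes the failure visible; a minimal sketch using plain glGetError (assumes <cstdio> is included):

GLenum err;
while ((err = glGetError()) != GL_NO_ERROR) {
	std::fprintf(stderr, "GL error while allocating the texture: 0x%x\n", err);
}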

Afterwards we upload the contents of each layer, face and mipmap level according to the target type.

for (std::size_t Layer = 0; Layer < Texture.layers(); ++Layer)
	for (std::size_t Face = 0; Face < Texture.faces(); ++Face)
		for (std::size_t Level = 0; Level < Texture.levels(); ++Level)
		{
			GLsizei const LayerGL = static_cast<GLsizei>(Layer);
			glm::tvec3<GLsizei> Extent(Texture.extent(Level));
			Target = gli::is_target_cube(Texture.target())
				? static_cast<GLenum>(GL_TEXTURE_CUBE_MAP_POSITIVE_X + Face)
				: Target;

			switch (Texture.target())
			{
			case gli::TARGET_1D:
				if (gli::is_compressed(Texture.format()))
					glCompressedTexSubImage1D(
						Target, static_cast<GLint>(Level), 0, Extent.x,
						Format.Internal, static_cast<GLsizei>(Texture.size(Level)),
						Texture.data(Layer, Face, Level));
				else
					glTexSubImage1D(
						Target, static_cast<GLint>(Level), 0, Extent.x,
						Format.External, Format.Type,
						Texture.data(Layer, Face, Level));
				break;
			case gli::TARGET_1D_ARRAY:
			case gli::TARGET_2D:
			case gli::TARGET_CUBE:
				if (gli::is_compressed(Texture.format()))
					glCompressedTexSubImage2D(
						Target, static_cast<GLint>(Level),
						0, 0,
						Extent.x,
						Texture.target() == gli::TARGET_1D_ARRAY ? LayerGL : Extent.y,
						Format.Internal, static_cast<GLsizei>(Texture.size(Level)),
						Texture.data(Layer, Face, Level));
				else
					glTexSubImage2D(
						Target, static_cast<GLint>(Level),
						0, 0,
						Extent.x,
						Texture.target() == gli::TARGET_1D_ARRAY ? LayerGL : Extent.y,
						Format.External, Format.Type,
						Texture.data(Layer, Face, Level));
				break;
			case gli::TARGET_2D_ARRAY:
			case gli::TARGET_3D:
			case gli::TARGET_CUBE_ARRAY:
				if (gli::is_compressed(Texture.format()))
					glCompressedTexSubImage3D(
						Target, static_cast<GLint>(Level),
						0, 0, 0,
						Extent.x, Extent.y,
						Texture.target() == gli::TARGET_3D ? Extent.z : LayerGL,
						Format.Internal, static_cast<GLsizei>(Texture.size(Level)),
						Texture.data(Layer, Face, Level));
				else
					glTexSubImage3D(
						Target, static_cast<GLint>(Level),
						0, 0, 0,
						Extent.x, Extent.y,
						Texture.target() == gli::TARGET_3D ? Extent.z : LayerGL,
						Format.External, Format.Type,
						Texture.data(Layer, Face, Level));
				break;
			default: assert(0); break;
			}
		}

Now that the texture is created in GPU memory, we can wrap it in an object of type GPU_Image.

GPU_Image* result;

// We create the texture data for GPU_Image
GPU_IMAGE_DATA* data;
data = (GPU_IMAGE_DATA*)SDL_malloc(sizeof(GPU_IMAGE_DATA));
data->refcount = 1;
data->handle = (GLuint)TextureName;
data->owns_handle = true;
data->format = Format.Internal;

GPU_Renderer *renderer = GPU_GetCurrentRenderer();

result = (GPU_Image*)SDL_malloc(sizeof(GPU_Image));
result->refcount = 1;
result->target = NULL;
result->renderer = renderer;
result->context_target = renderer->current_context_target;

if (Format.Internal == gli::gl::internal_format::INTERNAL_RGBA_DXT5) {
	result->format = GPU_FORMAT_COMPRESSED_RGBA_DXT5;
}
else if (Format.Internal == gli::gl::internal_format::INTERNAL_RGBA_DXT1) {
	result->format = GPU_FORMAT_COMPRESSED_RGBA_DXT1;
}
else {
	// fall back to plain RGBA so the field is never left uninitialized
	result->format = GPU_FORMAT_RGBA;
}

result->num_layers = 1;
result->bytes_per_pixel = 4;
result->has_mipmaps = GPU_FALSE;

result->anchor_x = renderer->default_image_anchor_x;
result->anchor_y = renderer->default_image_anchor_y;

result->color = SDL_Color{ 255, 255, 255, 255 };
result->use_blending = GPU_TRUE;
result->blend_mode = GPU_GetBlendModeFromPreset(GPU_BLEND_NORMAL);
result->snap_mode = GPU_SNAP_POSITION_AND_DIMENSIONS;
result->filter_mode = GPU_FILTER_LINEAR;
result->wrap_mode_x = GPU_WRAP_NONE;
result->wrap_mode_y = GPU_WRAP_NONE;

result->data = data;
result->is_alias = GPU_FALSE;

result->using_virtual_resolution = GPU_FALSE;
result->w = (Uint16)Extent.x;
result->h = (Uint16)Extent.y;

result->base_w = (Uint16)Extent.x;
result->base_h = (Uint16)Extent.y;
result->texture_w = (Uint16)Extent.x;
result->texture_h = (Uint16)Extent.y;

GPU_SetImageFilter(result, GPU_FILTER_LINEAR);
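
All the pieces above belong in a single helper; the main function below calls it create_texture. Here is a skeleton showing how they fit together (the name and signature come from the usage below, everything else is assembled from the steps of this tutorial):

GPU_Image* create_texture(const char *Filename) {
	gli::texture Texture = gli::load_dds(Filename);
	if (Texture.empty())
		return NULL;

	GPU_Image *result = NULL;

	// 1. translate the format and target with gli::gl
	// 2. create, bind and configure the OpenGL texture handle
	// 3. allocate the mipmap levels, then upload every layer/face/level
	// 4. allocate the GPU_IMAGE_DATA and GPU_Image and fill their fields

	return result;
}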

For this code to work, SDL_gpu needs to be modified to support compressed texture formats.

Next, in the file SDL_gpu.h we have the enum:

/*! \ingroup ImageControls
 * Image format enum
 * \see GPU_CreateImage()
 */
typedef enum {
    GPU_FORMAT_LUMINANCE = 1,
    GPU_FORMAT_LUMINANCE_ALPHA = 2,
    GPU_FORMAT_RGB = 3,
    GPU_FORMAT_RGBA = 4,
    GPU_FORMAT_ALPHA = 5,
    GPU_FORMAT_RG = 6,
    GPU_FORMAT_YCbCr422 = 7,
    GPU_FORMAT_YCbCr420P = 8,
    GPU_FORMAT_BGR = 9,
    GPU_FORMAT_BGRA = 10,
    GPU_FORMAT_ABGR = 11
} GPU_FormatEnum;

We add the two new formats GPU_FORMAT_COMPRESSED_RGBA_DXT1 and GPU_FORMAT_COMPRESSED_RGBA_DXT5:

/*! \ingroup ImageControls
 * Image format enum
 * \see GPU_CreateImage()
 */
typedef enum {
    GPU_FORMAT_LUMINANCE = 1,
    GPU_FORMAT_LUMINANCE_ALPHA = 2,
    GPU_FORMAT_RGB = 3,
    GPU_FORMAT_RGBA = 4,
    GPU_FORMAT_ALPHA = 5,
    GPU_FORMAT_RG = 6,
    GPU_FORMAT_YCbCr422 = 7,
    GPU_FORMAT_YCbCr420P = 8,
    GPU_FORMAT_BGR = 9,
    GPU_FORMAT_BGRA = 10,
    GPU_FORMAT_ABGR = 11,
    GPU_FORMAT_COMPRESSED_RGBA_DXT1 = 12,
    GPU_FORMAT_COMPRESSED_RGBA_DXT5 = 13
} GPU_FormatEnum;

With these changes in place you can normally display DXT textures. We still need to create the SDL context, so here is the main function.

#include <iostream>
#include <SDL.h>
#include <SDL_gpu.h>
#include "gl_core_3_3.h" // glLoadGen loader providing ogl_LoadFunctions; your generated header name may differ

int main(int argc, char *argv[]) {
	SDL_Init(SDL_INIT_VIDEO);

	GPU_Target *window = GPU_InitRenderer(GPU_RENDERER_OPENGL_3, 1024, 512, GPU_DEFAULT_INIT_FLAGS);

	if (window == NULL || ogl_LoadFunctions() == ogl_LOAD_FAILED) {
		std::cout << "error initializing OpenGL\n";
		return 1;
	}

	GPU_Image *dxt1Image = create_texture("dirt.dds");
	GPU_Image *dxt5Image = create_texture("spritesheet.dds");

	SDL_Event event;

	bool done = false;

	while (!done) {
		while (SDL_PollEvent(&event)) {
			if (event.type == SDL_QUIT)
				done = true;
			else if (event.type == SDL_KEYDOWN) {
				if (event.key.keysym.sym == SDLK_ESCAPE)
					done = true;
			}
		}

		GPU_Clear(window);
		GPU_BlitTransformX(dxt1Image, NULL, window, 0, 0, 0, 0, 0, 1, 1);
		GPU_BlitTransformX(dxt5Image, NULL, window, 512, 0, 0, 0, 0, 1, 1);
		GPU_Flip(window);
	}

	GPU_FreeImage(dxt1Image);
	GPU_FreeImage(dxt5Image);

	GPU_Quit();

	return 0;
}

For those who want to download the entire project with all the libraries: TextureDXT.7z