glretrace doesn't have the logic to choose the most appropriate data type,
nor does qapitrace have the logic to cope with multiple data types or
HDR data, so make it easier on everybody by choosing the common denominator.
    // Hardcoded for now, but we could choose types more adequate to the
    // texture internal format
    // Hardcoded for now, but we could choose types more adequate to the
    // texture internal format
- json.writeStringMember("__type__", "float");
+ json.writeStringMember("__type__", "uint8");
+ json.writeBoolMember("__normalized__", true);
json.writeNumberMember("__channels__", 4);
json.writeNumberMember("__channels__", 4);
- float *pixels = new float[depth*width*height*4];
+ GLubyte *pixels = new GLubyte[depth*width*height*4];
- glGetTexImage(target, level, GL_RGBA, GL_FLOAT, pixels);
+ glGetTexImage(target, level, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
json.writeStringMember("__encoding__", "base64");
json.beginMember("__data__");
json.writeStringMember("__encoding__", "base64");
json.beginMember("__data__");
    // Hardcoded for now, but we could choose types more adequate to the
    // texture internal format
    // Hardcoded for now, but we could choose types more adequate to the
    // texture internal format
- json.writeStringMember("__type__", "float");
+ json.writeStringMember("__type__", "uint8");
+ json.writeBoolMember("__normalized__", true);
json.writeNumberMember("__channels__", 4);
json.writeNumberMember("__channels__", 4);
- float *pixels = new float[width*height*4];
+ GLubyte *pixels = new GLubyte[width*height*4];
GLint drawbuffer = glretrace::double_buffer ? GL_BACK : GL_FRONT;
GLint readbuffer = glretrace::double_buffer ? GL_BACK : GL_FRONT;
GLint drawbuffer = glretrace::double_buffer ? GL_BACK : GL_FRONT;
GLint readbuffer = glretrace::double_buffer ? GL_BACK : GL_FRONT;
+// Pack four 8-bit normalized channels into QImage's ARGB32 layout
+// (0xAARRGGBB in a host-endian int, as QImage::Format_ARGB32 expects).
+//
+// NOTE: the shifts are done in quint32, not int. quint8 promotes to
+// (signed) int, and `a << 24` with a >= 128 would shift into the sign
+// bit — undefined behavior before C++20. Converting back to int at the
+// end preserves the original return type for callers.
+static inline int
+rgba8_to_argb(quint8 r, quint8 g, quint8 b, quint8 a)
+{
+    return int((quint32(a) << 24) |
+               (quint32(r) << 16) |
+               (quint32(g) << 8)  |
+                quint32(b));
+}
+
static inline int
rgbaf2argb(float r, float g, float b, float a)
{
static inline int
rgbaf2argb(float r, float g, float b, float a)
{
void ApiSurface::contentsFromBase64(const QByteArray &base64)
{
QByteArray dataArray = QByteArray::fromBase64(base64);
void ApiSurface::contentsFromBase64(const QByteArray &base64)
{
QByteArray dataArray = QByteArray::fromBase64(base64);
- const float *data = (const float*)dataArray.data();
+ const quint8 *data = (const quint8*)dataArray.data();
int width = m_size.width();
int height = m_size.height();
int width = m_size.width();
int height = m_size.height();
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
- int pixel = rgbaf2argb(data[(y * width + x) * 4 + 0],
- data[(y * width + x) * 4 + 1],
- data[(y * width + x) * 4 + 2],
- data[(y * width + x) * 4 + 3]);
+ int pixel = rgba8_to_argb(data[(y * width + x) * 4 + 0],
+ data[(y * width + x) * 4 + 1],
+ data[(y * width + x) * 4 + 2],
+ data[(y * width + x) * 4 + 3]);
pixelData[y * width + x] = pixel;
}
}
pixelData[y * width + x] = pixel;
}
}
image[QLatin1String("__height__")].toInt());
QString cls = image[QLatin1String("__class__")].toString();
QString type = image[QLatin1String("__type__")].toString();
image[QLatin1String("__height__")].toInt());
QString cls = image[QLatin1String("__class__")].toString();
QString type = image[QLatin1String("__type__")].toString();
+ bool normalized =
+ image[QLatin1String("__normalized__")].toBool();
int numChannels =
image[QLatin1String("__channels__")].toInt();
Q_ASSERT(numChannels == 4);
int numChannels =
image[QLatin1String("__channels__")].toInt();
Q_ASSERT(numChannels == 4);
- Q_ASSERT(type == QLatin1String("float"));
+ Q_ASSERT(type == QLatin1String("uint8"));
+ Q_ASSERT(normalized == true);
QByteArray dataArray =
image[QLatin1String("__data__")].toByteArray();
QByteArray dataArray =
image[QLatin1String("__data__")].toByteArray();