Commit 3e47f5f7 by John Kessenich Committed by GitHub

Merge pull request #523 from amdrexu/feature2

Parser: Implement extension GL_AMD_gpu_shader_half_float.
parents df98cc26 c9e3c3c9
...@@ -11,10 +11,12 @@ set(SPVREMAP_SOURCES ...@@ -11,10 +11,12 @@ set(SPVREMAP_SOURCES
doc.cpp) doc.cpp)
set(HEADERS set(HEADERS
bitutils.h
spirv.hpp spirv.hpp
GLSL.std.450.h GLSL.std.450.h
GLSL.ext.KHR.h GLSL.ext.KHR.h
GlslangToSpv.h GlslangToSpv.h
hex_float.h
Logger.h Logger.h
SpvBuilder.h SpvBuilder.h
spvIR.h spvIR.h
...@@ -26,8 +28,9 @@ set(SPVREMAP_HEADERS ...@@ -26,8 +28,9 @@ set(SPVREMAP_HEADERS
doc.h) doc.h)
if(ENABLE_AMD_EXTENSIONS) if(ENABLE_AMD_EXTENSIONS)
set(HEADERS list(APPEND
GLSL.ext.AMD.h) HEADERS
GLSL.ext.AMD.h)
endif(ENABLE_AMD_EXTENSIONS) endif(ENABLE_AMD_EXTENSIONS)
add_library(SPIRV STATIC ${SOURCES} ${HEADERS}) add_library(SPIRV STATIC ${SOURCES} ${HEADERS})
......
...@@ -32,7 +32,7 @@ enum Decoration; ...@@ -32,7 +32,7 @@ enum Decoration;
enum Op; enum Op;
static const int GLSLextAMDVersion = 100; static const int GLSLextAMDVersion = 100;
static const int GLSLextAMDRevision = 1; static const int GLSLextAMDRevision = 2;
// SPV_AMD_shader_ballot // SPV_AMD_shader_ballot
static const char* const E_SPV_AMD_shader_ballot = "SPV_AMD_shader_ballot"; static const char* const E_SPV_AMD_shader_ballot = "SPV_AMD_shader_ballot";
...@@ -110,4 +110,7 @@ enum GcnShaderAMD { ...@@ -110,4 +110,7 @@ enum GcnShaderAMD {
GcnShaderCountAMD GcnShaderCountAMD
}; };
// SPV_AMD_gpu_shader_half_float
static const char* const E_SPV_AMD_gpu_shader_half_float = "SPV_AMD_gpu_shader_half_float";
#endif // #ifndef GLSLextAMD_H #endif // #ifndef GLSLextAMD_H
...@@ -46,6 +46,10 @@ ...@@ -46,6 +46,10 @@
#include "SpvBuilder.h" #include "SpvBuilder.h"
#ifdef AMD_EXTENSIONS
#include "hex_float.h"
#endif
#ifndef _WIN32 #ifndef _WIN32
#include <cstdio> #include <cstdio>
#endif #endif
...@@ -785,6 +789,36 @@ Id Builder::makeDoubleConstant(double d, bool specConstant) ...@@ -785,6 +789,36 @@ Id Builder::makeDoubleConstant(double d, bool specConstant)
return c->getResultId(); return c->getResultId();
} }
#ifdef AMD_EXTENSIONS
// Creates (or reuses) a scalar 16-bit float SPIR-V constant.
//
// 'f16' arrives as a 32-bit float; it is narrowed to half precision with
// round-toward-zero via the hex_float utilities, and the resulting 16-bit
// pattern is emitted as the constant's immediate operand.
// 'specConstant' selects OpSpecConstant instead of OpConstant.
Id Builder::makeFloat16Constant(float f16, bool specConstant)
{
Op opcode = specConstant ? OpSpecConstant : OpConstant;
Id typeId = makeFloatType(16);
// Convert the 32-bit value to its 16-bit bit pattern, truncating toward zero.
spvutils::HexFloat<spvutils::FloatProxy<float>> fVal(f16);
spvutils::HexFloat<spvutils::FloatProxy<spvutils::Float16>> f16Val(0);
fVal.castTo(f16Val, spvutils::round_direction::kToZero);
unsigned value = f16Val.value().getAsFloat().get_value();
// See if we already made it. Applies only to regular constants, because specialization constants
// must remain distinct for the purpose of applying a SpecId decoration.
if (!specConstant) {
Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value);
if (existing)
return existing;
}
// Not found (or a spec constant): create a fresh instruction and register it
// with the module's constant/type bookkeeping, mirroring makeFloatConstant.
Instruction* c = new Instruction(getUniqueId(), typeId, opcode);
c->addImmediateOperand(value);
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(c));
groupedConstants[OpTypeFloat].push_back(c);
module.mapInstruction(c);
return c->getResultId();
}
#endif
Id Builder::findCompositeConstant(Op typeClass, std::vector<Id>& comps) const Id Builder::findCompositeConstant(Op typeClass, std::vector<Id>& comps) const
{ {
Instruction* constant = 0; Instruction* constant = 0;
......
...@@ -191,6 +191,9 @@ public: ...@@ -191,6 +191,9 @@ public:
Id makeUint64Constant(unsigned long long u, bool specConstant = false) { return makeInt64Constant(makeUintType(64), u, specConstant); } Id makeUint64Constant(unsigned long long u, bool specConstant = false) { return makeInt64Constant(makeUintType(64), u, specConstant); }
Id makeFloatConstant(float f, bool specConstant = false); Id makeFloatConstant(float f, bool specConstant = false);
Id makeDoubleConstant(double d, bool specConstant = false); Id makeDoubleConstant(double d, bool specConstant = false);
#ifdef AMD_EXTENSIONS
Id makeFloat16Constant(float f16, bool specConstant = false);
#endif
// Turn the array of constants into a proper spv constant of the requested type. // Turn the array of constants into a proper spv constant of the requested type.
Id makeCompositeConstant(Id type, std::vector<Id>& comps, bool specConst = false); Id makeCompositeConstant(Id type, std::vector<Id>& comps, bool specConst = false);
......
// Copyright (c) 2015-2016 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef LIBSPIRV_UTIL_BITUTILS_H_
#define LIBSPIRV_UTIL_BITUTILS_H_
#include <cstdint>
#include <cstring>
namespace spvutils {

// Reinterprets the object representation of |value| as a value of type To.
// Source and destination types must be the same size (checked at compile
// time); memcpy is used so the conversion is free of strict-aliasing issues.
// NOTE(review): assumes both types are trivially copyable — TODO confirm.
template <typename To, typename From>
To BitwiseCast(From value) {
  static_assert(sizeof(From) == sizeof(To),
                "BitwiseCast: Source and destination must have the same size");
  To result;
  std::memcpy(&result, &value, sizeof(result));
  return result;
}

// SetBits<T, Pos, Count>::get is a value of integer type T whose bits at
// positions Pos .. Pos + Count - 1 (counting from the least-significant bit)
// are 1 and all other bits are 0. Count == 0 yields 0. Requesting a bit
// beyond the width of T triggers a static assert.
template <typename T, size_t Pos = 0, size_t Count = 0>
struct SetBits {
  static_assert(Pos < sizeof(T) * 8,
                "Tried to set a bit that is shifted too far.");
  const static T get = (T(1) << Pos) | SetBits<T, Pos + 1, Count - 1>::get;
};

// Base case: no bits requested.
template <typename T, size_t Pos>
struct SetBits<T, Pos, 0> {
  const static T get = T(0);
};

// Everything above is compile-time, so the self-tests live right here.
static_assert(SetBits<uint32_t, 0, 0>::get == uint32_t(0x00000000),
              "SetBits failed");
static_assert(SetBits<uint32_t, 0, 1>::get == uint32_t(0x00000001),
              "SetBits failed");
static_assert(SetBits<uint32_t, 31, 1>::get == uint32_t(0x80000000),
              "SetBits failed");
static_assert(SetBits<uint32_t, 1, 2>::get == uint32_t(0x00000006),
              "SetBits failed");
static_assert(SetBits<uint32_t, 30, 2>::get == uint32_t(0xc0000000),
              "SetBits failed");
static_assert(SetBits<uint32_t, 0, 31>::get == uint32_t(0x7FFFFFFF),
              "SetBits failed");
static_assert(SetBits<uint32_t, 0, 32>::get == uint32_t(0xFFFFFFFF),
              "SetBits failed");
static_assert(SetBits<uint32_t, 16, 16>::get == uint32_t(0xFFFF0000),
              "SetBits failed");
static_assert(SetBits<uint64_t, 0, 1>::get == uint64_t(0x0000000000000001LL),
              "SetBits failed");
static_assert(SetBits<uint64_t, 63, 1>::get == uint64_t(0x8000000000000000LL),
              "SetBits failed");
static_assert(SetBits<uint64_t, 62, 2>::get == uint64_t(0xc000000000000000LL),
              "SetBits failed");
static_assert(SetBits<uint64_t, 31, 1>::get == uint64_t(0x0000000080000000LL),
              "SetBits failed");
static_assert(SetBits<uint64_t, 16, 16>::get == uint64_t(0x00000000FFFF0000LL),
              "SetBits failed");

}  // namespace spvutils
#endif // LIBSPIRV_UTIL_BITUTILS_H_
#version 450 core
#extension GL_AMD_gpu_shader_half_float: enable
#extension GL_ARB_gpu_shader_int64: enable
// Entry point is intentionally empty; this file only exercises parsing and
// SPIR-V generation for GL_AMD_gpu_shader_half_float.
void main()
{
}
// Half float literals
// Exercises the 'hf'/'HF' half-float literal suffixes and basic use of the
// resulting float16 constants.
void literal()
{
const float16_t f16c = 0.000001hf;
const f16vec2 f16cv = f16vec2(-0.25HF, 0.03HF);
f16vec2 f16v;
f16v.x = f16c;
f16v += f16cv;
}
// Block memory layout
// Struct used inside B1 (std140) and B2 (std430) below; the rule/offset
// comments describe its layout when nested in an std140 block.
struct S
{
float16_t x; // rule 1: align = 2, takes offsets 0-1
f16vec2 y; // rule 2: align = 4, takes offsets 4-7
f16vec3 z; // rule 3: align = 8, takes offsets 8-13
};
// std140 layout of float16 members; the expected alignment/offset of each
// member is spelled out per GL_AMD_gpu_shader_half_float's layout rules.
layout(column_major, std140) uniform B1
{
float16_t a; // rule 1: align = 2, takes offsets 0-1
f16vec2 b; // rule 2: align = 4, takes offsets 4-7
f16vec3 c; // rule 3: align = 8, takes offsets 8-15
float16_t d[2]; // rule 4: align = 16, array stride = 16,
// takes offsets 16-47
f16mat2x3 e; // rule 5: align = 16, matrix stride = 16,
// takes offsets 48-79
f16mat2x3 f[2]; // rule 6: align = 16, matrix stride = 16,
// array stride = 32, f[0] takes
// offsets 80-111, f[1] takes offsets
// 112-143
S g; // rule 9: align = 16, g.x takes offsets
// 144-145, g.y takes offsets 148-151,
// g.z takes offsets 152-159
S h[2]; // rule 10: align = 16, array stride = 16, h[0]
// takes offsets 160-175, h[1] takes
// offsets 176-191
};
// std430 layout of float16 members; tighter packing than B1 above because
// std430 drops the 16-byte rounding of array/matrix strides.
layout(row_major, std430) buffer B2
{
float16_t o; // rule 1: align = 2, takes offsets 0-1
f16vec2 p; // rule 2: align = 4, takes offsets 4-7
f16vec3 q; // rule 3: align = 8, takes offsets 8-13
float16_t r[2]; // rule 4: align = 2, array stride = 2, takes
// offsets 14-17
f16mat2x3 s; // rule 7: align = 4, matrix stride = 4, takes
// offsets 20-31
f16mat2x3 t[2]; // rule 8: align = 4, matrix stride = 4, array
// stride = 12, t[0] takes offsets
// 32-43, t[1] takes offsets 44-55
S u; // rule 9: align = 8, u.x takes offsets
// 56-57, u.y takes offsets 60-63, u.z
// takes offsets 64-69
S v[2]; // rule 10: align = 8, array stride = 16, v[0]
// takes offsets 72-87, v[1] takes
// offsets 88-103
};
// Specialization constant
// float16 spec constants plus constant-folded conversions to/from
// float and double.
layout(constant_id = 100) const float16_t sf16 = 0.125hf;
layout(constant_id = 101) const float sf = 0.25;
layout(constant_id = 102) const double sd = 0.5lf;
const float f16_to_f = float(sf16);
// NOTE(review): converts float16 -> float, then widens implicitly to double;
// presumably intentional to exercise the chained conversion — confirm
// against the expected SPIR-V baseline.
const double f16_to_d = float(sf16);
const float16_t f_to_f16 = float16_t(sf);
const float16_t d_to_f16 = float16_t(sd);
// Arithmetic, increment/decrement, relational, and vector/matrix operators
// applied to float16 scalars, vectors and matrices.
void operators()
{
float16_t f16;
f16vec2 f16v;
f16mat2x2 f16m;
bool b;
// Arithmetic
f16v += f16v;
f16v -= f16v;
f16v *= f16v;
f16v /= f16v;
f16v++;
f16v--;
++f16m;
--f16m;
f16v = -f16v;
f16m = -f16m;
f16 = f16v.x + f16v.y;
f16 = f16v.x - f16v.y;
f16 = f16v.x * f16v.y;
f16 = f16v.x / f16v.y;
// Relational
b = (f16v.x != f16);
b = (f16v.y == f16);
b = (f16v.x > f16);
b = (f16v.y < f16);
b = (f16v.x >= f16);
b = (f16v.y <= f16);
// Vector/matrix operations
f16v = f16v * f16;
f16m = f16m * f16;
f16v = f16m * f16v;
f16v = f16v * f16m;
f16m = f16m * f16m;
}
// Explicit constructor-style conversions between float16 vectors and every
// other scalar vector type (bool, float, double, int, uint, int64, uint64),
// in both directions.
void typeCast()
{
bvec3 bv;
vec3 fv;
dvec3 dv;
ivec3 iv;
uvec3 uv;
i64vec3 i64v;
u64vec3 u64v;
f16vec3 f16v;
f16v = f16vec3(bv); // bool -> float16
bv = bvec3(f16v); // float16 -> bool
f16v = f16vec3(fv); // float -> float16
fv = vec3(f16v); // float16 -> float
f16v = f16vec3(dv); // double -> float16
dv = dvec3(f16v); // float16 -> double (was dvec3(dv), a self-assignment that tested nothing)
f16v = f16vec3(iv); // int -> float16
iv = ivec3(f16v); // float16 -> int
f16v = f16vec3(uv); // uint -> float16
uv = uvec3(f16v); // float16 -> uint
f16v = f16vec3(i64v); // int64 -> float16
i64v = i64vec3(f16v); // float16 -> int64
f16v = f16vec3(u64v); // uint64 -> float16
u64v = u64vec3(f16v); // float16 -> uint64
}
// Angle and trigonometry builtins (GLSL 8.1) on float16 vectors.
void builtinAngleTrigFuncs()
{
f16vec4 f16v1, f16v2;
f16v2 = radians(f16v1);
f16v2 = degrees(f16v1);
f16v2 = sin(f16v1);
f16v2 = cos(f16v1);
f16v2 = tan(f16v1);
f16v2 = asin(f16v1);
f16v2 = acos(f16v1);
f16v2 = atan(f16v1, f16v2);
f16v2 = atan(f16v1);
f16v2 = sinh(f16v1);
f16v2 = cosh(f16v1);
f16v2 = tanh(f16v1);
f16v2 = asinh(f16v1);
f16v2 = acosh(f16v1);
f16v2 = atanh(f16v1);
}
// Exponential builtins (GLSL 8.2) on float16 vectors.
void builtinExpFuncs()
{
f16vec2 f16v1, f16v2;
f16v2 = pow(f16v1, f16v2);
f16v2 = exp(f16v1);
f16v2 = log(f16v1);
f16v2 = exp2(f16v1);
f16v2 = log2(f16v1);
f16v2 = sqrt(f16v1);
f16v2 = inversesqrt(f16v1);
}
// Common builtins (GLSL 8.3) on float16 operands, covering the scalar,
// vector and mixed scalar/vector overloads.
void builtinCommonFuncs()
{
f16vec3 f16v1, f16v2, f16v3;
float16_t f16;
bool b;
bvec3 bv;
ivec3 iv;
f16v2 = abs(f16v1);
f16v2 = sign(f16v1);
f16v2 = floor(f16v1);
f16v2 = trunc(f16v1);
f16v2 = round(f16v1);
f16v2 = roundEven(f16v1);
f16v2 = ceil(f16v1);
f16v2 = fract(f16v1);
f16v2 = mod(f16v1, f16v2);
f16v2 = mod(f16v1, f16);
f16v3 = modf(f16v1, f16v2);
f16v3 = min(f16v1, f16v2);
f16v3 = min(f16v1, f16);
f16v3 = max(f16v1, f16v2);
f16v3 = max(f16v1, f16);
f16v3 = clamp(f16v1, f16, f16v2.x);
f16v3 = clamp(f16v1, f16v2, f16vec3(f16));
f16v3 = mix(f16v1, f16v2, f16);
f16v3 = mix(f16v1, f16v2, f16v3);
f16v3 = mix(f16v1, f16v2, bv);
f16v3 = step(f16v1, f16v2);
f16v3 = step(f16, f16v3);
f16v3 = smoothstep(f16v1, f16v2, f16v3);
f16v3 = smoothstep(f16, f16v1.x, f16v2);
b = isnan(f16);
bv = isinf(f16v1);
f16v3 = fma(f16v1, f16v2, f16v3);
f16v2 = frexp(f16v1, iv);
f16v2 = ldexp(f16v1, iv);
}
// packFloat2x16/unpackFloat2x16: round-trip two float16 values through a
// single uint (added by GL_AMD_gpu_shader_half_float).
void builtinPackUnpackFuncs()
{
uint u;
f16vec2 f16v;
u = packFloat2x16(f16v);
f16v = unpackFloat2x16(u);
}
// Geometric builtins (GLSL 8.5) on float16 vectors.
void builtinGeometryFuncs()
{
float16_t f16;
f16vec3 f16v1, f16v2, f16v3;
f16 = length(f16v1);
f16 = distance(f16v1, f16v2);
f16 = dot(f16v1, f16v2);
f16v3 = cross(f16v1, f16v2);
f16v2 = normalize(f16v1);
f16v3 = faceforward(f16v1, f16v2, f16v3);
f16v3 = reflect(f16v1, f16v2);
f16v3 = refract(f16v1, f16v2, f16);
}
// Matrix builtins (GLSL 8.6) on float16 matrices of assorted shapes.
void builtinMatrixFuncs()
{
f16mat2x3 f16m1, f16m2, f16m3;
f16mat3x2 f16m4;
f16mat3 f16m5;
f16mat4 f16m6, f16m7;
f16vec3 f16v1;
f16vec2 f16v2;
float16_t f16;
f16m3 = matrixCompMult(f16m1, f16m2);
f16m1 = outerProduct(f16v1, f16v2);
f16m4 = transpose(f16m1);
f16 = determinant(f16m5);
f16m6 = inverse(f16m7);
}
// Component-wise vector relational builtins (GLSL 8.7) on float16 vectors.
void builtinVecRelFuncs()
{
f16vec3 f16v1, f16v2;
bvec3 bv;
bv = lessThan(f16v1, f16v2);
bv = lessThanEqual(f16v1, f16v2);
bv = greaterThan(f16v1, f16v2);
bv = greaterThanEqual(f16v1, f16v2);
bv = equal(f16v1, f16v2);
bv = notEqual(f16v1, f16v2);
}
in f16vec3 if16v;

// Fragment-processing builtins on float16 operands: derivatives, fwidth
// variants, and the interpolateAt* functions.
void builtinFragProcFuncs()
{
f16vec3 f16v;
// Derivative
f16v.x = dFdx(if16v.x);
f16v.y = dFdy(if16v.y);
f16v.xy = dFdxFine(if16v.xy);
f16v.xy = dFdyFine(if16v.xy);
f16v = dFdxCoarse(if16v);
f16v = dFdyCoarse(if16v); // was a duplicated dFdxCoarse; the dFdx/dFdy pairing of the surrounding lines shows dFdyCoarse was intended
f16v.x = fwidth(if16v.x);
f16v.xy = fwidthFine(if16v.xy);
f16v = fwidthCoarse(if16v);
// Interpolation
f16v.x = interpolateAtCentroid(if16v.x);
f16v.xy = interpolateAtSample(if16v.xy, 1);
f16v = interpolateAtOffset(if16v, vec2(0.5));
}
...@@ -46,6 +46,9 @@ enum TBasicType { ...@@ -46,6 +46,9 @@ enum TBasicType {
EbtVoid, EbtVoid,
EbtFloat, EbtFloat,
EbtDouble, EbtDouble,
#ifdef AMD_EXTENSIONS
EbtFloat16,
#endif
EbtInt, EbtInt,
EbtUint, EbtUint,
EbtInt64, EbtInt64,
......
...@@ -185,8 +185,6 @@ struct TSampler { // misnomer now; includes images, textures without sampler, ...@@ -185,8 +185,6 @@ struct TSampler { // misnomer now; includes images, textures without sampler,
case EbtFloat: break; case EbtFloat: break;
case EbtInt: s.append("i"); break; case EbtInt: s.append("i"); break;
case EbtUint: s.append("u"); break; case EbtUint: s.append("u"); break;
case EbtInt64: s.append("i64"); break;
case EbtUint64: s.append("u64"); break;
default: break; // some compilers want this default: break; // some compilers want this
} }
if (image) { if (image) {
...@@ -1277,7 +1275,11 @@ public: ...@@ -1277,7 +1275,11 @@ public:
virtual bool isImplicitlySizedArray() const { return isArray() && getOuterArraySize() == UnsizedArraySize && qualifier.storage != EvqBuffer; } virtual bool isImplicitlySizedArray() const { return isArray() && getOuterArraySize() == UnsizedArraySize && qualifier.storage != EvqBuffer; }
virtual bool isRuntimeSizedArray() const { return isArray() && getOuterArraySize() == UnsizedArraySize && qualifier.storage == EvqBuffer; } virtual bool isRuntimeSizedArray() const { return isArray() && getOuterArraySize() == UnsizedArraySize && qualifier.storage == EvqBuffer; }
virtual bool isStruct() const { return structure != nullptr; } virtual bool isStruct() const { return structure != nullptr; }
#ifdef AMD_EXTENSIONS
virtual bool isFloatingDomain() const { return basicType == EbtFloat || basicType == EbtDouble || basicType == EbtFloat16; }
#else
virtual bool isFloatingDomain() const { return basicType == EbtFloat || basicType == EbtDouble; } virtual bool isFloatingDomain() const { return basicType == EbtFloat || basicType == EbtDouble; }
#endif
virtual bool isOpaque() const { return basicType == EbtSampler || basicType == EbtAtomicUint; } virtual bool isOpaque() const { return basicType == EbtSampler || basicType == EbtAtomicUint; }
...@@ -1359,6 +1361,9 @@ public: ...@@ -1359,6 +1361,9 @@ public:
case EbtVoid: case EbtVoid:
case EbtFloat: case EbtFloat:
case EbtDouble: case EbtDouble:
#ifdef AMD_EXTENSIONS
case EbtFloat16:
#endif
case EbtInt: case EbtInt:
case EbtUint: case EbtUint:
case EbtInt64: case EbtInt64:
...@@ -1451,6 +1456,9 @@ public: ...@@ -1451,6 +1456,9 @@ public:
case EbtVoid: return "void"; case EbtVoid: return "void";
case EbtFloat: return "float"; case EbtFloat: return "float";
case EbtDouble: return "double"; case EbtDouble: return "double";
#ifdef AMD_EXTENSIONS
case EbtFloat16: return "float16_t";
#endif
case EbtInt: return "int"; case EbtInt: return "int";
case EbtUint: return "uint"; case EbtUint: return "uint";
case EbtInt64: return "int64_t"; case EbtInt64: return "int64_t";
......
...@@ -119,6 +119,22 @@ enum TOperator { ...@@ -119,6 +119,22 @@ enum TOperator {
EOpConvFloatToUint64, EOpConvFloatToUint64,
EOpConvDoubleToUint64, EOpConvDoubleToUint64,
EOpConvInt64ToUint64, EOpConvInt64ToUint64,
#ifdef AMD_EXTENSIONS
EOpConvBoolToFloat16,
EOpConvIntToFloat16,
EOpConvUintToFloat16,
EOpConvFloatToFloat16,
EOpConvDoubleToFloat16,
EOpConvInt64ToFloat16,
EOpConvUint64ToFloat16,
EOpConvFloat16ToBool,
EOpConvFloat16ToInt,
EOpConvFloat16ToUint,
EOpConvFloat16ToFloat,
EOpConvFloat16ToDouble,
EOpConvFloat16ToInt64,
EOpConvFloat16ToUint64,
#endif
// //
// binary operations // binary operations
...@@ -236,6 +252,10 @@ enum TOperator { ...@@ -236,6 +252,10 @@ enum TOperator {
EOpUnpackInt2x32, EOpUnpackInt2x32,
EOpPackUint2x32, EOpPackUint2x32,
EOpUnpackUint2x32, EOpUnpackUint2x32,
#ifdef AMD_EXTENSIONS
EOpPackFloat2x16,
EOpUnpackFloat2x16,
#endif
EOpLength, EOpLength,
EOpDistance, EOpDistance,
...@@ -396,6 +416,21 @@ enum TOperator { ...@@ -396,6 +416,21 @@ enum TOperator {
EOpConstructDMat4x2, EOpConstructDMat4x2,
EOpConstructDMat4x3, EOpConstructDMat4x3,
EOpConstructDMat4x4, EOpConstructDMat4x4,
#ifdef AMD_EXTENSIONS
EOpConstructFloat16,
EOpConstructF16Vec2,
EOpConstructF16Vec3,
EOpConstructF16Vec4,
EOpConstructF16Mat2x2,
EOpConstructF16Mat2x3,
EOpConstructF16Mat2x4,
EOpConstructF16Mat3x2,
EOpConstructF16Mat3x3,
EOpConstructF16Mat3x4,
EOpConstructF16Mat4x2,
EOpConstructF16Mat4x3,
EOpConstructF16Mat4x4,
#endif
EOpConstructStruct, EOpConstructStruct,
EOpConstructTextureSampler, EOpConstructTextureSampler,
EOpConstructGuardEnd, EOpConstructGuardEnd,
......
...@@ -176,6 +176,9 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* right ...@@ -176,6 +176,9 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* right
switch (getType().getBasicType()) { switch (getType().getBasicType()) {
case EbtDouble: case EbtDouble:
case EbtFloat: case EbtFloat:
#ifdef AMD_EXTENSIONS
case EbtFloat16:
#endif
newConstArray[i].setDConst(leftUnionArray[i].getDConst() / rightUnionArray[i].getDConst()); newConstArray[i].setDConst(leftUnionArray[i].getDConst() / rightUnionArray[i].getDConst());
break; break;
...@@ -450,6 +453,9 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType) ...@@ -450,6 +453,9 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
case EOpNegative: case EOpNegative:
switch (getType().getBasicType()) { switch (getType().getBasicType()) {
case EbtDouble: case EbtDouble:
#ifdef AMD_EXTENSIONS
case EbtFloat16:
#endif
case EbtFloat: newConstArray[i].setDConst(-unionArray[i].getDConst()); break; case EbtFloat: newConstArray[i].setDConst(-unionArray[i].getDConst()); break;
case EbtInt: newConstArray[i].setIConst(-unionArray[i].getIConst()); break; case EbtInt: newConstArray[i].setIConst(-unionArray[i].getIConst()); break;
case EbtUint: newConstArray[i].setUConst(static_cast<unsigned int>(-static_cast<int>(unionArray[i].getUConst()))); break; case EbtUint: newConstArray[i].setUConst(static_cast<unsigned int>(-static_cast<int>(unionArray[i].getUConst()))); break;
...@@ -688,6 +694,9 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode) ...@@ -688,6 +694,9 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
// Second, do the actual folding // Second, do the actual folding
bool isFloatingPoint = children[0]->getAsTyped()->getBasicType() == EbtFloat || bool isFloatingPoint = children[0]->getAsTyped()->getBasicType() == EbtFloat ||
#ifdef AMD_EXTENSIONS
children[0]->getAsTyped()->getBasicType() == EbtFloat16 ||
#endif
children[0]->getAsTyped()->getBasicType() == EbtDouble; children[0]->getAsTyped()->getBasicType() == EbtDouble;
bool isSigned = children[0]->getAsTyped()->getBasicType() == EbtInt || bool isSigned = children[0]->getAsTyped()->getBasicType() == EbtInt ||
children[0]->getAsTyped()->getBasicType() == EbtInt64; children[0]->getAsTyped()->getBasicType() == EbtInt64;
......
...@@ -4559,6 +4559,11 @@ void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type) ...@@ -4559,6 +4559,11 @@ void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type)
// containing a double, the offset must also be a multiple of 8..." // containing a double, the offset must also be a multiple of 8..."
if (type.containsBasicType(EbtDouble) && ! IsMultipleOfPow2(qualifier.layoutXfbOffset, 8)) if (type.containsBasicType(EbtDouble) && ! IsMultipleOfPow2(qualifier.layoutXfbOffset, 8))
error(loc, "type contains double; xfb_offset must be a multiple of 8", "xfb_offset", ""); error(loc, "type contains double; xfb_offset must be a multiple of 8", "xfb_offset", "");
#ifdef AMD_EXTENSIONS
// ..., if applied to an aggregate containing a float16_t, the offset must also be a multiple of 2..."
else if (type.containsBasicType(EbtFloat16) && !IsMultipleOfPow2(qualifier.layoutXfbOffset, 2))
error(loc, "type contains half float; xfb_offset must be a multiple of 2", "xfb_offset", "");
#endif
else if (! IsMultipleOfPow2(qualifier.layoutXfbOffset, 4)) else if (! IsMultipleOfPow2(qualifier.layoutXfbOffset, 4))
error(loc, "must be a multiple of size of first component", "xfb_offset", ""); error(loc, "must be a multiple of size of first component", "xfb_offset", "");
} }
...@@ -4662,6 +4667,9 @@ void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type) ...@@ -4662,6 +4667,9 @@ void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type)
case EbtBool: case EbtBool:
case EbtFloat: case EbtFloat:
case EbtDouble: case EbtDouble:
#ifdef AMD_EXTENSIONS
case EbtFloat16:
#endif
break; break;
default: default:
error(loc, "cannot be applied to this type", "constant_id", ""); error(loc, "cannot be applied to this type", "constant_id", "");
...@@ -5561,6 +5569,24 @@ TIntermTyped* TParseContext::constructBuiltIn(const TType& type, TOperator op, T ...@@ -5561,6 +5569,24 @@ TIntermTyped* TParseContext::constructBuiltIn(const TType& type, TOperator op, T
basicOp = EOpConstructDouble; basicOp = EOpConstructDouble;
break; break;
#ifdef AMD_EXTENSIONS
case EOpConstructF16Vec2:
case EOpConstructF16Vec3:
case EOpConstructF16Vec4:
case EOpConstructF16Mat2x2:
case EOpConstructF16Mat2x3:
case EOpConstructF16Mat2x4:
case EOpConstructF16Mat3x2:
case EOpConstructF16Mat3x3:
case EOpConstructF16Mat3x4:
case EOpConstructF16Mat4x2:
case EOpConstructF16Mat4x3:
case EOpConstructF16Mat4x4:
case EOpConstructFloat16:
basicOp = EOpConstructFloat16;
break;
#endif
case EOpConstructIVec2: case EOpConstructIVec2:
case EOpConstructIVec3: case EOpConstructIVec3:
case EOpConstructIVec4: case EOpConstructIVec4:
......
...@@ -463,6 +463,25 @@ void TScanContext::fillInKeywordMap() ...@@ -463,6 +463,25 @@ void TScanContext::fillInKeywordMap()
(*KeywordMap)["u64vec3"] = U64VEC3; (*KeywordMap)["u64vec3"] = U64VEC3;
(*KeywordMap)["u64vec4"] = U64VEC4; (*KeywordMap)["u64vec4"] = U64VEC4;
#ifdef AMD_EXTENSIONS
(*KeywordMap)["float16_t"] = FLOAT16_T;
(*KeywordMap)["f16vec2"] = F16VEC2;
(*KeywordMap)["f16vec3"] = F16VEC3;
(*KeywordMap)["f16vec4"] = F16VEC4;
(*KeywordMap)["f16mat2"] = F16MAT2;
(*KeywordMap)["f16mat3"] = F16MAT3;
(*KeywordMap)["f16mat4"] = F16MAT4;
(*KeywordMap)["f16mat2x2"] = F16MAT2X2;
(*KeywordMap)["f16mat2x3"] = F16MAT2X3;
(*KeywordMap)["f16mat2x4"] = F16MAT2X4;
(*KeywordMap)["f16mat3x2"] = F16MAT3X2;
(*KeywordMap)["f16mat3x3"] = F16MAT3X3;
(*KeywordMap)["f16mat3x4"] = F16MAT3X4;
(*KeywordMap)["f16mat4x2"] = F16MAT4X2;
(*KeywordMap)["f16mat4x3"] = F16MAT4X3;
(*KeywordMap)["f16mat4x4"] = F16MAT4X4;
#endif
(*KeywordMap)["sampler2D"] = SAMPLER2D; (*KeywordMap)["sampler2D"] = SAMPLER2D;
(*KeywordMap)["samplerCube"] = SAMPLERCUBE; (*KeywordMap)["samplerCube"] = SAMPLERCUBE;
(*KeywordMap)["samplerCubeArray"] = SAMPLERCUBEARRAY; (*KeywordMap)["samplerCubeArray"] = SAMPLERCUBEARRAY;
...@@ -687,6 +706,9 @@ int TScanContext::tokenize(TPpContext* pp, TParserToken& token) ...@@ -687,6 +706,9 @@ int TScanContext::tokenize(TPpContext* pp, TParserToken& token)
case PpAtomConstUint64: parserToken->sType.lex.i64 = ppToken.i64val; return UINT64CONSTANT; case PpAtomConstUint64: parserToken->sType.lex.i64 = ppToken.i64val; return UINT64CONSTANT;
case PpAtomConstFloat: parserToken->sType.lex.d = ppToken.dval; return FLOATCONSTANT; case PpAtomConstFloat: parserToken->sType.lex.d = ppToken.dval; return FLOATCONSTANT;
case PpAtomConstDouble: parserToken->sType.lex.d = ppToken.dval; return DOUBLECONSTANT; case PpAtomConstDouble: parserToken->sType.lex.d = ppToken.dval; return DOUBLECONSTANT;
#ifdef AMD_EXTENSIONS
case PpAtomConstFloat16: parserToken->sType.lex.d = ppToken.dval; return FLOAT16CONSTANT;
#endif
case PpAtomIdentifier: case PpAtomIdentifier:
{ {
int token = tokenizeIdentifier(); int token = tokenizeIdentifier();
...@@ -938,10 +960,38 @@ int TScanContext::tokenizeIdentifier() ...@@ -938,10 +960,38 @@ int TScanContext::tokenizeIdentifier()
case U64VEC2: case U64VEC2:
case U64VEC3: case U64VEC3:
case U64VEC4: case U64VEC4:
if (parseContext.profile != EEsProfile && parseContext.version >= 450) afterType = true;
if (parseContext.symbolTable.atBuiltInLevel() ||
(parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_int64) &&
parseContext.profile != EEsProfile && parseContext.version >= 450))
return keyword; return keyword;
return identifierOrType(); return identifierOrType();
#ifdef AMD_EXTENSIONS
case FLOAT16_T:
case F16VEC2:
case F16VEC3:
case F16VEC4:
case F16MAT2:
case F16MAT3:
case F16MAT4:
case F16MAT2X2:
case F16MAT2X3:
case F16MAT2X4:
case F16MAT3X2:
case F16MAT3X3:
case F16MAT3X4:
case F16MAT4X2:
case F16MAT4X3:
case F16MAT4X4:
afterType = true;
if (parseContext.symbolTable.atBuiltInLevel() ||
(parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float) &&
parseContext.profile != EEsProfile && parseContext.version >= 450))
return keyword;
return identifierOrType();
#endif
case SAMPLERCUBEARRAY: case SAMPLERCUBEARRAY:
case SAMPLERCUBEARRAYSHADOW: case SAMPLERCUBEARRAYSHADOW:
case ISAMPLERCUBEARRAY: case ISAMPLERCUBEARRAY:
......
...@@ -60,6 +60,9 @@ void TType::buildMangledName(TString& mangledName) ...@@ -60,6 +60,9 @@ void TType::buildMangledName(TString& mangledName)
switch (basicType) { switch (basicType) {
case EbtFloat: mangledName += 'f'; break; case EbtFloat: mangledName += 'f'; break;
case EbtDouble: mangledName += 'd'; break; case EbtDouble: mangledName += 'd'; break;
#ifdef AMD_EXTENSIONS
case EbtFloat16: mangledName += "f16"; break;
#endif
case EbtInt: mangledName += 'i'; break; case EbtInt: mangledName += 'i'; break;
case EbtUint: mangledName += 'u'; break; case EbtUint: mangledName += 'u'; break;
case EbtInt64: mangledName += "i64"; break; case EbtInt64: mangledName += "i64"; break;
......
...@@ -192,6 +192,7 @@ void TParseVersions::initializeExtensionBehavior() ...@@ -192,6 +192,7 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_AMD_shader_trinary_minmax] = EBhDisable; extensionBehavior[E_GL_AMD_shader_trinary_minmax] = EBhDisable;
extensionBehavior[E_GL_AMD_shader_explicit_vertex_parameter] = EBhDisable; extensionBehavior[E_GL_AMD_shader_explicit_vertex_parameter] = EBhDisable;
extensionBehavior[E_GL_AMD_gcn_shader] = EBhDisable; extensionBehavior[E_GL_AMD_gcn_shader] = EBhDisable;
extensionBehavior[E_GL_AMD_gpu_shader_half_float] = EBhDisable;
#endif #endif
// AEP // AEP
...@@ -299,6 +300,7 @@ void TParseVersions::getPreamble(std::string& preamble) ...@@ -299,6 +300,7 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_AMD_shader_trinary_minmax 1\n" "#define GL_AMD_shader_trinary_minmax 1\n"
"#define GL_AMD_shader_explicit_vertex_parameter 1\n" "#define GL_AMD_shader_explicit_vertex_parameter 1\n"
"#define GL_AMD_gcn_shader 1\n" "#define GL_AMD_gcn_shader 1\n"
"#define GL_AMD_gpu_shader_half_float 1\n"
#endif #endif
; ;
} }
...@@ -663,6 +665,19 @@ void TParseVersions::doubleCheck(const TSourceLoc& loc, const char* op) ...@@ -663,6 +665,19 @@ void TParseVersions::doubleCheck(const TSourceLoc& loc, const char* op)
profileRequires(loc, ECompatibilityProfile, 400, nullptr, op); profileRequires(loc, ECompatibilityProfile, 400, nullptr, op);
} }
#ifdef AMD_EXTENSIONS
// Call for any operation needing GLSL float16 data-type support.
void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (!builtIn) {
requireExtensions(loc, 1, &E_GL_AMD_gpu_shader_half_float, "shader half float");
requireProfile(loc, ECoreProfile | ECompatibilityProfile, op);
profileRequires(loc, ECoreProfile, 450, nullptr, op);
profileRequires(loc, ECompatibilityProfile, 450, nullptr, op);
}
}
#endif
// Call for any operation needing GLSL 64-bit integer data-type support. // Call for any operation needing GLSL 64-bit integer data-type support.
void TParseVersions::int64Check(const TSourceLoc& loc, const char* op, bool builtIn) void TParseVersions::int64Check(const TSourceLoc& loc, const char* op, bool builtIn)
{ {
......
...@@ -136,10 +136,11 @@ const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp ...@@ -136,10 +136,11 @@ const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp
const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive"; const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
#ifdef AMD_EXTENSIONS #ifdef AMD_EXTENSIONS
const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot"; const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax"; const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter"; const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
const char* const E_GL_AMD_gcn_shader = "GL_AMD_gcn_shader"; const char* const E_GL_AMD_gcn_shader = "GL_AMD_gcn_shader";
const char* const E_GL_AMD_gpu_shader_half_float = "GL_AMD_gpu_shader_half_float";
#endif #endif
// AEP // AEP
......
...@@ -76,6 +76,24 @@ ...@@ -76,6 +76,24 @@
#define GL_DOUBLE_MAT4x2 0x8F4D #define GL_DOUBLE_MAT4x2 0x8F4D
#define GL_DOUBLE_MAT4x3 0x8F4E #define GL_DOUBLE_MAT4x3 0x8F4E
#ifdef AMD_EXTENSIONS
// These constants are borrowed from the NV_gpu_shader5 extension
#define GL_FLOAT16_NV 0x8FF8
#define GL_FLOAT16_VEC2_NV 0x8FF9
#define GL_FLOAT16_VEC3_NV 0x8FFA
#define GL_FLOAT16_VEC4_NV 0x8FFB
#define GL_FLOAT16_MAT2_AMD 0x91C5
#define GL_FLOAT16_MAT3_AMD 0x91C6
#define GL_FLOAT16_MAT4_AMD 0x91C7
#define GL_FLOAT16_MAT2x3_AMD 0x91C8
#define GL_FLOAT16_MAT2x4_AMD 0x91C9
#define GL_FLOAT16_MAT3x2_AMD 0x91CA
#define GL_FLOAT16_MAT3x4_AMD 0x91CB
#define GL_FLOAT16_MAT4x2_AMD 0x91CC
#define GL_FLOAT16_MAT4x3_AMD 0x91CD
#endif
#define GL_SAMPLER_1D 0x8B5D #define GL_SAMPLER_1D 0x8B5D
#define GL_SAMPLER_2D 0x8B5E #define GL_SAMPLER_2D 0x8B5E
#define GL_SAMPLER_3D 0x8B5F #define GL_SAMPLER_3D 0x8B5F
......
...@@ -119,13 +119,14 @@ extern int yylex(YYSTYPE*, TParseContext&); ...@@ -119,13 +119,14 @@ extern int yylex(YYSTYPE*, TParseContext&);
%expect 1 // One shift reduce conflict because of if | else %expect 1 // One shift reduce conflict because of if | else
%token <lex> ATTRIBUTE VARYING %token <lex> ATTRIBUTE VARYING
%token <lex> CONST BOOL FLOAT DOUBLE INT UINT INT64_T UINT64_T %token <lex> CONST BOOL FLOAT DOUBLE INT UINT INT64_T UINT64_T FLOAT16_T
%token <lex> BREAK CONTINUE DO ELSE FOR IF DISCARD RETURN SWITCH CASE DEFAULT SUBROUTINE %token <lex> BREAK CONTINUE DO ELSE FOR IF DISCARD RETURN SWITCH CASE DEFAULT SUBROUTINE
%token <lex> BVEC2 BVEC3 BVEC4 IVEC2 IVEC3 IVEC4 I64VEC2 I64VEC3 I64VEC4 UVEC2 UVEC3 UVEC4 U64VEC2 U64VEC3 U64VEC4 VEC2 VEC3 VEC4 %token <lex> BVEC2 BVEC3 BVEC4 IVEC2 IVEC3 IVEC4 I64VEC2 I64VEC3 I64VEC4 UVEC2 UVEC3 UVEC4 U64VEC2 U64VEC3 U64VEC4 VEC2 VEC3 VEC4
%token <lex> MAT2 MAT3 MAT4 CENTROID IN OUT INOUT %token <lex> MAT2 MAT3 MAT4 CENTROID IN OUT INOUT
%token <lex> UNIFORM PATCH SAMPLE BUFFER SHARED %token <lex> UNIFORM PATCH SAMPLE BUFFER SHARED
%token <lex> COHERENT VOLATILE RESTRICT READONLY WRITEONLY %token <lex> COHERENT VOLATILE RESTRICT READONLY WRITEONLY
%token <lex> DVEC2 DVEC3 DVEC4 DMAT2 DMAT3 DMAT4 %token <lex> DVEC2 DVEC3 DVEC4 DMAT2 DMAT3 DMAT4
%token <lex> F16VEC2 F16VEC3 F16VEC4 F16MAT2 F16MAT3 F16MAT4
%token <lex> NOPERSPECTIVE FLAT SMOOTH LAYOUT __EXPLICITINTERPAMD %token <lex> NOPERSPECTIVE FLAT SMOOTH LAYOUT __EXPLICITINTERPAMD
%token <lex> MAT2X2 MAT2X3 MAT2X4 %token <lex> MAT2X2 MAT2X3 MAT2X4
...@@ -134,6 +135,9 @@ extern int yylex(YYSTYPE*, TParseContext&); ...@@ -134,6 +135,9 @@ extern int yylex(YYSTYPE*, TParseContext&);
%token <lex> DMAT2X2 DMAT2X3 DMAT2X4 %token <lex> DMAT2X2 DMAT2X3 DMAT2X4
%token <lex> DMAT3X2 DMAT3X3 DMAT3X4 %token <lex> DMAT3X2 DMAT3X3 DMAT3X4
%token <lex> DMAT4X2 DMAT4X3 DMAT4X4 %token <lex> DMAT4X2 DMAT4X3 DMAT4X4
%token <lex> F16MAT2X2 F16MAT2X3 F16MAT2X4
%token <lex> F16MAT3X2 F16MAT3X3 F16MAT3X4
%token <lex> F16MAT4X2 F16MAT4X3 F16MAT4X4
%token <lex> ATOMIC_UINT %token <lex> ATOMIC_UINT
// combined image/sampler // combined image/sampler
...@@ -182,7 +186,7 @@ extern int yylex(YYSTYPE*, TParseContext&); ...@@ -182,7 +186,7 @@ extern int yylex(YYSTYPE*, TParseContext&);
%token <lex> STRUCT VOID WHILE %token <lex> STRUCT VOID WHILE
%token <lex> IDENTIFIER TYPE_NAME %token <lex> IDENTIFIER TYPE_NAME
%token <lex> FLOATCONSTANT DOUBLECONSTANT INTCONSTANT UINTCONSTANT INT64CONSTANT UINT64CONSTANT BOOLCONSTANT %token <lex> FLOATCONSTANT DOUBLECONSTANT INTCONSTANT UINTCONSTANT INT64CONSTANT UINT64CONSTANT BOOLCONSTANT FLOAT16CONSTANT
%token <lex> LEFT_OP RIGHT_OP %token <lex> LEFT_OP RIGHT_OP
%token <lex> INC_OP DEC_OP LE_OP GE_OP EQ_OP NE_OP %token <lex> INC_OP DEC_OP LE_OP GE_OP EQ_OP NE_OP
%token <lex> AND_OP OR_OP XOR_OP MUL_ASSIGN DIV_ASSIGN ADD_ASSIGN %token <lex> AND_OP OR_OP XOR_OP MUL_ASSIGN DIV_ASSIGN ADD_ASSIGN
...@@ -274,6 +278,12 @@ primary_expression ...@@ -274,6 +278,12 @@ primary_expression
parseContext.doubleCheck($1.loc, "double literal"); parseContext.doubleCheck($1.loc, "double literal");
$$ = parseContext.intermediate.addConstantUnion($1.d, EbtDouble, $1.loc, true); $$ = parseContext.intermediate.addConstantUnion($1.d, EbtDouble, $1.loc, true);
} }
| FLOAT16CONSTANT {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float literal");
$$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat16, $1.loc, true);
#endif
}
| BOOLCONSTANT { | BOOLCONSTANT {
$$ = parseContext.intermediate.addConstantUnion($1.b, $1.loc, true); $$ = parseContext.intermediate.addConstantUnion($1.b, $1.loc, true);
} }
...@@ -1324,6 +1334,13 @@ type_specifier_nonarray ...@@ -1324,6 +1334,13 @@ type_specifier_nonarray
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble; $$.basicType = EbtDouble;
} }
| FLOAT16_T {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
#endif
}
| INT { | INT {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt; $$.basicType = EbtInt;
...@@ -1380,6 +1397,30 @@ type_specifier_nonarray ...@@ -1380,6 +1397,30 @@ type_specifier_nonarray
$$.basicType = EbtDouble; $$.basicType = EbtDouble;
$$.setVector(4); $$.setVector(4);
} }
| F16VEC2 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setVector(2);
#endif
}
| F16VEC3 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setVector(3);
#endif
}
| F16VEC4 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setVector(4);
#endif
}
| BVEC2 { | BVEC2 {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtBool; $$.basicType = EbtBool;
...@@ -1596,6 +1637,102 @@ type_specifier_nonarray ...@@ -1596,6 +1637,102 @@ type_specifier_nonarray
$$.basicType = EbtDouble; $$.basicType = EbtDouble;
$$.setMatrix(4, 4); $$.setMatrix(4, 4);
} }
| F16MAT2 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(2, 2);
#endif
}
| F16MAT3 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(3, 3);
#endif
}
| F16MAT4 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(4, 4);
#endif
}
| F16MAT2X2 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(2, 2);
#endif
}
| F16MAT2X3 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(2, 3);
#endif
}
| F16MAT2X4 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(2, 4);
#endif
}
| F16MAT3X2 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(3, 2);
#endif
}
| F16MAT3X3 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(3, 3);
#endif
}
| F16MAT3X4 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(3, 4);
#endif
}
| F16MAT4X2 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(4, 2);
#endif
}
| F16MAT4X3 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(4, 3);
#endif
}
| F16MAT4X4 {
#ifdef AMD_EXTENSIONS
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(4, 4);
#endif
}
| ATOMIC_UINT { | ATOMIC_UINT {
parseContext.vulkanRemoved($1.loc, "atomic counter types"); parseContext.vulkanRemoved($1.loc, "atomic counter types");
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); $$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
......
This source diff could not be displayed because it is too large. You can view the blob instead.
...@@ -304,6 +304,11 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node) ...@@ -304,6 +304,11 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpPackUint2x32: out.debug << "packUint2x32"; break; case EOpPackUint2x32: out.debug << "packUint2x32"; break;
case EOpUnpackUint2x32: out.debug << "unpackUint2x32"; break; case EOpUnpackUint2x32: out.debug << "unpackUint2x32"; break;
#ifdef AMD_EXTENSIONS
case EOpPackFloat2x16: out.debug << "packFloat2x16"; break;
case EOpUnpackFloat2x16: out.debug << "unpackFloat2x16"; break;
#endif
case EOpLength: out.debug << "length"; break; case EOpLength: out.debug << "length"; break;
case EOpNormalize: out.debug << "normalize"; break; case EOpNormalize: out.debug << "normalize"; break;
case EOpDPdx: out.debug << "dPdx"; break; case EOpDPdx: out.debug << "dPdx"; break;
...@@ -373,6 +378,21 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node) ...@@ -373,6 +378,21 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpCubeFaceIndex: out.debug << "cubeFaceIndex"; break; case EOpCubeFaceIndex: out.debug << "cubeFaceIndex"; break;
case EOpCubeFaceCoord: out.debug << "cubeFaceCoord"; break; case EOpCubeFaceCoord: out.debug << "cubeFaceCoord"; break;
case EOpConvBoolToFloat16: out.debug << "Convert bool to float16"; break;
case EOpConvIntToFloat16: out.debug << "Convert int to float16"; break;
case EOpConvUintToFloat16: out.debug << "Convert uint to float16"; break;
case EOpConvFloatToFloat16: out.debug << "Convert float to float16"; break;
case EOpConvDoubleToFloat16: out.debug << "Convert double to float16"; break;
case EOpConvInt64ToFloat16: out.debug << "Convert int64 to float16"; break;
case EOpConvUint64ToFloat16: out.debug << "Convert uint64 to float16"; break;
case EOpConvFloat16ToBool: out.debug << "Convert float16 to bool"; break;
case EOpConvFloat16ToInt: out.debug << "Convert float16 to int"; break;
case EOpConvFloat16ToUint: out.debug << "Convert float16 to uint"; break;
case EOpConvFloat16ToFloat: out.debug << "Convert float16 to float"; break;
case EOpConvFloat16ToDouble: out.debug << "Convert float16 to double"; break;
case EOpConvFloat16ToInt64: out.debug << "Convert float16 to int64"; break;
case EOpConvFloat16ToUint64: out.debug << "Convert float16 to uint64"; break;
#endif #endif
default: out.debug.message(EPrefixError, "Bad unary op"); default: out.debug.message(EPrefixError, "Bad unary op");
...@@ -447,6 +467,21 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node ...@@ -447,6 +467,21 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpConstructDMat4x2: out.debug << "Construct dmat4x2"; break; case EOpConstructDMat4x2: out.debug << "Construct dmat4x2"; break;
case EOpConstructDMat4x3: out.debug << "Construct dmat4x3"; break; case EOpConstructDMat4x3: out.debug << "Construct dmat4x3"; break;
case EOpConstructDMat4x4: out.debug << "Construct dmat4"; break; case EOpConstructDMat4x4: out.debug << "Construct dmat4"; break;
#ifdef AMD_EXTENSIONS
case EOpConstructFloat16: out.debug << "Construct float16_t"; break;
case EOpConstructF16Vec2: out.debug << "Construct f16vec2"; break;
case EOpConstructF16Vec3: out.debug << "Construct f16vec3"; break;
case EOpConstructF16Vec4: out.debug << "Construct f16vec4"; break;
case EOpConstructF16Mat2x2: out.debug << "Construct f16mat2"; break;
case EOpConstructF16Mat2x3: out.debug << "Construct f16mat2x3"; break;
case EOpConstructF16Mat2x4: out.debug << "Construct f16mat2x4"; break;
case EOpConstructF16Mat3x2: out.debug << "Construct f16mat3x2"; break;
case EOpConstructF16Mat3x3: out.debug << "Construct f16mat3"; break;
case EOpConstructF16Mat3x4: out.debug << "Construct f16mat3x4"; break;
case EOpConstructF16Mat4x2: out.debug << "Construct f16mat4x2"; break;
case EOpConstructF16Mat4x3: out.debug << "Construct f16mat4x3"; break;
case EOpConstructF16Mat4x4: out.debug << "Construct f16mat4"; break;
#endif
case EOpConstructStruct: out.debug << "Construct structure"; break; case EOpConstructStruct: out.debug << "Construct structure"; break;
case EOpConstructTextureSampler: out.debug << "Construct combined texture-sampler"; break; case EOpConstructTextureSampler: out.debug << "Construct combined texture-sampler"; break;
...@@ -636,6 +671,9 @@ static void OutputConstantUnion(TInfoSink& out, const TIntermTyped* node, const ...@@ -636,6 +671,9 @@ static void OutputConstantUnion(TInfoSink& out, const TIntermTyped* node, const
break; break;
case EbtFloat: case EbtFloat:
case EbtDouble: case EbtDouble:
#ifdef AMD_EXTENSIONS
case EbtFloat16:
#endif
{ {
const double value = constUnion[i].getDConst(); const double value = constUnion[i].getDConst();
// Print infinity in a portable way, for test stability. // Print infinity in a portable way, for test stability.
......
...@@ -950,6 +950,9 @@ int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size) ...@@ -950,6 +950,9 @@ int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
case EbtInt64: case EbtInt64:
case EbtUint64: case EbtUint64:
case EbtDouble: size = 8; return 8; case EbtDouble: size = 8; return 8;
#ifdef AMD_EXTENSIONS
case EbtFloat16: size = 2; return 2;
#endif
default: size = 4; return 4; default: size = 4; return 4;
} }
} }
......
...@@ -76,6 +76,9 @@ public: ...@@ -76,6 +76,9 @@ public:
virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior); virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior);
virtual void fullIntegerCheck(const TSourceLoc&, const char* op); virtual void fullIntegerCheck(const TSourceLoc&, const char* op);
virtual void doubleCheck(const TSourceLoc&, const char* op); virtual void doubleCheck(const TSourceLoc&, const char* op);
#ifdef AMD_EXTENSIONS
virtual void float16Check(const TSourceLoc&, const char* op, bool builtIn = false);
#endif
virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false); virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void spvRemoved(const TSourceLoc&, const char* op); virtual void spvRemoved(const TSourceLoc&, const char* op);
virtual void vulkanRemoved(const TSourceLoc&, const char* op); virtual void vulkanRemoved(const TSourceLoc&, const char* op);
......
...@@ -705,6 +705,9 @@ int TPpContext::CPPerror(TPpToken* ppToken) ...@@ -705,6 +705,9 @@ int TPpContext::CPPerror(TPpToken* ppToken)
while (token != '\n' && token != EndOfInput) { while (token != '\n' && token != EndOfInput) {
if (token == PpAtomConstInt || token == PpAtomConstUint || if (token == PpAtomConstInt || token == PpAtomConstUint ||
token == PpAtomConstInt64 || token == PpAtomConstUint64 || token == PpAtomConstInt64 || token == PpAtomConstUint64 ||
#ifdef AMD_EXTENSIONS
token == PpAtomConstFloat16 ||
#endif
token == PpAtomConstFloat || token == PpAtomConstDouble) { token == PpAtomConstFloat || token == PpAtomConstDouble) {
message.append(ppToken->name); message.append(ppToken->name);
} else if (token == PpAtomIdentifier || token == PpAtomConstString) { } else if (token == PpAtomIdentifier || token == PpAtomConstString) {
...@@ -739,6 +742,9 @@ int TPpContext::CPPpragma(TPpToken* ppToken) ...@@ -739,6 +742,9 @@ int TPpContext::CPPpragma(TPpToken* ppToken)
case PpAtomConstUint64: case PpAtomConstUint64:
case PpAtomConstFloat: case PpAtomConstFloat:
case PpAtomConstDouble: case PpAtomConstDouble:
#ifdef AMD_EXTENSIONS
case PpAtomConstFloat16:
#endif
tokens.push_back(ppToken->name); tokens.push_back(ppToken->name);
break; break;
default: default:
......
...@@ -117,6 +117,10 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken) ...@@ -117,6 +117,10 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
int declen; int declen;
int str_len; int str_len;
int isDouble = 0; int isDouble = 0;
#ifdef AMD_EXTENSIONS
int isFloat16 = 0;
bool enableFloat16 = parseContext.version >= 450 && parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float);
#endif
declen = 0; declen = 0;
...@@ -200,6 +204,28 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken) ...@@ -200,6 +204,28 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
len = 1,str_len=1; len = 1,str_len=1;
} }
} }
#ifdef AMD_EXTENSIONS
} else if (enableFloat16 && (ch == 'h' || ch == 'H')) {
parseContext.float16Check(ppToken->loc, "half floating-point suffix");
if (!HasDecimalOrExponent)
parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
int ch2 = getChar();
if (ch2 != 'f' && ch2 != 'F') {
ungetChar();
ungetChar();
}
else {
if (len < MaxTokenLength) {
str[len++] = (char)ch;
str[len++] = (char)ch2;
isFloat16 = 1;
}
else {
parseContext.ppError(ppToken->loc, "float literal too long", "", "");
len = 1, str_len = 1;
}
}
#endif
} else if (ch == 'f' || ch == 'F') { } else if (ch == 'f' || ch == 'F') {
parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix"); parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix");
if (! parseContext.relaxedErrors()) if (! parseContext.relaxedErrors())
...@@ -222,6 +248,10 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken) ...@@ -222,6 +248,10 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
if (isDouble) if (isDouble)
return PpAtomConstDouble; return PpAtomConstDouble;
#ifdef AMD_EXTENSIONS
else if (isFloat16)
return PpAtomConstFloat16;
#endif
else else
return PpAtomConstFloat; return PpAtomConstFloat;
} }
...@@ -744,6 +774,9 @@ const char* TPpContext::tokenize(TPpToken* ppToken) ...@@ -744,6 +774,9 @@ const char* TPpContext::tokenize(TPpToken* ppToken)
case PpAtomConstInt64: case PpAtomConstInt64:
case PpAtomConstUint64: case PpAtomConstUint64:
case PpAtomConstDouble: case PpAtomConstDouble:
#ifdef AMD_EXTENSIONS
case PpAtomConstFloat16:
#endif
tokenString = ppToken->name; tokenString = ppToken->name;
break; break;
case PpAtomConstString: case PpAtomConstString:
......
...@@ -144,6 +144,9 @@ void TPpContext::RecordToken(TokenStream *pTok, int token, TPpToken* ppToken) ...@@ -144,6 +144,9 @@ void TPpContext::RecordToken(TokenStream *pTok, int token, TPpToken* ppToken)
case PpAtomConstUint64: case PpAtomConstUint64:
case PpAtomConstFloat: case PpAtomConstFloat:
case PpAtomConstDouble: case PpAtomConstDouble:
#ifdef AMD_EXTENSIONS
case PpAtomConstFloat16:
#endif
str = ppToken->name; str = ppToken->name;
while (*str) { while (*str) {
lAddByte(pTok, (unsigned char) *str); lAddByte(pTok, (unsigned char) *str);
...@@ -195,6 +198,9 @@ int TPpContext::ReadToken(TokenStream *pTok, TPpToken *ppToken) ...@@ -195,6 +198,9 @@ int TPpContext::ReadToken(TokenStream *pTok, TPpToken *ppToken)
case PpAtomIdentifier: case PpAtomIdentifier:
case PpAtomConstFloat: case PpAtomConstFloat:
case PpAtomConstDouble: case PpAtomConstDouble:
#ifdef AMD_EXTENSIONS
case PpAtomConstFloat16:
#endif
case PpAtomConstInt: case PpAtomConstInt:
case PpAtomConstUint: case PpAtomConstUint:
case PpAtomConstInt64: case PpAtomConstInt64:
...@@ -221,6 +227,9 @@ int TPpContext::ReadToken(TokenStream *pTok, TPpToken *ppToken) ...@@ -221,6 +227,9 @@ int TPpContext::ReadToken(TokenStream *pTok, TPpToken *ppToken)
break; break;
case PpAtomConstFloat: case PpAtomConstFloat:
case PpAtomConstDouble: case PpAtomConstDouble:
#ifdef AMD_EXTENSIONS
case PpAtomConstFloat16:
#endif
ppToken->dval = atof(ppToken->name); ppToken->dval = atof(ppToken->name);
break; break;
case PpAtomConstInt: case PpAtomConstInt:
......
...@@ -123,6 +123,9 @@ enum EFixedAtoms { ...@@ -123,6 +123,9 @@ enum EFixedAtoms {
PpAtomConstUint64, PpAtomConstUint64,
PpAtomConstFloat, PpAtomConstFloat,
PpAtomConstDouble, PpAtomConstDouble,
#ifdef AMD_EXTENSIONS
PpAtomConstFloat16,
#endif
PpAtomConstString, PpAtomConstString,
// Identifiers // Identifiers
......
...@@ -529,6 +529,9 @@ public: ...@@ -529,6 +529,9 @@ public:
switch (type.getBasicType()) { switch (type.getBasicType()) {
case EbtFloat: return GL_FLOAT_VEC2 + offset; case EbtFloat: return GL_FLOAT_VEC2 + offset;
case EbtDouble: return GL_DOUBLE_VEC2 + offset; case EbtDouble: return GL_DOUBLE_VEC2 + offset;
#ifdef AMD_EXTENSIONS
case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset;
#endif
case EbtInt: return GL_INT_VEC2 + offset; case EbtInt: return GL_INT_VEC2 + offset;
case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset; case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset;
case EbtInt64: return GL_INT64_ARB + offset; case EbtInt64: return GL_INT64_ARB + offset;
...@@ -588,6 +591,32 @@ public: ...@@ -588,6 +591,32 @@ public:
default: return 0; default: return 0;
} }
} }
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch (type.getMatrixCols()) {
case 2:
switch (type.getMatrixRows()) {
case 2: return GL_FLOAT16_MAT2_AMD;
case 3: return GL_FLOAT16_MAT2x3_AMD;
case 4: return GL_FLOAT16_MAT2x4_AMD;
default: return 0;
}
case 3:
switch (type.getMatrixRows()) {
case 2: return GL_FLOAT16_MAT3x2_AMD;
case 3: return GL_FLOAT16_MAT3_AMD;
case 4: return GL_FLOAT16_MAT3x4_AMD;
default: return 0;
}
case 4:
switch (type.getMatrixRows()) {
case 2: return GL_FLOAT16_MAT4x2_AMD;
case 3: return GL_FLOAT16_MAT4x3_AMD;
case 4: return GL_FLOAT16_MAT4_AMD;
default: return 0;
}
}
#endif
default: default:
return 0; return 0;
} }
...@@ -596,6 +625,9 @@ public: ...@@ -596,6 +625,9 @@ public:
switch (type.getBasicType()) { switch (type.getBasicType()) {
case EbtFloat: return GL_FLOAT; case EbtFloat: return GL_FLOAT;
case EbtDouble: return GL_DOUBLE; case EbtDouble: return GL_DOUBLE;
#ifdef AMD_EXTENSIONS
case EbtFloat16: return GL_FLOAT16_NV;
#endif
case EbtInt: return GL_INT; case EbtInt: return GL_INT;
case EbtUint: return GL_UNSIGNED_INT; case EbtUint: return GL_UNSIGNED_INT;
case EbtInt64: return GL_INT64_ARB; case EbtInt64: return GL_INT64_ARB;
......
...@@ -14,6 +14,7 @@ if (TARGET gmock) ...@@ -14,6 +14,7 @@ if (TARGET gmock)
${CMAKE_CURRENT_SOURCE_DIR}/AST.FromFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/AST.FromFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/BuiltInResource.FromFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/BuiltInResource.FromFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Config.FromFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/Config.FromFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/HexFloat.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Hlsl.FromFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/Hlsl.FromFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Link.FromFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/Link.FromFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Pp.FromFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/Pp.FromFile.cpp
......
...@@ -67,6 +67,9 @@ using OpenGLSemantics = GlslangTest<::testing::TestWithParam<std::string>>; ...@@ -67,6 +67,9 @@ using OpenGLSemantics = GlslangTest<::testing::TestWithParam<std::string>>;
using VulkanAstSemantics = GlslangTest<::testing::TestWithParam<std::string>>; using VulkanAstSemantics = GlslangTest<::testing::TestWithParam<std::string>>;
using HlslIoMap = GlslangTest<::testing::TestWithParam<IoMapData>>; using HlslIoMap = GlslangTest<::testing::TestWithParam<IoMapData>>;
using GlslIoMap = GlslangTest<::testing::TestWithParam<IoMapData>>; using GlslIoMap = GlslangTest<::testing::TestWithParam<IoMapData>>;
#ifdef AMD_EXTENSIONS
using CompileVulkanToSpirvTestAMD = GlslangTest<::testing::TestWithParam<std::string>>;
#endif
// Compiling GLSL to SPIR-V under Vulkan semantics. Expected to successfully // Compiling GLSL to SPIR-V under Vulkan semantics. Expected to successfully
// generate SPIR-V. // generate SPIR-V.
...@@ -138,6 +141,17 @@ TEST_P(GlslIoMap, FromFile) ...@@ -138,6 +141,17 @@ TEST_P(GlslIoMap, FromFile)
GetParam().flattenUniforms); GetParam().flattenUniforms);
} }
#ifdef AMD_EXTENSIONS
// Compiling GLSL to SPIR-V under Vulkan semantics (AMD extensions enabled).
// Expected to successfully generate SPIR-V.
TEST_P(CompileVulkanToSpirvTestAMD, FromFile)
{
loadFileCompileAndCheck(GLSLANG_TEST_DIRECTORY, GetParam(),
Source::GLSL, Semantics::Vulkan,
Target::Spv);
}
#endif
// clang-format off // clang-format off
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(
Glsl, CompileVulkanToSpirvTest, Glsl, CompileVulkanToSpirvTest,
...@@ -324,6 +338,16 @@ INSTANTIATE_TEST_CASE_P( ...@@ -324,6 +338,16 @@ INSTANTIATE_TEST_CASE_P(
})), })),
FileNameAsCustomTestSuffix FileNameAsCustomTestSuffix
); );
#ifdef AMD_EXTENSIONS
INSTANTIATE_TEST_CASE_P(
Glsl, CompileVulkanToSpirvTestAMD,
::testing::ValuesIn(std::vector<std::string>({
"spv.float16.frag",
})),
FileNameAsCustomTestSuffix
);
#endif
// clang-format on // clang-format on
} // anonymous namespace } // anonymous namespace
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment