Commit 08ca3c6e by Nicolas Capens

Eliminate compiling for the WebGL spec.

Bug 19331817
Change-Id: I85cbfd61a267e39832b951121422a676f5af4a54
Reviewed-on: https://swiftshader-review.googlesource.com/2289
Reviewed-by: Nicolas Capens <capn@google.com>
Tested-by: Nicolas Capens <capn@google.com>
parent b28964b6
......@@ -56,9 +56,8 @@ ShBuiltInResources::ShBuiltInResources()
MaxCallStackDepth = UINT_MAX;
}
TCompiler::TCompiler(ShShaderType type, ShShaderSpec spec)
TCompiler::TCompiler(GLenum type)
: shaderType(type),
shaderSpec(spec),
maxCallStackDepth(UINT_MAX)
{
allocator.push();
......@@ -95,10 +94,6 @@ bool TCompiler::compile(const char* const shaderStrings[],
if (numStrings == 0)
return true;
// If compiling for WebGL, validate loop and indexing as well.
if (shaderSpec == SH_WEBGL_SPEC)
compileOptions |= SH_VALIDATE_LOOP_INDEXING;
// First string is path of source file if flag is set. The actual source follows.
const char* sourcePath = NULL;
int firstSource = 0;
......@@ -110,7 +105,7 @@ bool TCompiler::compile(const char* const shaderStrings[],
TIntermediate intermediate(infoSink);
TParseContext parseContext(symbolTable, extensionBehavior, intermediate,
shaderType, shaderSpec, compileOptions, true,
shaderType, compileOptions, true,
sourcePath, infoSink);
SetGlobalParseContext(&parseContext);
......@@ -177,10 +172,10 @@ bool TCompiler::InitBuiltInSymbolTable(const ShBuiltInResources &resources)
switch(shaderType)
{
case SH_FRAGMENT_SHADER:
case GL_FRAGMENT_SHADER:
symbolTable.setDefaultPrecision(integer, EbpMedium);
break;
case SH_VERTEX_SHADER:
case GL_VERTEX_SHADER:
symbolTable.setDefaultPrecision(integer, EbpHigh);
symbolTable.setDefaultPrecision(floatingPoint, EbpHigh);
break;
......@@ -189,7 +184,7 @@ bool TCompiler::InitBuiltInSymbolTable(const ShBuiltInResources &resources)
InsertBuiltInFunctions(shaderType, resources, symbolTable);
IdentifyBuiltIns(shaderType, shaderSpec, resources, symbolTable);
IdentifyBuiltIns(shaderType, resources, symbolTable);
return true;
}
......
......@@ -11,32 +11,6 @@
#include "InfoSink.h"
#include "SymbolTable.h"
//
// The names of the following enums have been derived by replacing GL prefix
// with SH. For example, SH_INFO_LOG_LENGTH is equivalent to GL_INFO_LOG_LENGTH.
// The enum values are also equal to the values of their GL counterpart. This
// is done to make it easier for applications to use the shader library.
//
enum ShShaderType
{
SH_FRAGMENT_SHADER = 0x8B30,
SH_VERTEX_SHADER = 0x8B31
};
enum ShShaderSpec
{
SH_GLES2_SPEC = 0x8B40,
SH_WEBGL_SPEC = 0x8B41
};
enum ShShaderInfo
{
SH_INFO_LOG_LENGTH = 0x8B84,
SH_OBJECT_CODE_LENGTH = 0x8B88, // GL_SHADER_SOURCE_LENGTH
SH_ACTIVE_UNIFORM_MAX_LENGTH = 0x8B87,
SH_ACTIVE_ATTRIBUTE_MAX_LENGTH = 0x8B8A
};
enum ShCompileOptions
{
SH_VALIDATE = 0,
......@@ -75,6 +49,10 @@ struct ShBuiltInResources
unsigned int MaxCallStackDepth;
};
typedef unsigned int GLenum;
#define GL_FRAGMENT_SHADER 0x8B30
#define GL_VERTEX_SHADER 0x8B31
//
// The base class for the machine dependent compiler to derive from
// for managing object code from the compile.
......@@ -82,7 +60,7 @@ struct ShBuiltInResources
class TCompiler
{
public:
TCompiler(ShShaderType type, ShShaderSpec spec);
TCompiler(GLenum shaderType);
virtual ~TCompiler();
virtual TCompiler* getAsCompiler() { return this; }
......@@ -96,8 +74,7 @@ public:
TInfoSink& getInfoSink() { return infoSink; }
protected:
ShShaderType getShaderType() const { return shaderType; }
ShShaderSpec getShaderSpec() const { return shaderSpec; }
GLenum getShaderType() const { return shaderType; }
// Initialize symbol-table with built-in symbols.
bool InitBuiltInSymbolTable(const ShBuiltInResources& resources);
// Clears the results from the previous compilation.
......@@ -113,8 +90,7 @@ protected:
const TExtensionBehavior& getExtensionBehavior() const;
private:
ShShaderType shaderType;
ShShaderSpec shaderSpec;
GLenum shaderType;
unsigned int maxCallStackDepth;
......
......@@ -14,7 +14,7 @@
#include "intermediate.h"
void InsertBuiltInFunctions(ShShaderType type, const ShBuiltInResources &resources, TSymbolTable &symbolTable)
void InsertBuiltInFunctions(GLenum type, const ShBuiltInResources &resources, TSymbolTable &symbolTable)
{
TType *float1 = new TType(EbtFloat, EbpUndefined, EvqGlobal, 1);
TType *float2 = new TType(EbtFloat, EbpUndefined, EvqGlobal, 2);
......@@ -263,7 +263,7 @@ void InsertBuiltInFunctions(ShShaderType type, const ShBuiltInResources &resourc
symbolTable.insertBuiltIn(float4, "textureCube", samplerCube, float3);
symbolTable.insertBuiltIn(float4, "texture3D", sampler3D, float3);
if(type == SH_FRAGMENT_SHADER)
if(type == GL_FRAGMENT_SHADER)
{
symbolTable.insertBuiltIn(float4, "texture2D", sampler2D, float2, float1);
symbolTable.insertBuiltIn(float4, "texture2DProj", sampler2D, float3, float1);
......@@ -290,7 +290,7 @@ void InsertBuiltInFunctions(ShShaderType type, const ShBuiltInResources &resourc
}
}
if(type == SH_VERTEX_SHADER)
if(type == GL_VERTEX_SHADER)
{
symbolTable.insertBuiltIn(float4, "texture2DLod", sampler2D, float2, float1);
symbolTable.insertBuiltIn(float4, "texture2DProjLod", sampler2D, float3, float1);
......@@ -335,7 +335,7 @@ void InsertBuiltInFunctions(ShShaderType type, const ShBuiltInResources &resourc
symbolTable.insertConstInt("gl_MaxDrawBuffers", resources.MaxDrawBuffers);
}
void IdentifyBuiltIns(ShShaderType type, ShShaderSpec spec,
void IdentifyBuiltIns(GLenum shaderType,
const ShBuiltInResources& resources,
TSymbolTable &symbolTable)
{
......@@ -343,15 +343,16 @@ void IdentifyBuiltIns(ShShaderType type, ShShaderSpec spec,
// First, insert some special built-in variables that are not in
// the built-in header files.
//
switch(type) {
case SH_FRAGMENT_SHADER:
switch(shaderType)
{
case GL_FRAGMENT_SHADER:
symbolTable.insert(*new TVariable(NewPoolTString("gl_FragCoord"), TType(EbtFloat, EbpMedium, EvqFragCoord, 4)));
symbolTable.insert(*new TVariable(NewPoolTString("gl_FrontFacing"), TType(EbtBool, EbpUndefined, EvqFrontFacing, 1)));
symbolTable.insert(*new TVariable(NewPoolTString("gl_FragColor"), TType(EbtFloat, EbpMedium, EvqFragColor, 4)));
symbolTable.insert(*new TVariable(NewPoolTString("gl_FragData[gl_MaxDrawBuffers]"), TType(EbtFloat, EbpMedium, EvqFragData, 4)));
symbolTable.insert(*new TVariable(NewPoolTString("gl_PointCoord"), TType(EbtFloat, EbpMedium, EvqPointCoord, 2)));
break;
case SH_VERTEX_SHADER:
case GL_VERTEX_SHADER:
symbolTable.insert(*new TVariable(NewPoolTString("gl_Position"), TType(EbtFloat, EbpHigh, EvqPosition, 4)));
symbolTable.insert(*new TVariable(NewPoolTString("gl_PointSize"), TType(EbtFloat, EbpMedium, EvqPointSize, 1)));
break;
......@@ -418,10 +419,11 @@ void IdentifyBuiltIns(ShShaderType type, ShShaderSpec spec,
symbolTable.relateToOperator("all", EOpAll);
// Map language-specific operators.
switch(type) {
case SH_VERTEX_SHADER:
switch(shaderType)
{
case GL_VERTEX_SHADER:
break;
case SH_FRAGMENT_SHADER:
case GL_FRAGMENT_SHADER:
if (resources.OES_standard_derivatives) {
symbolTable.relateToOperator("dFdx", EOpDFdx);
symbolTable.relateToOperator("dFdy", EOpDFdy);
......@@ -436,9 +438,9 @@ void IdentifyBuiltIns(ShShaderType type, ShShaderSpec spec,
}
// Finally add resource-specific variables.
switch(type)
switch(shaderType)
{
case SH_FRAGMENT_SHADER:
case GL_FRAGMENT_SHADER:
{
// Set up gl_FragData. The array size.
TType fragData(EbtFloat, EbpMedium, EvqFragData, 4, false, true);
......
......@@ -11,9 +11,9 @@
#include "Compiler.h"
#include "SymbolTable.h"
void InsertBuiltInFunctions(ShShaderType type, const ShBuiltInResources &resources, TSymbolTable &table);
void InsertBuiltInFunctions(GLenum shaderType, const ShBuiltInResources &resources, TSymbolTable &table);
void IdentifyBuiltIns(ShShaderType type, ShShaderSpec spec,
void IdentifyBuiltIns(GLenum shaderType,
const ShBuiltInResources& resources,
TSymbolTable& symbolTable);
......
......@@ -432,16 +432,6 @@ bool TParseContext::reservedErrorCheck(int line, const TString& identifier)
error(line, reservedErrMsg, "gl_");
return true;
}
if (shaderSpec == SH_WEBGL_SPEC) {
if (identifier.compare(0, 6, "webgl_") == 0) {
error(line, reservedErrMsg, "webgl_");
return true;
}
if (identifier.compare(0, 7, "_webgl_") == 0) {
error(line, reservedErrMsg, "_webgl_");
return true;
}
}
if (identifier.find("__") != TString::npos) {
error(line, "identifiers containing two consecutive underscores (__) are reserved as possible future keywords", identifier.c_str());
return true;
......@@ -1333,36 +1323,6 @@ void TParseContext::exitStructDeclaration()
--structNestingLevel;
}
namespace {
const int kWebGLMaxStructNesting = 4;
} // namespace
bool TParseContext::structNestingErrorCheck(TSourceLoc line, const TType& fieldType)
{
if (shaderSpec != SH_WEBGL_SPEC) {
return false;
}
if (fieldType.getBasicType() != EbtStruct) {
return false;
}
// We're already inside a structure definition at this point, so add
// one to the field's struct nesting.
if (1 + fieldType.getDeepestStructNesting() > kWebGLMaxStructNesting) {
std::stringstream extraInfoStream;
extraInfoStream << "Reference of struct type " << fieldType.getTypeName()
<< " exceeds maximum struct nesting of " << kWebGLMaxStructNesting;
std::string extraInfo = extraInfoStream.str();
error(line, "", "", extraInfo.c_str());
return true;
}
return false;
}
//
// Parse an array of strings using yyparse.
//
......
......@@ -25,11 +25,10 @@ struct TMatrixFields {
// they can be passed to the parser without needing a global.
//
struct TParseContext {
TParseContext(TSymbolTable& symt, TExtensionBehavior& ext, TIntermediate& interm, ShShaderType type, ShShaderSpec spec, int options, bool checksPrecErrors, const char* sourcePath, TInfoSink& is) :
TParseContext(TSymbolTable& symt, TExtensionBehavior& ext, TIntermediate& interm, GLenum type, int options, bool checksPrecErrors, const char* sourcePath, TInfoSink& is) :
intermediate(interm),
symbolTable(symt),
shaderType(type),
shaderSpec(spec),
compileOptions(options),
sourcePath(sourcePath),
treeRoot(0),
......@@ -46,8 +45,7 @@ struct TParseContext {
scanner(NULL) { }
TIntermediate& intermediate; // to hold and build a parse tree
TSymbolTable& symbolTable; // symbol table that goes with the language currently being parsed
ShShaderType shaderType; // vertex or fragment language (future: pack or unpack)
ShShaderSpec shaderSpec; // The language specification compiler conforms to - GLES2 or WebGL.
GLenum shaderType; // vertex or fragment language (future: pack or unpack)
int compileOptions;
const char* sourcePath; // Path of source file or NULL.
TIntermNode* treeRoot; // root of parse tree being created
......
......@@ -13,7 +13,7 @@
#include "InitializeParseContext.h"
TranslatorASM::TranslatorASM(glsl::Shader *shaderObject, ShShaderType type, ShShaderSpec spec) : TCompiler(type, spec), shaderObject(shaderObject)
TranslatorASM::TranslatorASM(glsl::Shader *shaderObject, GLenum shaderType) : TCompiler(shaderType), shaderObject(shaderObject)
{
}
......
......@@ -23,7 +23,7 @@ namespace glsl
class TranslatorASM : public TCompiler
{
public:
TranslatorASM(glsl::Shader *shaderObject, ShShaderType type, ShShaderSpec spec);
TranslatorASM(glsl::Shader *shaderObject, GLenum type);
protected:
virtual bool translate(TIntermNode* root);
......
......@@ -96,7 +96,7 @@ private:
};
} // namespace
ValidateLimitations::ValidateLimitations(ShShaderType shaderType,
ValidateLimitations::ValidateLimitations(GLenum shaderType,
TInfoSinkBase& sink)
: mShaderType(shaderType),
mSink(sink),
......@@ -501,7 +501,7 @@ bool ValidateLimitations::validateIndexing(TIntermBinary* node)
// The index expession must be a constant-index-expression unless
// the operand is a uniform in a vertex shader.
TIntermTyped* operand = node->getLeft();
bool skip = (mShaderType == SH_VERTEX_SHADER) &&
bool skip = (mShaderType == GL_VERTEX_SHADER) &&
(operand->getQualifier() == EvqUniform);
if (!skip && !isConstIndexExpr(index)) {
error(index->getLine(), "Index expression must be constant", "[]");
......
......@@ -21,7 +21,7 @@ typedef TVector<TLoopInfo> TLoopStack;
// minimum functionality mandated in GLSL 1.0 spec, Appendix A.
class ValidateLimitations : public TIntermTraverser {
public:
ValidateLimitations(ShShaderType shaderType, TInfoSinkBase& sink);
ValidateLimitations(GLenum shaderType, TInfoSinkBase& sink);
int numErrors() const { return mNumErrors; }
......@@ -51,7 +51,7 @@ private:
bool isConstIndexExpr(TIntermNode* node);
bool validateIndexing(TIntermBinary* node);
ShShaderType mShaderType;
GLenum mShaderType;
TInfoSinkBase& mSink;
int mNumErrors;
TLoopStack mLoopStack;
......
......@@ -84,22 +84,22 @@ extern int yylex(YYSTYPE* yylval_param, void* yyscanner);
extern void yyerror(TParseContext* context, const char* reason);
#define FRAG_VERT_ONLY(S, L) { \
if (context->shaderType != SH_FRAGMENT_SHADER && \
context->shaderType != SH_VERTEX_SHADER) { \
if (context->shaderType != GL_FRAGMENT_SHADER && \
context->shaderType != GL_VERTEX_SHADER) { \
context->error(L, " supported in vertex/fragment shaders only ", S); \
context->recover(); \
} \
}
#define VERTEX_ONLY(S, L) { \
if (context->shaderType != SH_VERTEX_SHADER) { \
if (context->shaderType != GL_VERTEX_SHADER) { \
context->error(L, " supported in vertex shaders only ", S); \
context->recover(); \
} \
}
#define FRAG_ONLY(S, L) { \
if (context->shaderType != SH_FRAGMENT_SHADER) { \
if (context->shaderType != GL_FRAGMENT_SHADER) { \
context->error(L, " supported in fragment shaders only ", S); \
context->recover(); \
} \
......@@ -1493,7 +1493,7 @@ type_qualifier
| VARYING {
if (context->globalErrorCheck($1.line, context->symbolTable.atGlobalLevel(), "varying"))
context->recover();
if (context->shaderType == SH_VERTEX_SHADER)
if (context->shaderType == GL_VERTEX_SHADER)
$$.setBasic(EbtVoid, EvqVaryingOut, $1.line);
else
$$.setBasic(EbtVoid, EvqVaryingIn, $1.line);
......@@ -1501,7 +1501,7 @@ type_qualifier
| INVARIANT VARYING {
if (context->globalErrorCheck($1.line, context->symbolTable.atGlobalLevel(), "invariant varying"))
context->recover();
if (context->shaderType == SH_VERTEX_SHADER)
if (context->shaderType == GL_VERTEX_SHADER)
$$.setBasic(EbtVoid, EvqInvariantVaryingOut, $1.line);
else
$$.setBasic(EbtVoid, EvqInvariantVaryingIn, $1.line);
......@@ -1755,10 +1755,6 @@ struct_declaration
type->setStruct($1.userDef->getStruct());
type->setTypeName($1.userDef->getTypeName());
}
if (context->structNestingErrorCheck($1.line, *type)) {
context->recover();
}
}
}
;
......
......@@ -272,22 +272,22 @@ extern int yylex(YYSTYPE* yylval_param, void* yyscanner);
extern void yyerror(TParseContext* context, const char* reason);
#define FRAG_VERT_ONLY(S, L) { \
if (context->shaderType != SH_FRAGMENT_SHADER && \
context->shaderType != SH_VERTEX_SHADER) { \
if (context->shaderType != GL_FRAGMENT_SHADER && \
context->shaderType != GL_VERTEX_SHADER) { \
context->error(L, " supported in vertex/fragment shaders only ", S); \
context->recover(); \
} \
}
#define VERTEX_ONLY(S, L) { \
if (context->shaderType != SH_VERTEX_SHADER) { \
if (context->shaderType != GL_VERTEX_SHADER) { \
context->error(L, " supported in vertex shaders only ", S); \
context->recover(); \
} \
}
#define FRAG_ONLY(S, L) { \
if (context->shaderType != SH_FRAGMENT_SHADER) { \
if (context->shaderType != GL_FRAGMENT_SHADER) { \
context->error(L, " supported in fragment shaders only ", S); \
context->recover(); \
} \
......@@ -674,13 +674,13 @@ static const yytype_uint16 yyrline[] =
1527, 1534, 1537, 1540, 1546, 1549, 1564, 1568, 1572, 1576,
1585, 1590, 1595, 1600, 1605, 1610, 1615, 1620, 1625, 1630,
1636, 1642, 1648, 1653, 1658, 1667, 1672, 1677, 1690, 1690,
1704, 1704, 1713, 1716, 1731, 1767, 1771, 1777, 1785, 1801,
1805, 1809, 1810, 1816, 1817, 1818, 1819, 1820, 1824, 1825,
1825, 1825, 1835, 1836, 1840, 1840, 1841, 1841, 1846, 1849,
1859, 1862, 1868, 1869, 1873, 1881, 1885, 1895, 1900, 1917,
1917, 1922, 1922, 1929, 1929, 1937, 1940, 1946, 1949, 1955,
1959, 1966, 1973, 1980, 1987, 1998, 2007, 2011, 2018, 2021,
2027, 2027
1704, 1704, 1713, 1716, 1731, 1763, 1767, 1773, 1781, 1797,
1801, 1805, 1806, 1812, 1813, 1814, 1815, 1816, 1820, 1821,
1821, 1821, 1831, 1832, 1836, 1836, 1837, 1837, 1842, 1845,
1855, 1858, 1864, 1865, 1869, 1877, 1881, 1891, 1896, 1913,
1913, 1918, 1918, 1925, 1925, 1933, 1936, 1942, 1945, 1951,
1955, 1962, 1969, 1976, 1983, 1994, 2003, 2007, 2014, 2017,
2023, 2023
};
#endif
......@@ -3597,7 +3597,7 @@ yyreduce:
{
if (context->globalErrorCheck((yyvsp[(1) - (1)].lex).line, context->symbolTable.atGlobalLevel(), "varying"))
context->recover();
if (context->shaderType == SH_VERTEX_SHADER)
if (context->shaderType == GL_VERTEX_SHADER)
(yyval.interm.type).setBasic(EbtVoid, EvqVaryingOut, (yyvsp[(1) - (1)].lex).line);
else
(yyval.interm.type).setBasic(EbtVoid, EvqVaryingIn, (yyvsp[(1) - (1)].lex).line);
......@@ -3609,7 +3609,7 @@ yyreduce:
{
if (context->globalErrorCheck((yyvsp[(1) - (2)].lex).line, context->symbolTable.atGlobalLevel(), "invariant varying"))
context->recover();
if (context->shaderType == SH_VERTEX_SHADER)
if (context->shaderType == GL_VERTEX_SHADER)
(yyval.interm.type).setBasic(EbtVoid, EvqInvariantVaryingOut, (yyvsp[(1) - (2)].lex).line);
else
(yyval.interm.type).setBasic(EbtVoid, EvqInvariantVaryingIn, (yyvsp[(1) - (2)].lex).line);
......@@ -3987,10 +3987,6 @@ yyreduce:
type->setStruct((yyvsp[(1) - (3)].interm.type).userDef->getStruct());
type->setTypeName((yyvsp[(1) - (3)].interm.type).userDef->getTypeName());
}
if (context->structNestingErrorCheck((yyvsp[(1) - (3)].interm.type).line, *type)) {
context->recover();
}
}
}
break;
......
......@@ -152,7 +152,7 @@ void Shader::getSource(GLsizei bufSize, GLsizei *length, char *source)
}
}
TranslatorASM *Shader::createCompiler(ShShaderType type)
TranslatorASM *Shader::createCompiler(GLenum shaderType)
{
if(!compilerInitialized)
{
......@@ -160,7 +160,7 @@ TranslatorASM *Shader::createCompiler(ShShaderType type)
compilerInitialized = true;
}
TranslatorASM *assembler = new TranslatorASM(this, type, SH_GLES2_SPEC);
TranslatorASM *assembler = new TranslatorASM(this, shaderType);
ShBuiltInResources resources;
resources.MaxVertexAttribs = MAX_VERTEX_ATTRIBS;
......@@ -370,7 +370,7 @@ void VertexShader::compile()
delete vertexShader;
vertexShader = new sw::VertexShader();
TranslatorASM *compiler = createCompiler(SH_VERTEX_SHADER);
TranslatorASM *compiler = createCompiler(GL_VERTEX_SHADER);
// Ensure we don't pass a NULL source to the compiler
char *source = "\0";
......@@ -455,7 +455,7 @@ void FragmentShader::compile()
delete pixelShader;
pixelShader = new sw::PixelShader();
TranslatorASM *compiler = createCompiler(SH_FRAGMENT_SHADER);
TranslatorASM *compiler = createCompiler(GL_FRAGMENT_SHADER);
// Ensure we don't pass a NULL source to the compiler
char *source = "\0";
......
......@@ -70,7 +70,7 @@ public:
protected:
static bool compilerInitialized;
TranslatorASM *createCompiler(ShShaderType type);
TranslatorASM *createCompiler(GLenum shaderType);
void clear();
static GLenum parseType(const std::string &type);
......
......@@ -152,7 +152,7 @@ void Shader::getSource(GLsizei bufSize, GLsizei *length, char *source)
}
}
TranslatorASM *Shader::createCompiler(ShShaderType type)
TranslatorASM *Shader::createCompiler(GLenum shaderType)
{
if(!compilerInitialized)
{
......@@ -160,7 +160,7 @@ TranslatorASM *Shader::createCompiler(ShShaderType type)
compilerInitialized = true;
}
TranslatorASM *assembler = new TranslatorASM(this, type, SH_GLES2_SPEC);
TranslatorASM *assembler = new TranslatorASM(this, shaderType);
ShBuiltInResources resources;
resources.MaxVertexAttribs = MAX_VERTEX_ATTRIBS;
......@@ -370,7 +370,7 @@ void VertexShader::compile()
delete vertexShader;
vertexShader = new sw::VertexShader();
TranslatorASM *compiler = createCompiler(SH_VERTEX_SHADER);
TranslatorASM *compiler = createCompiler(GL_VERTEX_SHADER);
// Ensure we don't pass a NULL source to the compiler
char *source = "\0";
......@@ -455,7 +455,7 @@ void FragmentShader::compile()
delete pixelShader;
pixelShader = new sw::PixelShader();
TranslatorASM *compiler = createCompiler(SH_FRAGMENT_SHADER);
TranslatorASM *compiler = createCompiler(GL_FRAGMENT_SHADER);
// Ensure we don't pass a NULL source to the compiler
char *source = "\0";
......
......@@ -67,7 +67,7 @@ public:
protected:
static bool compilerInitialized;
TranslatorASM *createCompiler(ShShaderType type);
TranslatorASM *createCompiler(GLenum shaderType);
void clear();
static GLenum parseType(const std::string &type);
......
......@@ -152,7 +152,7 @@ void Shader::getSource(GLsizei bufSize, GLsizei *length, char *source)
}
}
TranslatorASM *Shader::createCompiler(ShShaderType type)
TranslatorASM *Shader::createCompiler(GLenum type)
{
if(!compilerInitialized)
{
......@@ -160,7 +160,7 @@ TranslatorASM *Shader::createCompiler(ShShaderType type)
compilerInitialized = true;
}
TranslatorASM *assembler = new TranslatorASM(this, type, SH_GLES2_SPEC);
TranslatorASM *assembler = new TranslatorASM(this, type);
ShBuiltInResources resources;
resources.MaxVertexAttribs = MAX_VERTEX_ATTRIBS;
......@@ -370,7 +370,7 @@ void VertexShader::compile()
delete vertexShader;
vertexShader = new sw::VertexShader();
TranslatorASM *compiler = createCompiler(SH_VERTEX_SHADER);
TranslatorASM *compiler = createCompiler(GL_VERTEX_SHADER);
// Ensure we don't pass a NULL source to the compiler
char *source = "\0";
......@@ -455,7 +455,7 @@ void FragmentShader::compile()
delete pixelShader;
pixelShader = new sw::PixelShader();
TranslatorASM *compiler = createCompiler(SH_FRAGMENT_SHADER);
TranslatorASM *compiler = createCompiler(GL_FRAGMENT_SHADER);
// Ensure we don't pass a NULL source to the compiler
char *source = "\0";
......
......@@ -65,7 +65,7 @@ public:
protected:
static bool compilerInitialized;
TranslatorASM *createCompiler(ShShaderType type);
TranslatorASM *createCompiler(GLenum type);
void clear();
static GLenum parseType(const std::string &type);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment