Commit 2c958eef by alokp@chromium.org

Moved error-handling to a separate class - Diagnostics. We were earlier…

Moved error-handling to a separate class - Diagnostics. We were earlier returning errors as tokens, which did not work very well when an error occurred while parsing a preprocessor directive. Now all returned tokens are valid. Errors are reported via an abstract Diagnostics interface. Updated unit-tests with the new scheme. Review URL: https://codereview.appspot.com/6203089 git-svn-id: https://angleproject.googlecode.com/svn/trunk@1087 736b8ea6-26fd-11df-bfd4-992fa37f6226
parent 08365f68
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
'include_dirs': [ 'include_dirs': [
], ],
'sources': [ 'sources': [
'compiler/preprocessor/new/Diagnostics.cpp',
'compiler/preprocessor/new/Diagnostics.h',
'compiler/preprocessor/new/DirectiveParser.cpp', 'compiler/preprocessor/new/DirectiveParser.cpp',
'compiler/preprocessor/new/DirectiveParser.h', 'compiler/preprocessor/new/DirectiveParser.h',
'compiler/preprocessor/new/ExpressionParser.cpp', 'compiler/preprocessor/new/ExpressionParser.cpp',
...@@ -27,6 +29,7 @@ ...@@ -27,6 +29,7 @@
'compiler/preprocessor/new/MacroExpander.h', 'compiler/preprocessor/new/MacroExpander.h',
'compiler/preprocessor/new/Preprocessor.cpp', 'compiler/preprocessor/new/Preprocessor.cpp',
'compiler/preprocessor/new/Preprocessor.h', 'compiler/preprocessor/new/Preprocessor.h',
'compiler/preprocessor/new/SourceLocation.h',
'compiler/preprocessor/new/Token.cpp', 'compiler/preprocessor/new/Token.cpp',
'compiler/preprocessor/new/Token.h', 'compiler/preprocessor/new/Token.h',
'compiler/preprocessor/new/Tokenizer.cpp', 'compiler/preprocessor/new/Tokenizer.cpp',
......
//
// Copyright (c) 2012 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
#include "Diagnostics.h"
#include <cassert>
namespace pp
{

// Records a single diagnostic and hands it to the derived class
// for formatting and output.
void Diagnostics::report(ID id,
                         const SourceLocation& loc,
                         const std::string& text)
{
    // TODO(alokp): Keep a count of errors and warnings.
    print(id, loc, text);
}

// Classifies a diagnostic ID as an error or a warning based on
// which marker range (ERROR_BEGIN/END, WARNING_BEGIN/END) it
// falls strictly inside.
Diagnostics::Severity Diagnostics::severity(ID id)
{
    const bool isError = (id > ERROR_BEGIN) && (id < ERROR_END);
    if (isError)
        return ERROR;

    const bool isWarning = (id > WARNING_BEGIN) && (id < WARNING_END);
    if (isWarning)
        return WARNING;

    // An ID outside both ranges indicates a programming error;
    // fall back to ERROR in release builds.
    assert(false);
    return ERROR;
}

}  // namespace pp
//
// Copyright (c) 2012 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
#ifndef COMPILER_PREPROCESSOR_DIAGNOSTICS_H_
#define COMPILER_PREPROCESSOR_DIAGNOSTICS_H_
#include <string>
namespace pp
{
struct SourceLocation;
// Base class for reporting diagnostic messages.
// Derived classes are responsible for formatting and printing the messages.
class Diagnostics
{
  public:
    // Diagnostic identifiers. IDs strictly between ERROR_BEGIN and
    // ERROR_END are errors; IDs strictly between WARNING_BEGIN and
    // WARNING_END are warnings. The *_BEGIN/*_END values are range
    // markers only and must never be reported themselves.
    enum ID
    {
        ERROR_BEGIN,
        INTERNAL_ERROR,
        OUT_OF_MEMORY,
        INVALID_CHARACTER,
        INVALID_NUMBER,
        INVALID_DIRECTIVE,
        INVALID_EXPRESSION,
        DIVISION_BY_ZERO,
        EOF_IN_COMMENT,
        EOF_IN_DIRECTIVE,
        UNEXPECTED_TOKEN_IN_DIRECTIVE,
        ERROR_END,

        WARNING_BEGIN,
        WARNING_END
    };

    // Virtual destructor: this is a polymorphic base class (print() is
    // pure virtual), so deleting a derived instance through a
    // Diagnostics* would otherwise be undefined behavior.
    virtual ~Diagnostics() {}

    // Reports a single diagnostic message. Currently forwards directly
    // to print(); a count of errors and warnings may be added later.
    void report(ID id, const SourceLocation& loc, const std::string& text);

  protected:
    enum Severity
    {
        ERROR,
        WARNING
    };

    // Maps an ID to ERROR or WARNING using the enum ranges above.
    Severity severity(ID id);

    // Formats and outputs one diagnostic. Implemented by derived classes.
    virtual void print(ID id,
                       const SourceLocation& loc,
                       const std::string& text) = 0;
};
} // namespace pp
#endif // COMPILER_PREPROCESSOR_DIAGNOSTICS_H_
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <cassert> #include <cassert>
#include "Diagnostics.h"
#include "ExpressionParser.h" #include "ExpressionParser.h"
#include "MacroExpander.h" #include "MacroExpander.h"
#include "Token.h" #include "Token.h"
...@@ -48,6 +49,13 @@ class DefinedParser : public Lexer ...@@ -48,6 +49,13 @@ class DefinedParser : public Lexer
Lexer* mLexer; Lexer* mLexer;
}; };
DirectiveParser::DirectiveParser(Tokenizer* tokenizer,
Diagnostics* diagnostics) :
mTokenizer(tokenizer),
mDiagnostics(diagnostics)
{
}
void DirectiveParser::lex(Token* token) void DirectiveParser::lex(Token* token)
{ {
do do
...@@ -91,16 +99,24 @@ void DirectiveParser::parseDirective(Token* token) ...@@ -91,16 +99,24 @@ void DirectiveParser::parseDirective(Token* token)
else if (token->value == kDirectiveLine) else if (token->value == kDirectiveLine)
parseLine(token); parseLine(token);
else else
token->type = pp::Token::INVALID_DIRECTIVE; mDiagnostics->report(Diagnostics::INVALID_DIRECTIVE,
token->location,
token->value.c_str());
} }
if ((token->type != '\n') && (token->type != 0))
mDiagnostics->report(Diagnostics::UNEXPECTED_TOKEN_IN_DIRECTIVE,
token->location,
token->value.c_str());
while (token->type != '\n') while (token->type != '\n')
{ {
if (token->type == 0) { if (token->type == 0) {
//token->type = pp::Token::EOF_IN_DIRECTIVE; mDiagnostics->report(Diagnostics::EOF_IN_DIRECTIVE,
token->location,
token->value.c_str());
break; break;
} }
//token->type = pp::Token::INVALID_DIRECTIVE;
mTokenizer->lex(token); mTokenizer->lex(token);
} }
} }
...@@ -125,8 +141,8 @@ void DirectiveParser::parseIf(Token* token) ...@@ -125,8 +141,8 @@ void DirectiveParser::parseIf(Token* token)
assert(token->value == kDirectiveIf); assert(token->value == kDirectiveIf);
DefinedParser definedParser(mTokenizer); DefinedParser definedParser(mTokenizer);
MacroExpander macroExpander(&definedParser); MacroExpander macroExpander(&definedParser, mDiagnostics);
ExpressionParser expressionParser(&macroExpander); ExpressionParser expressionParser(&macroExpander, mDiagnostics);
macroExpander.lex(token); macroExpander.lex(token);
int expression = 0; int expression = 0;
...@@ -207,7 +223,7 @@ void DirectiveParser::parseLine(Token* token) ...@@ -207,7 +223,7 @@ void DirectiveParser::parseLine(Token* token)
{ {
// TODO(alokp): Implement me. // TODO(alokp): Implement me.
assert(token->value == kDirectiveLine); assert(token->value == kDirectiveLine);
MacroExpander macroExpander(mTokenizer); MacroExpander macroExpander(mTokenizer, mDiagnostics);
macroExpander.lex(token); macroExpander.lex(token);
} }
......
...@@ -13,12 +13,13 @@ ...@@ -13,12 +13,13 @@
namespace pp namespace pp
{ {
class Diagnostics;
class Tokenizer; class Tokenizer;
class DirectiveParser : public Lexer class DirectiveParser : public Lexer
{ {
public: public:
DirectiveParser(Tokenizer* tokenizer) : mTokenizer(tokenizer) { } DirectiveParser(Tokenizer* tokenizer, Diagnostics* diagnostics);
virtual void lex(Token* token); virtual void lex(Token* token);
...@@ -41,6 +42,7 @@ class DirectiveParser : public Lexer ...@@ -41,6 +42,7 @@ class DirectiveParser : public Lexer
void parseLine(Token* token); void parseLine(Token* token);
Tokenizer* mTokenizer; Tokenizer* mTokenizer;
Diagnostics* mDiagnostics;
}; };
} // namespace pp } // namespace pp
......
...@@ -93,12 +93,14 @@ ...@@ -93,12 +93,14 @@
#include <cassert> #include <cassert>
#include <sstream> #include <sstream>
#include "Diagnostics.h"
#include "Lexer.h" #include "Lexer.h"
#include "Token.h" #include "Token.h"
namespace { namespace {
struct Context struct Context
{ {
pp::Diagnostics* diagnostics;
pp::Lexer* lexer; pp::Lexer* lexer;
pp::Token* token; pp::Token* token;
int* result; int* result;
...@@ -454,9 +456,9 @@ static const yytype_int8 yyrhs[] = ...@@ -454,9 +456,9 @@ static const yytype_int8 yyrhs[] =
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */ /* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const yytype_uint8 yyrline[] = static const yytype_uint8 yyrline[] =
{ {
0, 72, 72, 79, 80, 83, 86, 89, 92, 95, 0, 74, 74, 81, 82, 85, 88, 91, 94, 97,
98, 101, 104, 107, 110, 113, 116, 119, 122, 125, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127,
136, 147, 150, 153, 156, 159, 162 140, 153, 156, 159, 162, 165, 168
}; };
#endif #endif
...@@ -1539,8 +1541,10 @@ yyreduce: ...@@ -1539,8 +1541,10 @@ yyreduce:
if ((yyvsp[(3) - (3)]) == 0) { if ((yyvsp[(3) - (3)]) == 0) {
std::stringstream stream; std::stringstream stream;
stream << (yyvsp[(1) - (3)]) << " % " << (yyvsp[(3) - (3)]); stream << (yyvsp[(1) - (3)]) << " % " << (yyvsp[(3) - (3)]);
context->token->type = pp::Token::DIVISION_BY_ZERO; std::string text = stream.str();
context->token->value = stream.str(); context->diagnostics->report(pp::Diagnostics::DIVISION_BY_ZERO,
context->token->location,
text.c_str());
YYABORT; YYABORT;
} else { } else {
(yyval) = (yyvsp[(1) - (3)]) % (yyvsp[(3) - (3)]); (yyval) = (yyvsp[(1) - (3)]) % (yyvsp[(3) - (3)]);
...@@ -1554,8 +1558,10 @@ yyreduce: ...@@ -1554,8 +1558,10 @@ yyreduce:
if ((yyvsp[(3) - (3)]) == 0) { if ((yyvsp[(3) - (3)]) == 0) {
std::stringstream stream; std::stringstream stream;
stream << (yyvsp[(1) - (3)]) << " / " << (yyvsp[(3) - (3)]); stream << (yyvsp[(1) - (3)]) << " / " << (yyvsp[(3) - (3)]);
context->token->type = pp::Token::DIVISION_BY_ZERO; std::string text = stream.str();
context->token->value = stream.str(); context->diagnostics->report(pp::Diagnostics::DIVISION_BY_ZERO,
context->token->location,
text.c_str());
YYABORT; YYABORT;
} else { } else {
(yyval) = (yyvsp[(1) - (3)]) / (yyvsp[(3) - (3)]); (yyval) = (yyvsp[(1) - (3)]) / (yyvsp[(3) - (3)]);
...@@ -1865,15 +1871,23 @@ int yylex(int* lvalp, Context* context) ...@@ -1865,15 +1871,23 @@ int yylex(int* lvalp, Context* context)
void yyerror(Context* context, const char* reason) void yyerror(Context* context, const char* reason)
{ {
context->token->type = pp::Token::INVALID_EXPRESSION; context->diagnostics->report(pp::Diagnostics::INVALID_EXPRESSION,
context->token->value = reason; context->token->location,
reason);
} }
namespace pp { namespace pp {
ExpressionParser::ExpressionParser(Lexer* lexer, Diagnostics* diagnostics) :
mLexer(lexer),
mDiagnostics(diagnostics)
{
}
bool ExpressionParser::parse(Token* token, int* result) bool ExpressionParser::parse(Token* token, int* result)
{ {
Context context; Context context;
context.diagnostics = mDiagnostics;
context.lexer = mLexer; context.lexer = mLexer;
context.token = token; context.token = token;
context.result = result; context.result = result;
...@@ -1885,14 +1899,12 @@ bool ExpressionParser::parse(Token* token, int* result) ...@@ -1885,14 +1899,12 @@ bool ExpressionParser::parse(Token* token, int* result)
break; break;
case 2: case 2:
token->type = pp::Token::OUT_OF_MEMORY; mDiagnostics->report(Diagnostics::OUT_OF_MEMORY, token->location, "");
token->value.clear();
break; break;
default: default:
assert(false); assert(false);
token->type = pp::Token::INTERNAL_ERROR; mDiagnostics->report(Diagnostics::INTERNAL_ERROR, token->location, "");
token->value.clear();
break; break;
} }
......
...@@ -12,13 +12,14 @@ ...@@ -12,13 +12,14 @@
namespace pp namespace pp
{ {
class Diagnostics;
class Lexer; class Lexer;
struct Token; struct Token;
class ExpressionParser class ExpressionParser
{ {
public: public:
ExpressionParser(Lexer* lexer) : mLexer(lexer) { } ExpressionParser(Lexer* lexer, Diagnostics* diagnostics);
bool parse(Token* token, int* result); bool parse(Token* token, int* result);
...@@ -26,6 +27,7 @@ class ExpressionParser ...@@ -26,6 +27,7 @@ class ExpressionParser
PP_DISALLOW_COPY_AND_ASSIGN(ExpressionParser); PP_DISALLOW_COPY_AND_ASSIGN(ExpressionParser);
Lexer* mLexer; Lexer* mLexer;
Diagnostics* mDiagnostics;
}; };
} // namespace pp } // namespace pp
......
...@@ -30,12 +30,14 @@ WHICH GENERATES THE GLSL ES preprocessor expression parser. ...@@ -30,12 +30,14 @@ WHICH GENERATES THE GLSL ES preprocessor expression parser.
#include <cassert> #include <cassert>
#include <sstream> #include <sstream>
#include "Diagnostics.h"
#include "Lexer.h" #include "Lexer.h"
#include "Token.h" #include "Token.h"
namespace { namespace {
struct Context struct Context
{ {
pp::Diagnostics* diagnostics;
pp::Lexer* lexer; pp::Lexer* lexer;
pp::Token* token; pp::Token* token;
int* result; int* result;
...@@ -126,8 +128,10 @@ expression ...@@ -126,8 +128,10 @@ expression
if ($3 == 0) { if ($3 == 0) {
std::stringstream stream; std::stringstream stream;
stream << $1 << " % " << $3; stream << $1 << " % " << $3;
context->token->type = pp::Token::DIVISION_BY_ZERO; std::string text = stream.str();
context->token->value = stream.str(); context->diagnostics->report(pp::Diagnostics::DIVISION_BY_ZERO,
context->token->location,
text.c_str());
YYABORT; YYABORT;
} else { } else {
$$ = $1 % $3; $$ = $1 % $3;
...@@ -137,8 +141,10 @@ expression ...@@ -137,8 +141,10 @@ expression
if ($3 == 0) { if ($3 == 0) {
std::stringstream stream; std::stringstream stream;
stream << $1 << " / " << $3; stream << $1 << " / " << $3;
context->token->type = pp::Token::DIVISION_BY_ZERO; std::string text = stream.str();
context->token->value = stream.str(); context->diagnostics->report(pp::Diagnostics::DIVISION_BY_ZERO,
context->token->location,
text.c_str());
YYABORT; YYABORT;
} else { } else {
$$ = $1 / $3; $$ = $1 / $3;
...@@ -212,15 +218,23 @@ int yylex(int* lvalp, Context* context) ...@@ -212,15 +218,23 @@ int yylex(int* lvalp, Context* context)
void yyerror(Context* context, const char* reason) void yyerror(Context* context, const char* reason)
{ {
context->token->type = pp::Token::INVALID_EXPRESSION; context->diagnostics->report(pp::Diagnostics::INVALID_EXPRESSION,
context->token->value = reason; context->token->location,
reason);
} }
namespace pp { namespace pp {
ExpressionParser::ExpressionParser(Lexer* lexer, Diagnostics* diagnostics) :
mLexer(lexer),
mDiagnostics(diagnostics)
{
}
bool ExpressionParser::parse(Token* token, int* result) bool ExpressionParser::parse(Token* token, int* result)
{ {
Context context; Context context;
context.diagnostics = mDiagnostics;
context.lexer = mLexer; context.lexer = mLexer;
context.token = token; context.token = token;
context.result = result; context.result = result;
...@@ -232,14 +246,12 @@ bool ExpressionParser::parse(Token* token, int* result) ...@@ -232,14 +246,12 @@ bool ExpressionParser::parse(Token* token, int* result)
break; break;
case 2: case 2:
token->type = pp::Token::OUT_OF_MEMORY; mDiagnostics->report(Diagnostics::OUT_OF_MEMORY, token->location, "");
token->value.clear();
break; break;
default: default:
assert(false); assert(false);
token->type = pp::Token::INTERNAL_ERROR; mDiagnostics->report(Diagnostics::INTERNAL_ERROR, token->location, "");
token->value.clear();
break; break;
} }
......
...@@ -9,7 +9,9 @@ ...@@ -9,7 +9,9 @@
namespace pp namespace pp
{ {
MacroExpander::MacroExpander(Lexer* lexer) : mLexer(lexer) MacroExpander::MacroExpander(Lexer* lexer, Diagnostics* diagnostics) :
mLexer(lexer),
mDiagnostics(diagnostics)
{ {
} }
......
...@@ -13,16 +13,20 @@ ...@@ -13,16 +13,20 @@
namespace pp namespace pp
{ {
class Diagnostics;
class MacroExpander : public Lexer class MacroExpander : public Lexer
{ {
public: public:
MacroExpander(Lexer* lexer); MacroExpander(Lexer* lexer, Diagnostics* diagnostics);
virtual void lex(Token* token); virtual void lex(Token* token);
private: private:
PP_DISALLOW_COPY_AND_ASSIGN(MacroExpander); PP_DISALLOW_COPY_AND_ASSIGN(MacroExpander);
Lexer* mLexer; Lexer* mLexer;
Diagnostics* mDiagnostics;
}; };
} // namespace pp } // namespace pp
......
...@@ -11,8 +11,11 @@ ...@@ -11,8 +11,11 @@
namespace pp namespace pp
{ {
Preprocessor::Preprocessor() : mDirectiveParser(&mTokenizer), Preprocessor::Preprocessor(Diagnostics* diagnostics) :
mMacroExpander(&mDirectiveParser) mDiagnostics(diagnostics),
mTokenizer(mDiagnostics),
mDirectiveParser(&mTokenizer, mDiagnostics),
mMacroExpander(&mDirectiveParser, mDiagnostics)
{ {
} }
...@@ -23,10 +26,9 @@ bool Preprocessor::init(int count, ...@@ -23,10 +26,9 @@ bool Preprocessor::init(int count,
return mTokenizer.init(count, string, length); return mTokenizer.init(count, string, length);
} }
int Preprocessor::lex(Token* token) void Preprocessor::lex(Token* token)
{ {
mMacroExpander.lex(token); mMacroExpander.lex(token);
return token->type;
} }
} // namespace pp } // namespace pp
......
...@@ -14,10 +14,12 @@ ...@@ -14,10 +14,12 @@
namespace pp namespace pp
{ {
class Diagnostics;
class Preprocessor class Preprocessor
{ {
public: public:
Preprocessor(); Preprocessor(Diagnostics* diagnostics);
// count: specifies the number of elements in the string and length arrays. // count: specifies the number of elements in the string and length arrays.
// string: specifies an array of pointers to strings. // string: specifies an array of pointers to strings.
...@@ -30,11 +32,12 @@ class Preprocessor ...@@ -30,11 +32,12 @@ class Preprocessor
// is null terminated. // is null terminated.
bool init(int count, const char* const string[], const int length[]); bool init(int count, const char* const string[], const int length[]);
int lex(Token* token); void lex(Token* token);
private: private:
PP_DISALLOW_COPY_AND_ASSIGN(Preprocessor); PP_DISALLOW_COPY_AND_ASSIGN(Preprocessor);
Diagnostics* mDiagnostics;
Tokenizer mTokenizer; Tokenizer mTokenizer;
DirectiveParser mDirectiveParser; DirectiveParser mDirectiveParser;
MacroExpander mMacroExpander; MacroExpander mMacroExpander;
......
//
// Copyright (c) 2012 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
#ifndef COMPILER_PREPROCESSOR_SOURCE_LOCATION_H_
#define COMPILER_PREPROCESSOR_SOURCE_LOCATION_H_
namespace pp
{

// Identifies a position in the preprocessor input as a pair of
// (source-string index, line number).
struct SourceLocation
{
    SourceLocation() : file(0), line(0) { }
    SourceLocation(int fileIndex, int lineNumber)
        : file(fileIndex), line(lineNumber) { }

    // True when both the source-string index and the line number match.
    bool equals(const SourceLocation& other) const
    {
        return file == other.file && line == other.line;
    }

    int file;  // Index of the source string (not a file descriptor).
    int line;
};

inline bool operator==(const SourceLocation& lhs, const SourceLocation& rhs)
{
    return (lhs.file == rhs.file) && (lhs.line == rhs.line);
}

inline bool operator!=(const SourceLocation& lhs, const SourceLocation& rhs)
{
    return !(lhs == rhs);
}

}  // namespace pp
#endif // COMPILER_PREPROCESSOR_SOURCE_LOCATION_H_
...@@ -10,6 +10,8 @@ ...@@ -10,6 +10,8 @@
#include <ostream> #include <ostream>
#include <string> #include <string>
#include "SourceLocation.h"
namespace pp namespace pp
{ {
...@@ -17,20 +19,7 @@ struct Token ...@@ -17,20 +19,7 @@ struct Token
{ {
enum Type enum Type
{ {
// Token IDs for error conditions are negative. LAST = 0, // EOF.
INTERNAL_ERROR = -1,
OUT_OF_MEMORY = -2,
INVALID_CHARACTER = -3,
INVALID_NUMBER = -4,
INVALID_DIRECTIVE = -5,
INVALID_EXPRESSION = -6,
DIVISION_BY_ZERO = -7,
EOF_IN_COMMENT = -8,
EOF_IN_DIRECTIVE = -9,
UNEXPECTED_TOKEN_IN_DIRECTIVE = -10,
// Indicates EOF.
LAST = 0,
IDENTIFIER = 258, IDENTIFIER = 258,
...@@ -63,17 +52,6 @@ struct Token ...@@ -63,17 +52,6 @@ struct Token
{ {
HAS_LEADING_SPACE = 1 << 0 HAS_LEADING_SPACE = 1 << 0
}; };
struct Location
{
Location() : file(0), line(0) { }
bool equals(const Location& other) const
{
return (file == other.file) && (line == other.line);
}
int file;
int line;
};
Token() : type(0), flags(0) { } Token() : type(0), flags(0) { }
...@@ -81,7 +59,7 @@ struct Token ...@@ -81,7 +59,7 @@ struct Token
{ {
type = 0; type = 0;
flags = 0; flags = 0;
location = Location(); location = SourceLocation();
value.clear(); value.clear();
} }
...@@ -104,7 +82,7 @@ struct Token ...@@ -104,7 +82,7 @@ struct Token
int type; int type;
int flags; int flags;
Location location; SourceLocation location;
std::string value; std::string value;
}; };
......
...@@ -515,10 +515,12 @@ IF YOU MODIFY THIS FILE YOU ALSO NEED TO RUN generate_parser.sh. ...@@ -515,10 +515,12 @@ IF YOU MODIFY THIS FILE YOU ALSO NEED TO RUN generate_parser.sh.
*/ */
#include "Tokenizer.h" #include "Tokenizer.h"
#include "Diagnostics.h"
#include "Token.h" #include "Token.h"
typedef std::string YYSTYPE; typedef std::string YYSTYPE;
typedef pp::Token::Location YYLTYPE; typedef pp::SourceLocation YYLTYPE;
// Use the unused yycolumn variable to track file (string) number. // Use the unused yycolumn variable to track file (string) number.
#define yyfileno yycolumn #define yyfileno yycolumn
...@@ -884,11 +886,18 @@ YY_RULE_SETUP ...@@ -884,11 +886,18 @@ YY_RULE_SETUP
{ ++yylineno; } { ++yylineno; }
YY_BREAK YY_BREAK
case YY_STATE_EOF(COMMENT): case YY_STATE_EOF(COMMENT):
{ return pp::Token::EOF_IN_COMMENT; } {
yyextra->diagnostics->report(pp::Diagnostics::EOF_IN_COMMENT,
pp::SourceLocation(yyfileno, yylineno), "");
yyterminate();
}
YY_BREAK YY_BREAK
case 6: case 6:
YY_RULE_SETUP YY_RULE_SETUP
{ yyextra->leadingSpace = true; BEGIN(INITIAL); } {
yyextra->leadingSpace = true;
BEGIN(INITIAL);
}
YY_BREAK YY_BREAK
case 7: case 7:
YY_RULE_SETUP YY_RULE_SETUP
...@@ -897,8 +906,9 @@ YY_RULE_SETUP ...@@ -897,8 +906,9 @@ YY_RULE_SETUP
if (yyextra->lineStart) { if (yyextra->lineStart) {
return yytext[0]; return yytext[0];
} else { } else {
yylval->assign(yytext, yyleng); yyextra->diagnostics->report(pp::Diagnostics::INVALID_CHARACTER,
return pp::Token::INVALID_CHARACTER; pp::SourceLocation(yyfileno, yylineno),
std::string(yytext, yyleng));
} }
} }
YY_BREAK YY_BREAK
...@@ -928,8 +938,9 @@ YY_RULE_SETUP ...@@ -928,8 +938,9 @@ YY_RULE_SETUP
case 11: case 11:
YY_RULE_SETUP YY_RULE_SETUP
{ {
yylval->assign(yytext, yyleng); yyextra->diagnostics->report(pp::Diagnostics::INVALID_NUMBER,
return pp::Token::INVALID_NUMBER; pp::SourceLocation(yyfileno, yylineno),
std::string(yytext, yyleng));
} }
YY_BREAK YY_BREAK
case 12: case 12:
...@@ -1035,8 +1046,9 @@ YY_RULE_SETUP ...@@ -1035,8 +1046,9 @@ YY_RULE_SETUP
case 36: case 36:
YY_RULE_SETUP YY_RULE_SETUP
{ {
yylval->assign(yytext, yyleng); yyextra->diagnostics->report(pp::Diagnostics::INVALID_CHARACTER,
return pp::Token::INVALID_CHARACTER; pp::SourceLocation(yyfileno, yylineno),
std::string(yytext, yyleng));
} }
YY_BREAK YY_BREAK
case YY_STATE_EOF(INITIAL): case YY_STATE_EOF(INITIAL):
...@@ -2183,8 +2195,9 @@ void ppfree (void * ptr , yyscan_t yyscanner) ...@@ -2183,8 +2195,9 @@ void ppfree (void * ptr , yyscan_t yyscanner)
namespace pp { namespace pp {
Tokenizer::Tokenizer() : mHandle(0) Tokenizer::Tokenizer(Diagnostics* diagnostics) : mHandle(0)
{ {
mContext.diagnostics = diagnostics;
} }
Tokenizer::~Tokenizer() Tokenizer::~Tokenizer()
......
...@@ -14,11 +14,15 @@ ...@@ -14,11 +14,15 @@
namespace pp namespace pp
{ {
class Diagnostics;
class Tokenizer : public Lexer class Tokenizer : public Lexer
{ {
public: public:
struct Context struct Context
{ {
Diagnostics* diagnostics;
Input input; Input input;
// The location where yytext points to. Token location should track // The location where yytext points to. Token location should track
// scanLoc instead of Input::mReadLoc because they may not be the same // scanLoc instead of Input::mReadLoc because they may not be the same
...@@ -29,7 +33,7 @@ class Tokenizer : public Lexer ...@@ -29,7 +33,7 @@ class Tokenizer : public Lexer
bool lineStart; bool lineStart;
}; };
Tokenizer(); Tokenizer(Diagnostics* diagnostics);
~Tokenizer(); ~Tokenizer();
bool init(int count, const char* const string[], const int length[]); bool init(int count, const char* const string[], const int length[]);
......
...@@ -24,10 +24,12 @@ IF YOU MODIFY THIS FILE YOU ALSO NEED TO RUN generate_parser.sh. ...@@ -24,10 +24,12 @@ IF YOU MODIFY THIS FILE YOU ALSO NEED TO RUN generate_parser.sh.
%{ %{
#include "Tokenizer.h" #include "Tokenizer.h"
#include "Diagnostics.h"
#include "Token.h" #include "Token.h"
typedef std::string YYSTYPE; typedef std::string YYSTYPE;
typedef pp::Token::Location YYLTYPE; typedef pp::SourceLocation YYLTYPE;
// Use the unused yycolumn variable to track file (string) number. // Use the unused yycolumn variable to track file (string) number.
#define yyfileno yycolumn #define yyfileno yycolumn
...@@ -89,16 +91,24 @@ FRACTIONAL_CONSTANT ({DIGIT}*"."{DIGIT}+)|({DIGIT}+".") ...@@ -89,16 +91,24 @@ FRACTIONAL_CONSTANT ({DIGIT}*"."{DIGIT}+)|({DIGIT}+".")
<COMMENT>[^*\r\n]+ <COMMENT>[^*\r\n]+
<COMMENT>"*" <COMMENT>"*"
<COMMENT>{NEWLINE} { ++yylineno; } <COMMENT>{NEWLINE} { ++yylineno; }
<COMMENT><<EOF>> { return pp::Token::EOF_IN_COMMENT; } <COMMENT><<EOF>> {
<COMMENT>"*/" { yyextra->leadingSpace = true; BEGIN(INITIAL); } yyextra->diagnostics->report(pp::Diagnostics::EOF_IN_COMMENT,
pp::SourceLocation(yyfileno, yylineno), "");
yyterminate();
}
<COMMENT>"*/" {
yyextra->leadingSpace = true;
BEGIN(INITIAL);
}
# { # {
// # is only valid at start of line for preprocessor directives. // # is only valid at start of line for preprocessor directives.
if (yyextra->lineStart) { if (yyextra->lineStart) {
return yytext[0]; return yytext[0];
} else { } else {
yylval->assign(yytext, yyleng); yyextra->diagnostics->report(pp::Diagnostics::INVALID_CHARACTER,
return pp::Token::INVALID_CHARACTER; pp::SourceLocation(yyfileno, yylineno),
std::string(yytext, yyleng));
} }
} }
...@@ -120,8 +130,9 @@ FRACTIONAL_CONSTANT ({DIGIT}*"."{DIGIT}+)|({DIGIT}+".") ...@@ -120,8 +130,9 @@ FRACTIONAL_CONSTANT ({DIGIT}*"."{DIGIT}+)|({DIGIT}+".")
/* Anything that starts with a {DIGIT} or .{DIGIT} must be a number. */ /* Anything that starts with a {DIGIT} or .{DIGIT} must be a number. */
/* Rule to catch all invalid integers and floats. */ /* Rule to catch all invalid integers and floats. */
({DIGIT}+[_a-zA-Z0-9.]*)|("."{DIGIT}+[_a-zA-Z0-9.]*) { ({DIGIT}+[_a-zA-Z0-9.]*)|("."{DIGIT}+[_a-zA-Z0-9.]*) {
yylval->assign(yytext, yyleng); yyextra->diagnostics->report(pp::Diagnostics::INVALID_NUMBER,
return pp::Token::INVALID_NUMBER; pp::SourceLocation(yyfileno, yylineno),
std::string(yytext, yyleng));
} }
"++" { return pp::Token::OP_INC; } "++" { return pp::Token::OP_INC; }
...@@ -155,8 +166,9 @@ FRACTIONAL_CONSTANT ({DIGIT}*"."{DIGIT}+)|({DIGIT}+".") ...@@ -155,8 +166,9 @@ FRACTIONAL_CONSTANT ({DIGIT}*"."{DIGIT}+)|({DIGIT}+".")
} }
. { . {
yylval->assign(yytext, yyleng); yyextra->diagnostics->report(pp::Diagnostics::INVALID_CHARACTER,
return pp::Token::INVALID_CHARACTER; pp::SourceLocation(yyfileno, yylineno),
std::string(yytext, yyleng));
} }
<<EOF>> { yyterminate(); } <<EOF>> { yyterminate(); }
...@@ -165,8 +177,9 @@ FRACTIONAL_CONSTANT ({DIGIT}*"."{DIGIT}+)|({DIGIT}+".") ...@@ -165,8 +177,9 @@ FRACTIONAL_CONSTANT ({DIGIT}*"."{DIGIT}+)|({DIGIT}+".")
namespace pp { namespace pp {
Tokenizer::Tokenizer() : mHandle(0) Tokenizer::Tokenizer(Diagnostics* diagnostics) : mHandle(0)
{ {
mContext.diagnostics = diagnostics;
} }
Tokenizer::~Tokenizer() Tokenizer::~Tokenizer()
......
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
'preprocessor_tests/identifier_test.cpp', 'preprocessor_tests/identifier_test.cpp',
'preprocessor_tests/input_test.cpp', 'preprocessor_tests/input_test.cpp',
'preprocessor_tests/location_test.cpp', 'preprocessor_tests/location_test.cpp',
'preprocessor_tests/MockDiagnostics.h',
'preprocessor_tests/number_test.cpp', 'preprocessor_tests/number_test.cpp',
'preprocessor_tests/operator_test.cpp', 'preprocessor_tests/operator_test.cpp',
'preprocessor_tests/token_test.cpp', 'preprocessor_tests/token_test.cpp',
......
//
// Copyright (c) 2012 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
#ifndef PREPROCESSOR_TESTS_MOCK_DIAGNOSTICS_H_
#define PREPROCESSOR_TESTS_MOCK_DIAGNOSTICS_H_
#include "gmock/gmock.h"
#include "Diagnostics.h"
// Google Mock implementation of pp::Diagnostics for unit tests.
// Tests set expectations on print() to verify exactly which
// diagnostics (id, location, text) the preprocessor reports.
class MockDiagnostics : public pp::Diagnostics
{
  public:
    // Mocks the pure-virtual formatting/output hook of pp::Diagnostics.
    MOCK_METHOD3(print,
                 void(ID id, const pp::SourceLocation& loc, const std::string& text));
};
#endif // PREPROCESSOR_TESTS_MOCK_DIAGNOSTICS_H_
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
#include <climits> #include <climits>
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "MockDiagnostics.h"
#include "Preprocessor.h" #include "Preprocessor.h"
#include "Token.h" #include "Token.h"
...@@ -23,81 +25,75 @@ static const char kPunctuators[] = { ...@@ -23,81 +25,75 @@ static const char kPunctuators[] = {
static const int kNumPunctuators = static const int kNumPunctuators =
sizeof(kPunctuators) / sizeof(kPunctuators[0]); sizeof(kPunctuators) / sizeof(kPunctuators[0]);
bool isPunctuator(char c)
{
static const char* kPunctuatorBeg = kPunctuators;
static const char* kPunctuatorEnd = kPunctuators + kNumPunctuators;
return std::find(kPunctuatorBeg, kPunctuatorEnd, c) != kPunctuatorEnd;
}
static const char kWhitespaces[] = {' ', '\t', '\v', '\f', '\n', '\r'}; static const char kWhitespaces[] = {' ', '\t', '\v', '\f', '\n', '\r'};
static const int kNumWhitespaces = static const int kNumWhitespaces =
sizeof(kWhitespaces) / sizeof(kWhitespaces[0]); sizeof(kWhitespaces) / sizeof(kWhitespaces[0]);
bool isWhitespace(char c)
{
static const char* kWhitespaceBeg = kWhitespaces;
static const char* kWhitespaceEnd = kWhitespaces + kNumWhitespaces;
return std::find(kWhitespaceBeg, kWhitespaceEnd, c) != kWhitespaceEnd;
}
TEST_P(CharTest, Identified) TEST_P(CharTest, Identified)
{ {
std::string str(1, GetParam()); std::string str(1, GetParam());
const char* cstr = str.c_str(); const char* cstr = str.c_str();
int length = 1; int length = 1;
pp::Preprocessor preprocessor; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor(&diagnostics);
// Note that we pass the length param as well because the invalid // Note that we pass the length param as well because the invalid
// string may contain the null character. // string may contain the null character.
ASSERT_TRUE(preprocessor.init(1, &cstr, &length)); ASSERT_TRUE(preprocessor.init(1, &cstr, &length));
pp::Token token; int expectedType = pp::Token::LAST;
int ret = preprocessor.lex(&token); std::string expectedValue;
// Handle identifier. if (str[0] == '#')
if ((cstr[0] == '_') ||
((cstr[0] >= 'a') && (cstr[0] <= 'z')) ||
((cstr[0] >= 'A') && (cstr[0] <= 'Z')))
{ {
EXPECT_EQ(pp::Token::IDENTIFIER, ret); // Lone '#' is ignored.
EXPECT_EQ(pp::Token::IDENTIFIER, token.type);
EXPECT_EQ(cstr[0], token.value[0]);
return;
} }
else if ((str[0] == '_') ||
// Handle numbers. ((str[0] >= 'a') && (str[0] <= 'z')) ||
if (cstr[0] >= '0' && cstr[0] <= '9') ((str[0] >= 'A') && (str[0] <= 'Z')))
{ {
EXPECT_EQ(pp::Token::CONST_INT, ret); expectedType = pp::Token::IDENTIFIER;
EXPECT_EQ(pp::Token::CONST_INT, token.type); expectedValue = str;
EXPECT_EQ(cstr[0], token.value[0]);
return;
} }
else if (str[0] >= '0' && str[0] <= '9')
// Handle punctuators.
const char* lastIter = kPunctuators + kNumPunctuators;
const char* iter = std::find(kPunctuators, lastIter, cstr[0]);
if (iter != lastIter)
{ {
EXPECT_EQ(cstr[0], ret); expectedType = pp::Token::CONST_INT;
EXPECT_EQ(cstr[0], token.type); expectedValue = str;
EXPECT_TRUE(token.value.empty());
return;
} }
else if (isPunctuator(str[0]))
// Handle whitespace. {
lastIter = kWhitespaces + kNumWhitespaces; expectedType = str[0];
iter = std::find(kWhitespaces, lastIter, cstr[0]); }
if (iter != lastIter) else if (isWhitespace(str[0]))
{ {
// Whitespace is ignored. // Whitespace is ignored.
EXPECT_EQ(pp::Token::LAST, ret);
EXPECT_EQ(pp::Token::LAST, token.type);
EXPECT_TRUE(token.value.empty());
return;
} }
else
// Handle number sign.
if (cstr[0] == '#')
{ {
// Lone '#' is ignored. // Everything else is invalid.
EXPECT_EQ(pp::Token::LAST, ret); using testing::_;
EXPECT_EQ(pp::Token::LAST, token.type); EXPECT_CALL(diagnostics,
EXPECT_TRUE(token.value.empty()); print(pp::Diagnostics::INVALID_CHARACTER, _, str));
return;
} }
// Everything else is invalid. pp::Token token;
EXPECT_EQ(pp::Token::INVALID_CHARACTER, ret); preprocessor.lex(&token);
EXPECT_EQ(pp::Token::INVALID_CHARACTER, token.type); EXPECT_EQ(expectedType, token.type);
EXPECT_EQ(cstr[0], token.value[0]); EXPECT_EQ(expectedValue, token.value);
}; };
// Note +1 for the max-value in range. It is there because the max-value // Note +1 for the max-value in range. It is there because the max-value
......
...@@ -5,6 +5,8 @@ ...@@ -5,6 +5,8 @@
// //
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "MockDiagnostics.h"
#include "Preprocessor.h" #include "Preprocessor.h"
#include "Token.h" #include "Token.h"
...@@ -16,10 +18,12 @@ TEST_P(CommentTest, CommentIgnored) ...@@ -16,10 +18,12 @@ TEST_P(CommentTest, CommentIgnored)
{ {
const char* str = GetParam(); const char* str = GetParam();
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &str, 0)); ASSERT_TRUE(preprocessor.init(1, &str, 0));
EXPECT_EQ(pp::Token::LAST, preprocessor.lex(&token));
pp::Token token;
preprocessor.lex(&token);
EXPECT_EQ(pp::Token::LAST, token.type); EXPECT_EQ(pp::Token::LAST, token.type);
} }
...@@ -42,10 +46,12 @@ TEST(BlockComment, CommentReplacedWithSpace) ...@@ -42,10 +46,12 @@ TEST(BlockComment, CommentReplacedWithSpace)
{ {
const char* str = "/*foo*/bar"; const char* str = "/*foo*/bar";
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &str, 0)); ASSERT_TRUE(preprocessor.init(1, &str, 0));
EXPECT_EQ(pp::Token::IDENTIFIER, preprocessor.lex(&token));
pp::Token token;
preprocessor.lex(&token);
EXPECT_EQ(pp::Token::IDENTIFIER, token.type); EXPECT_EQ(pp::Token::IDENTIFIER, token.type);
EXPECT_STREQ("bar", token.value.c_str()); EXPECT_STREQ("bar", token.value.c_str());
EXPECT_TRUE(token.hasLeadingSpace()); EXPECT_TRUE(token.hasLeadingSpace());
...@@ -55,9 +61,13 @@ TEST(BlockComment, UnterminatedComment) ...@@ -55,9 +61,13 @@ TEST(BlockComment, UnterminatedComment)
{ {
const char* str = "/*foo"; const char* str = "/*foo";
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &str, 0)); ASSERT_TRUE(preprocessor.init(1, &str, 0));
EXPECT_EQ(pp::Token::EOF_IN_COMMENT, preprocessor.lex(&token));
EXPECT_EQ(pp::Token::EOF_IN_COMMENT, token.type); using testing::_;
EXPECT_CALL(diagnostics, print(pp::Diagnostics::EOF_IN_COMMENT, _, _));
pp::Token token;
preprocessor.lex(&token);
} }
...@@ -5,15 +5,19 @@ ...@@ -5,15 +5,19 @@
// //
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "MockDiagnostics.h"
#include "Preprocessor.h" #include "Preprocessor.h"
#include "Token.h" #include "Token.h"
static void PreprocessAndVerifyIdentifier(const char* str) static void PreprocessAndVerifyIdentifier(const char* str)
{ {
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &str, 0)); ASSERT_TRUE(preprocessor.init(1, &str, 0));
EXPECT_EQ(pp::Token::IDENTIFIER, preprocessor.lex(&token));
pp::Token token;
preprocessor.lex(&token);
EXPECT_EQ(pp::Token::IDENTIFIER, token.type); EXPECT_EQ(pp::Token::IDENTIFIER, token.type);
EXPECT_STREQ(str, token.value.c_str()); EXPECT_STREQ(str, token.value.c_str());
} }
......
...@@ -5,27 +5,33 @@ ...@@ -5,27 +5,33 @@
// //
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "MockDiagnostics.h"
#include "Preprocessor.h" #include "Preprocessor.h"
#include "Token.h" #include "Token.h"
TEST(InputTest, NegativeCount) TEST(InputTest, NegativeCount)
{ {
pp::Preprocessor preprocessor; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor(&diagnostics);
EXPECT_FALSE(preprocessor.init(-1, NULL, NULL)); EXPECT_FALSE(preprocessor.init(-1, NULL, NULL));
} }
TEST(InputTest, ZeroCount) TEST(InputTest, ZeroCount)
{ {
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
EXPECT_TRUE(preprocessor.init(0, NULL, NULL)); EXPECT_TRUE(preprocessor.init(0, NULL, NULL));
EXPECT_EQ(pp::Token::LAST, preprocessor.lex(&token));
pp::Token token;
preprocessor.lex(&token);
EXPECT_EQ(pp::Token::LAST, token.type); EXPECT_EQ(pp::Token::LAST, token.type);
} }
TEST(InputTest, NullString) TEST(InputTest, NullString)
{ {
pp::Preprocessor preprocessor; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor(&diagnostics);
EXPECT_FALSE(preprocessor.init(1, NULL, NULL)); EXPECT_FALSE(preprocessor.init(1, NULL, NULL));
} }
......
...@@ -5,18 +5,21 @@ ...@@ -5,18 +5,21 @@
// //
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "MockDiagnostics.h"
#include "Preprocessor.h" #include "Preprocessor.h"
#include "Token.h" #include "Token.h"
static void PreprocessAndVerifyLocation(int count, static void PreprocessAndVerifyLocation(int count,
const char* const string[], const char* const string[],
const int length[], const int length[],
pp::Token::Location location) const pp::SourceLocation& location)
{ {
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(count, string, length)); ASSERT_TRUE(preprocessor.init(count, string, length));
EXPECT_EQ(pp::Token::IDENTIFIER, preprocessor.lex(&token));
pp::Token token;
preprocessor.lex(&token);
EXPECT_EQ(pp::Token::IDENTIFIER, token.type); EXPECT_EQ(pp::Token::IDENTIFIER, token.type);
EXPECT_STREQ("foo", token.value.c_str()); EXPECT_STREQ("foo", token.value.c_str());
...@@ -27,7 +30,7 @@ static void PreprocessAndVerifyLocation(int count, ...@@ -27,7 +30,7 @@ static void PreprocessAndVerifyLocation(int count,
TEST(LocationTest, String0_Line1) TEST(LocationTest, String0_Line1)
{ {
const char* str = "foo"; const char* str = "foo";
pp::Token::Location loc; pp::SourceLocation loc;
loc.file = 0; loc.file = 0;
loc.line = 1; loc.line = 1;
...@@ -38,7 +41,7 @@ TEST(LocationTest, String0_Line1) ...@@ -38,7 +41,7 @@ TEST(LocationTest, String0_Line1)
TEST(LocationTest, String0_Line2) TEST(LocationTest, String0_Line2)
{ {
const char* str = "\nfoo"; const char* str = "\nfoo";
pp::Token::Location loc; pp::SourceLocation loc;
loc.file = 0; loc.file = 0;
loc.line = 2; loc.line = 2;
...@@ -49,7 +52,7 @@ TEST(LocationTest, String0_Line2) ...@@ -49,7 +52,7 @@ TEST(LocationTest, String0_Line2)
TEST(LocationTest, String1_Line1) TEST(LocationTest, String1_Line1)
{ {
const char* const str[] = {"\n\n", "foo"}; const char* const str[] = {"\n\n", "foo"};
pp::Token::Location loc; pp::SourceLocation loc;
loc.file = 1; loc.file = 1;
loc.line = 1; loc.line = 1;
...@@ -60,7 +63,7 @@ TEST(LocationTest, String1_Line1) ...@@ -60,7 +63,7 @@ TEST(LocationTest, String1_Line1)
TEST(LocationTest, String1_Line2) TEST(LocationTest, String1_Line2)
{ {
const char* const str[] = {"\n\n", "\nfoo"}; const char* const str[] = {"\n\n", "\nfoo"};
pp::Token::Location loc; pp::SourceLocation loc;
loc.file = 1; loc.file = 1;
loc.line = 2; loc.line = 2;
...@@ -71,7 +74,7 @@ TEST(LocationTest, String1_Line2) ...@@ -71,7 +74,7 @@ TEST(LocationTest, String1_Line2)
TEST(LocationTest, NewlineInsideCommentCounted) TEST(LocationTest, NewlineInsideCommentCounted)
{ {
const char* str = "/*\n\n*/foo"; const char* str = "/*\n\n*/foo";
pp::Token::Location loc; pp::SourceLocation loc;
loc.file = 0; loc.file = 0;
loc.line = 3; loc.line = 3;
...@@ -83,15 +86,16 @@ TEST(LocationTest, ErrorLocationAfterComment) ...@@ -83,15 +86,16 @@ TEST(LocationTest, ErrorLocationAfterComment)
{ {
const char* str = "/*\n\n*/@"; const char* str = "/*\n\n*/@";
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &str, 0)); ASSERT_TRUE(preprocessor.init(1, &str, 0));
EXPECT_EQ(pp::Token::INVALID_CHARACTER, preprocessor.lex(&token));
EXPECT_EQ(pp::Token::INVALID_CHARACTER, token.type);
EXPECT_STREQ("@", token.value.c_str());
EXPECT_EQ(0, token.location.file); pp::Diagnostics::ID id(pp::Diagnostics::INVALID_CHARACTER);
EXPECT_EQ(3, token.location.line); pp::SourceLocation loc(0, 3);
EXPECT_CALL(diagnostics, print(id, loc, "@"));
pp::Token token;
preprocessor.lex(&token);
} }
// The location of a token straddling two or more strings is that of the // The location of a token straddling two or more strings is that of the
...@@ -100,7 +104,7 @@ TEST(LocationTest, ErrorLocationAfterComment) ...@@ -100,7 +104,7 @@ TEST(LocationTest, ErrorLocationAfterComment)
TEST(LocationTest, TokenStraddlingTwoStrings) TEST(LocationTest, TokenStraddlingTwoStrings)
{ {
const char* const str[] = {"f", "oo"}; const char* const str[] = {"f", "oo"};
pp::Token::Location loc; pp::SourceLocation loc;
loc.file = 0; loc.file = 0;
loc.line = 1; loc.line = 1;
...@@ -111,7 +115,7 @@ TEST(LocationTest, TokenStraddlingTwoStrings) ...@@ -111,7 +115,7 @@ TEST(LocationTest, TokenStraddlingTwoStrings)
TEST(LocationTest, TokenStraddlingThreeStrings) TEST(LocationTest, TokenStraddlingThreeStrings)
{ {
const char* const str[] = {"f", "o", "o"}; const char* const str[] = {"f", "o", "o"};
pp::Token::Location loc; pp::SourceLocation loc;
loc.file = 0; loc.file = 0;
loc.line = 1; loc.line = 1;
......
...@@ -5,6 +5,8 @@ ...@@ -5,6 +5,8 @@
// //
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "MockDiagnostics.h"
#include "Preprocessor.h" #include "Preprocessor.h"
#include "Token.h" #include "Token.h"
...@@ -18,12 +20,15 @@ TEST_P(InvalidNumberTest, InvalidNumberIdentified) ...@@ -18,12 +20,15 @@ TEST_P(InvalidNumberTest, InvalidNumberIdentified)
{ {
const char* str = GetParam(); const char* str = GetParam();
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &str, 0)); ASSERT_TRUE(preprocessor.init(1, &str, 0));
EXPECT_EQ(pp::Token::INVALID_NUMBER, preprocessor.lex(&token));
EXPECT_EQ(pp::Token::INVALID_NUMBER, token.type); using testing::_;
EXPECT_STREQ(str, token.value.c_str()); EXPECT_CALL(diagnostics, print(pp::Diagnostics::INVALID_NUMBER, _, str));
pp::Token token;
preprocessor.lex(&token);
} }
INSTANTIATE_TEST_CASE_P(InvalidIntegers, InvalidNumberTest, INSTANTIATE_TEST_CASE_P(InvalidIntegers, InvalidNumberTest,
...@@ -48,10 +53,12 @@ TEST_P(IntegerTest, IntegerIdentified) ...@@ -48,10 +53,12 @@ TEST_P(IntegerTest, IntegerIdentified)
str.push_back(std::tr1::get<1>(GetParam())); // digit. str.push_back(std::tr1::get<1>(GetParam())); // digit.
const char* cstr = str.c_str(); const char* cstr = str.c_str();
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &cstr, 0)); ASSERT_TRUE(preprocessor.init(1, &cstr, 0));
EXPECT_EQ(pp::Token::CONST_INT, preprocessor.lex(&token));
pp::Token token;
preprocessor.lex(&token);
EXPECT_EQ(pp::Token::CONST_INT, token.type); EXPECT_EQ(pp::Token::CONST_INT, token.type);
EXPECT_STREQ(cstr, token.value.c_str()); EXPECT_STREQ(cstr, token.value.c_str());
} }
...@@ -85,10 +92,12 @@ INSTANTIATE_TEST_CASE_P(HexadecimalInteger_A_F, ...@@ -85,10 +92,12 @@ INSTANTIATE_TEST_CASE_P(HexadecimalInteger_A_F,
static void PreprocessAndVerifyFloat(const char* str) static void PreprocessAndVerifyFloat(const char* str)
{ {
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &str, 0)); ASSERT_TRUE(preprocessor.init(1, &str, 0));
EXPECT_EQ(pp::Token::CONST_FLOAT, preprocessor.lex(&token));
pp::Token token;
preprocessor.lex(&token);
EXPECT_EQ(pp::Token::CONST_FLOAT, token.type); EXPECT_EQ(pp::Token::CONST_FLOAT, token.type);
EXPECT_STREQ(str, token.value.c_str()); EXPECT_STREQ(str, token.value.c_str());
} }
......
...@@ -5,6 +5,8 @@ ...@@ -5,6 +5,8 @@
// //
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "MockDiagnostics.h"
#include "Preprocessor.h" #include "Preprocessor.h"
#include "Token.h" #include "Token.h"
...@@ -24,10 +26,12 @@ TEST_P(OperatorTest, Identified) ...@@ -24,10 +26,12 @@ TEST_P(OperatorTest, Identified)
{ {
OperatorTestParam param = GetParam(); OperatorTestParam param = GetParam();
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &param.str, 0)); ASSERT_TRUE(preprocessor.init(1, &param.str, 0));
EXPECT_EQ(param.op, preprocessor.lex(&token));
pp::Token token;
preprocessor.lex(&token);
EXPECT_EQ(param.op, token.type); EXPECT_EQ(param.op, token.type);
} }
......
...@@ -5,6 +5,8 @@ ...@@ -5,6 +5,8 @@
// //
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "MockDiagnostics.h"
#include "Preprocessor.h" #include "Preprocessor.h"
#include "Token.h" #include "Token.h"
...@@ -29,11 +31,13 @@ TEST_P(SpaceCharTest, SpaceIgnored) ...@@ -29,11 +31,13 @@ TEST_P(SpaceCharTest, SpaceIgnored)
str.append(identifier); str.append(identifier);
const char* cstr = str.c_str(); const char* cstr = str.c_str();
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &cstr, 0)); ASSERT_TRUE(preprocessor.init(1, &cstr, 0));
pp::Token token;
// Identifier "foo" is returned after ignoring the whitespace characters. // Identifier "foo" is returned after ignoring the whitespace characters.
EXPECT_EQ(pp::Token::IDENTIFIER, preprocessor.lex(&token)); preprocessor.lex(&token);
EXPECT_EQ(pp::Token::IDENTIFIER, token.type); EXPECT_EQ(pp::Token::IDENTIFIER, token.type);
EXPECT_STREQ(identifier, token.value.c_str()); EXPECT_STREQ(identifier, token.value.c_str());
// The whitespace character is however recorded with the next token. // The whitespace character is however recorded with the next token.
...@@ -66,11 +70,13 @@ TEST_P(SpaceStringTest, SpaceIgnored) ...@@ -66,11 +70,13 @@ TEST_P(SpaceStringTest, SpaceIgnored)
str.append(identifier); str.append(identifier);
const char* cstr = str.c_str(); const char* cstr = str.c_str();
pp::Token token; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor; pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &cstr, 0)); ASSERT_TRUE(preprocessor.init(1, &cstr, 0));
pp::Token token;
preprocessor.lex(&token);
// Identifier "foo" is returned after ignoring the whitespace characters. // Identifier "foo" is returned after ignoring the whitespace characters.
EXPECT_EQ(pp::Token::IDENTIFIER, preprocessor.lex(&token));
EXPECT_EQ(pp::Token::IDENTIFIER, token.type); EXPECT_EQ(pp::Token::IDENTIFIER, token.type);
EXPECT_STREQ(identifier, token.value.c_str()); EXPECT_STREQ(identifier, token.value.c_str());
// The whitespace character is however recorded with the next token. // The whitespace character is however recorded with the next token.
...@@ -93,23 +99,24 @@ TEST(SpaceTest, LeadingSpace) ...@@ -93,23 +99,24 @@ TEST(SpaceTest, LeadingSpace)
const char* str = " foo+ -bar"; const char* str = " foo+ -bar";
pp::Token token; pp::Token token;
pp::Preprocessor preprocessor; MockDiagnostics diagnostics;
pp::Preprocessor preprocessor(&diagnostics);
ASSERT_TRUE(preprocessor.init(1, &str, 0)); ASSERT_TRUE(preprocessor.init(1, &str, 0));
EXPECT_EQ(pp::Token::IDENTIFIER, preprocessor.lex(&token)); preprocessor.lex(&token);
EXPECT_EQ(pp::Token::IDENTIFIER, token.type); EXPECT_EQ(pp::Token::IDENTIFIER, token.type);
EXPECT_STREQ("foo", token.value.c_str()); EXPECT_STREQ("foo", token.value.c_str());
EXPECT_TRUE(token.hasLeadingSpace()); EXPECT_TRUE(token.hasLeadingSpace());
EXPECT_EQ('+', preprocessor.lex(&token)); preprocessor.lex(&token);
EXPECT_EQ('+', token.type); EXPECT_EQ('+', token.type);
EXPECT_FALSE(token.hasLeadingSpace()); EXPECT_FALSE(token.hasLeadingSpace());
EXPECT_EQ('-', preprocessor.lex(&token)); preprocessor.lex(&token);
EXPECT_EQ('-', token.type); EXPECT_EQ('-', token.type);
EXPECT_TRUE(token.hasLeadingSpace()); EXPECT_TRUE(token.hasLeadingSpace());
EXPECT_EQ(pp::Token::IDENTIFIER, preprocessor.lex(&token)); preprocessor.lex(&token);
EXPECT_EQ(pp::Token::IDENTIFIER, token.type); EXPECT_EQ(pp::Token::IDENTIFIER, token.type);
EXPECT_STREQ("bar", token.value.c_str()); EXPECT_STREQ("bar", token.value.c_str());
EXPECT_FALSE(token.hasLeadingSpace()); EXPECT_FALSE(token.hasLeadingSpace());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment