Now supports strings.

git-svn-id: https://svn.tlawal.org/svn/monkey@63 f6afcba9-9ef1-4bdd-9b72-7484f5705bac
This commit is contained in:
Tijani Lawal 2023-05-09 19:59:18 +00:00
parent 4f3ef7d314
commit ec6c1ce5aa
25 changed files with 86 additions and 2762 deletions

View File

@ -286,3 +286,13 @@ func (ce *CallExpression) String() string {
return out.String()
}
// String
// StringLiteral is the AST node for a quoted string expression, e.g. "hello".
type StringLiteral struct {
	Token token.Token // presumably the token.STRING token — confirm against token package
	Value string
}

func (sl *StringLiteral) expression_node() {}

// TokenLiteral returns the raw string text carried by the token.
func (sl *StringLiteral) TokenLiteral() string {
	return sl.Token.Literal
}

// String renders the literal back as its token text (without added quotes).
func (sl *StringLiteral) String() string {
	return sl.Token.Literal
}

View File

@ -1,288 +0,0 @@
package ast
import (
"bytes"
"monkey/token"
"strings"
)
// Node is the base interface implemented by every AST node. TokenLiteral
// exposes the source token's text (used in tests/debugging); String renders
// the node back into source-like form.
type Node interface {
	TokenLiteral() string
	String() string
}

// Statement is a Node that does not produce a value (let, return, ...).
type Statement interface {
	Node
	statement_node() // marker method: distinguishes statements from expressions at compile time
}

// Expression is a Node that produces a value when evaluated.
type Expression interface {
	Node
	expression_node() // marker method
}
// Program is the root AST node: the ordered list of top-level statements.
type Program struct {
	Statements []Statement
}

// String renders every statement in order with no separators.
func (l_program *Program) String() string {
	var builder strings.Builder
	for _, statement := range l_program.Statements {
		builder.WriteString(statement.String())
	}
	return builder.String()
}
// Let Statements
type LetStatement struct {
Token token.Token // token.LET token
Name *Identifier
Value Expression
}
func (ls *LetStatement) statement_node() {}
func (ls *LetStatement) TokenLiteral() string {
return ls.Token.Literal
}
func (ls *LetStatement) String() string {
var out bytes.Buffer
out.WriteString(ls.TokenLiteral() + " ")
out.WriteString(ls.Name.String())
out.WriteString(" = ")
if ls.Value != nil {
out.WriteString(ls.Value.String())
}
out.WriteString(";")
return out.String()
}
// Identifier
type Identifier struct {
Token token.Token // the token.IDENT token
Value string
}
func (i *Identifier) expression_node() {}
func (i *Identifier) TokenLiteral() string {
return i.Token.Literal
}
func (i *Identifier) String() string {
return i.Value
}
// Program
func (p *Program) TokenLiteral() string {
if len(p.Statements) > 0 {
return p.Statements[0].TokenLiteral()
} else {
return ""
}
}
// Return Statements
// ReturnStatement represents `return <expression>;`.
type ReturnStatement struct {
	Token       token.Token // token.RETURN token
	ReturnValue Expression
}

func (rs *ReturnStatement) statement_node() {}

// TokenLiteral returns the literal of the `return` keyword token.
func (rs *ReturnStatement) TokenLiteral() string { return rs.Token.Literal }

// String renders the statement as `return <value>;`; the value is omitted
// when nil.
func (rs *ReturnStatement) String() string {
	var builder strings.Builder
	builder.WriteString(rs.TokenLiteral() + " ")
	if rs.ReturnValue != nil {
		builder.WriteString(rs.ReturnValue.String())
	}
	builder.WriteString(";")
	return builder.String()
}
// Expression Statement
type ExpressionStatement struct {
Token token.Token // the first token in the expression
Expression Expression
}
func (es *ExpressionStatement) statement_node() {}
func (es *ExpressionStatement) TokenLiteral() string {
return es.Token.Literal
}
func (es *ExpressionStatement) String() string {
if es.Expression != nil {
return es.Expression.String()
}
return ""
}
// IntegerLiteral
type IntegerLiteral struct {
Token token.Token
Value int64
}
func (il *IntegerLiteral) expression_node() {}
func (il *IntegerLiteral) TokenLiteral() string {
return il.Token.Literal
}
func (il *IntegerLiteral) String() string {
return il.Token.Literal
}
// PrefixExpression
type PrefixExpression struct {
Token token.Token // prefix token i.e. !
Operator string
Right Expression
}
func (pe *PrefixExpression) expression_node() {}
func (pe *PrefixExpression) TokenLiteral() string {
return pe.Token.Literal
}
func (pe *PrefixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(pe.Operator)
out.WriteString(pe.Right.String())
out.WriteString(")")
return out.String()
}
// Infix Expression
type InfixExpression struct {
Token token.Token // operator tokens i.e. +, -, *, /
Left Expression
Operator string
Right Expression
}
func (ie *InfixExpression) expression_node() {}
func (ie *InfixExpression) TokenLiteral() string { return ie.Token.Literal }
func (ie *InfixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(ie.Left.String())
out.WriteString(" " + ie.Operator + " ")
out.WriteString(ie.Right.String())
out.WriteString(")")
return out.String()
}
// Booleans
type Boolean struct {
Token token.Token
Value bool
}
func (b *Boolean) expression_node() {}
func (b *Boolean) TokenLiteral() string { return b.Token.Literal }
func (b *Boolean) String() string { return b.Token.Literal }
// If Expression
type IfExpression struct {
Token token.Token // the 'if' token
Condition Expression
Consequence *BlockStatement
Alternative *BlockStatement
}
func (ie *IfExpression) expression_node() {}
func (ie *IfExpression) TokenLiteral() string { return ie.Token.Literal }
func (ie *IfExpression) String() string {
var out bytes.Buffer
out.WriteString("if")
out.WriteString(ie.Condition.String())
out.WriteString(" ")
out.WriteString(ie.Consequence.String())
if ie.Alternative != nil {
out.WriteString("else")
out.WriteString(ie.Alternative.String())
}
return out.String()
}
// Block Statements
type BlockStatement struct {
Token token.Token // the { token
Statements []Statement
}
func (bs *BlockStatement) statement_node() {}
func (bs *BlockStatement) TokenLiteral() string {
return bs.Token.Literal
}
func (bs *BlockStatement) String() string {
var out bytes.Buffer
for _, s := range bs.Statements {
out.WriteString(s.String())
}
return out.String()
}
// Function literals
type FunctionLiteral struct {
Token token.Token // the 'fn' token
Parameters []*Identifier
Body *BlockStatement
}
func (fl *FunctionLiteral) expression_node() {}
func (fl *FunctionLiteral) TokenLiteral() string { return fl.Token.Literal }
func (fl *FunctionLiteral) String() string {
var out bytes.Buffer
params := []string{}
for _, p := range fl.Parameters {
params = append(params, p.String())
}
out.WriteString(fl.TokenLiteral())
out.WriteString("(")
out.WriteString(strings.Join(params, ", "))
out.WriteString(") ")
out.WriteString(fl.Body.String())
return out.String()
}
// Call Expression
type CallExpression struct {
Token token.Token // The '(' token
Function Expression // Identifier or FunctionLiteral
Arguments []Expression
}
func (ce *CallExpression) expression_node() {}
func (ce *CallExpression) TokenLiteral() string { return ce.Token.Literal }
func (ce *CallExpression) String() string {
var out bytes.Buffer
args := []string{}
for _, a := range ce.Arguments {
args = append(args, a.String())
}
out.WriteString(ce.Function.String())
out.WriteString("(")
out.WriteString(strings.Join(args, ", "))
out.WriteString(")")
return out.String()
}

View File

@ -1,28 +0,0 @@
package ast
import (
"monkey/token"
"testing"
)
// TestString checks that a hand-built AST renders back to the source it
// represents, exercising Program.String / LetStatement.String.
func TestString(l_test *testing.T) {
	const want = "let my_var = another_var;"
	program := &Program{
		Statements: []Statement{
			&LetStatement{
				Token: token.Token{Type: token.LET, Literal: "let"},
				Name: &Identifier{
					Token: token.Token{Type: token.IDENT, Literal: "my_var"},
					Value: "my_var",
				},
				Value: &Identifier{
					Token: token.Token{Type: token.IDENT, Literal: "another_var"},
					Value: "another_var",
				},
			},
		},
	}
	if got := program.String(); got != want {
		l_test.Errorf("program.String() wrong, got=%q", got)
	}
}

View File

@ -1,311 +0,0 @@
package evaluator
import (
"fmt"
"monkey/ast"
"monkey/object"
)
// Shared singleton objects: there is exactly one NULL, TRUE and FALSE value,
// so the evaluator can compare them by pointer identity (see eval_bang /
// eval_infix) instead of unwrapping their contents.
var (
	NULL  = &object.Null{}
	TRUE  = &object.Boolean{Value: true}
	FALSE = &object.Boolean{Value: false}
)
// Eval walks the AST rooted at node and returns the resulting object, using
// env for identifier bindings. Errors are propagated as *object.Error values
// rather than Go errors; an unhandled node type yields nil.
func Eval(node ast.Node, env *object.Environment) object.Object {
	switch node := node.(type) {
	// Statements
	case *ast.Program:
		return eval_program(node, env)
	case *ast.BlockStatement:
		return eval_block_statement(node, env)
	case *ast.ExpressionStatement:
		return Eval(node.Expression, env)
	case *ast.ReturnStatement:
		val := Eval(node.ReturnValue, env)
		if is_error(val) {
			return val
		}
		// Wrap so outer blocks know to stop executing (unwrapped in eval_program).
		return &object.ReturnValue{Value: val}
	case *ast.LetStatement:
		val := Eval(node.Value, env)
		if is_error(val) {
			return val
		}
		env.Set(node.Name.Value, val)
		// NOTE: no return here — a let statement falls through to the nil
		// return below, i.e. it produces no value.
	// Expressions
	case *ast.IntegerLiteral:
		return &object.Integer{Value: node.Value}
	case *ast.Boolean:
		return native_bool_to_boolean_object(node.Value)
	case *ast.PrefixExpression:
		right := Eval(node.Right, env)
		if is_error(right) {
			return right
		}
		return eval_prefix_expression(node.Operator, right)
	case *ast.InfixExpression:
		left := Eval(node.Left, env)
		if is_error(left) {
			return left
		}
		right := Eval(node.Right, env)
		if is_error(right) {
			return right
		}
		return eval_infix_expression(node.Operator, left, right)
	case *ast.IfExpression:
		return eval_if_expression(node, env)
	case *ast.Identifier:
		return eval_identifier(node, env)
	case *ast.FunctionLiteral:
		// Capture the defining environment so the function is a closure.
		params := node.Parameters
		body := node.Body
		return &object.Function{Parameters: params, Env: env, Body: body}
	case *ast.CallExpression:
		function := Eval(node.Function, env)
		if is_error(function) {
			return function
		}
		args := eval_expression(node.Arguments, env)
		// eval_expression signals an error by returning a 1-element slice.
		if len(args) == 1 && is_error(args[0]) {
			return args[0]
		}
		return apply_function(function, args)
	}
	return nil
}
// eval_program evaluates top-level statements in order. A ReturnValue stops
// execution and is unwrapped to its inner object; an Error stops execution
// and is returned as-is. Otherwise the last statement's value is returned.
func eval_program(program *ast.Program, env *object.Environment) object.Object {
	var result object.Object
	for _, stmt := range program.Statements {
		result = Eval(stmt, env)
		switch typed := result.(type) {
		case *object.ReturnValue:
			return typed.Value
		case *object.Error:
			return typed
		}
	}
	return result
}
// apply_function calls fn with args: it binds the arguments into a fresh
// environment enclosing the function's closure environment, evaluates the
// body, and unwraps any ReturnValue so a `return` inside the function does
// not bubble past the call site.
func apply_function(fn object.Object, args []object.Object) object.Object {
	function, ok := fn.(*object.Function)
	if !ok {
		return new_error("not a function: %s", fn.Type())
	}
	// Arity guard: without this, extend_function_env indexes args by
	// parameter position and panics on a call with too few arguments.
	if len(args) != len(function.Parameters) {
		return new_error("wrong number of arguments: want=%d, got=%d",
			len(function.Parameters), len(args))
	}
	extended_env := extend_function_env(function, args)
	evaluated := Eval(function.Body, extended_env)
	return unwrap_return_value(evaluated)
}
// extend_function_env creates a new environment enclosing the function's
// closure environment and binds each parameter name to its argument.
// Callers must supply exactly one argument per parameter.
func extend_function_env(fn *object.Function, args []object.Object) *object.Environment {
	env := object.NewEnclosedEnvironment(fn.Env)
	for i := range fn.Parameters {
		env.Set(fn.Parameters[i].Value, args[i])
	}
	return env
}
// unwrap_return_value strips a ReturnValue wrapper, if present, so a
// function call yields the returned object itself.
func unwrap_return_value(obj object.Object) object.Object {
	wrapped, ok := obj.(*object.ReturnValue)
	if !ok {
		return obj
	}
	return wrapped.Value
}
// eval_expression evaluates a list of expressions left to right. On the
// first error it returns a single-element slice holding that error, which
// the caller detects via len==1 && is_error.
func eval_expression(expressions []ast.Expression, env *object.Environment) []object.Object {
	var results []object.Object
	for _, expression := range expressions {
		value := Eval(expression, env)
		if is_error(value) {
			return []object.Object{value}
		}
		results = append(results, value)
	}
	return results
}
// eval_identifier resolves a name via the environment chain, producing an
// error object for unbound identifiers.
func eval_identifier(node *ast.Identifier, env *object.Environment) object.Object {
	if val, ok := env.Get(node.Value); ok {
		return val
	}
	return new_error("identifier not found: " + node.Value)
}
// eval_block_statement evaluates a block's statements in order. Unlike
// eval_program it does NOT unwrap ReturnValue — it passes it up untouched so
// a `return` inside nested blocks still terminates the enclosing function.
func eval_block_statement(block *ast.BlockStatement, env *object.Environment) object.Object {
	var result object.Object
	for _, stmt := range block.Statements {
		result = Eval(stmt, env)
		if result == nil {
			continue
		}
		switch result.Type() {
		case object.RETURN_VALUE_OBJECT, object.ERROR_OBJECT:
			return result
		}
	}
	return result
}
// native_bool_to_boolean_object maps a Go bool onto the shared TRUE/FALSE
// singletons so booleans stay comparable by pointer identity.
func native_bool_to_boolean_object(input bool) *object.Boolean {
	if !input {
		return FALSE
	}
	return TRUE
}
// eval_prefix_expression dispatches the two supported prefix operators,
// `!` and `-`, producing an error object for anything else.
func eval_prefix_expression(operator string, right object.Object) object.Object {
	if operator == "!" {
		return eval_bang_operator_expression(right)
	}
	if operator == "-" {
		return eval_minus_prefix_operator_expression(right)
	}
	return new_error("unknown operator: %s%s", operator, right.Type())
}
// eval_bang_operator_expression negates truthiness: FALSE and NULL become
// TRUE; everything else (TRUE, integers, functions, ...) becomes FALSE.
// Pointer comparison against the singletons is sufficient here.
func eval_bang_operator_expression(right object.Object) object.Object {
	if right == FALSE || right == NULL {
		return TRUE
	}
	return FALSE
}
// eval_minus_prefix_operator_expression negates an integer operand and
// errors on any other operand type.
func eval_minus_prefix_operator_expression(right object.Object) object.Object {
	integer, ok := right.(*object.Integer)
	if !ok {
		return new_error("unknown operator: -%s", right.Type())
	}
	return &object.Integer{Value: -integer.Value}
}
// eval_infix_expression dispatches binary operators. The case ORDER is
// load-bearing: integer/integer pairs must be handled first (value
// comparison), and the == / != pointer-identity cases must come before the
// type-mismatch check so boolean comparisons work via the TRUE/FALSE/NULL
// singletons.
func eval_infix_expression(operator string, left object.Object, right object.Object) object.Object {
	switch {
	case left.Type() == object.INTEGER_OBJECT && right.Type() == object.INTEGER_OBJECT:
		return eval_integer_infix_expression(operator, left, right)
	case operator == "==":
		// Pointer identity: valid because booleans and null are singletons.
		return native_bool_to_boolean_object(left == right)
	case operator == "!=":
		return native_bool_to_boolean_object(left != right)
	case left.Type() != right.Type():
		return new_error("type mismatch: %s %s %s", left.Type(), operator, right.Type())
	default:
		return new_error("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	}
}
// eval_integer_infix_expression applies an arithmetic or comparison operator
// to two Integer operands. Callers guarantee both operands are
// *object.Integer (checked in eval_infix_expression).
func eval_integer_infix_expression(operator string, left object.Object, right object.Object) object.Object {
	left_value := left.(*object.Integer).Value
	right_value := right.(*object.Integer).Value
	switch operator {
	case "+":
		return &object.Integer{Value: left_value + right_value}
	case "-":
		return &object.Integer{Value: left_value - right_value}
	case "*":
		return &object.Integer{Value: left_value * right_value}
	case "/":
		// Guard: Go panics on integer division by zero, which would crash
		// the whole interpreter; surface it as an evaluator error instead.
		if right_value == 0 {
			return new_error("division by zero: %d / %d", left_value, right_value)
		}
		return &object.Integer{Value: left_value / right_value}
	case "<":
		return native_bool_to_boolean_object(left_value < right_value)
	case ">":
		return native_bool_to_boolean_object(left_value > right_value)
	case "==":
		return native_bool_to_boolean_object(left_value == right_value)
	case "!=":
		return native_bool_to_boolean_object(left_value != right_value)
	default:
		return new_error("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	}
}
// eval_if_expression evaluates the condition, then the consequence if it is
// truthy, the alternative if present, or NULL otherwise.
func eval_if_expression(ie *ast.IfExpression, env *object.Environment) object.Object {
	condition := Eval(ie.Condition, env)
	if is_error(condition) {
		return condition
	}
	switch {
	case is_truthy(condition):
		return Eval(ie.Consequence, env)
	case ie.Alternative != nil:
		return Eval(ie.Alternative, env)
	default:
		return NULL
	}
}
// is_truthy reports whether obj counts as true in a condition: NULL and
// FALSE are falsy; everything else (TRUE, any integer including 0, any
// function) is truthy.
//
// The parameter was renamed from `object` to `obj` — the old name shadowed
// the imported object package inside this function body.
func is_truthy(obj object.Object) bool {
	switch obj {
	case NULL, FALSE:
		return false
	default:
		return true
	}
}
// new_error builds an *object.Error with a Sprintf-formatted message.
func new_error(format string, a ...interface{}) *object.Error {
	message := fmt.Sprintf(format, a...)
	return &object.Error{Message: message}
}
// is_error reports whether l_object is an evaluator error; nil is not.
func is_error(l_object object.Object) bool {
	return l_object != nil && l_object.Type() == object.ERROR_OBJECT
}

View File

@ -1,283 +0,0 @@
package evaluator
import (
"monkey/lexer"
"monkey/object"
"monkey/parser"
"testing"
)
// TestEvalIntegerExpression checks integer arithmetic end to end (lexer →
// parser → evaluator), including operator precedence, grouping and unary
// minus. Note integer division truncates (last two cases).
func TestEvalIntegerExpression(l_test *testing.T) {
	tests := []struct {
		input    string
		expected int64
	}{
		{"5", 5},
		{"10", 10},
		{"-10", -10},
		{"-5", -5},
		{"5 + 5 + 5 + 5 - 10", 10},
		{"2 * 2 * 2 * 2 * 2", 32},
		{"-50 + 100 + -50", 0},
		{"5 * 2 + 10", 20},
		{"5 + 2 * 10", 25},
		{"20 + 2 * -10", 0},
		{"50 / 2 * 2 + 10", 60},
		{"2 * (5 + 10)", 30},
		{"3 * 3 * 3 + 10", 37},
		{"3 * (3 * 3) + 10", 37},
		{"(5 + 10 * 2 + 15 / 3) * 2 + -10", 50},
		{"8 * (4 + 32 - (64 / 2) + 5) / (100 / 2 + (25 - 10 + 15) * 18)", 0},
	}
	for _, tt := range tests {
		evaluated := test_eval(tt.input)
		test_integer_object(l_test, evaluated, tt.expected)
	}
}
// TestErrorHandling checks that type mismatches, unknown operators and
// unbound identifiers surface as *object.Error with exact messages, and
// that an error aborts further evaluation (e.g. "5 + true; 5;").
func TestErrorHandling(l_test *testing.T) {
	tests := []struct {
		input            string
		expected_message string
	}{
		{"5 + true;", "type mismatch: INTEGER + BOOLEAN"},
		{"5 + true; 5;", "type mismatch: INTEGER + BOOLEAN"},
		{"-true;", "unknown operator: -BOOLEAN"},
		{"true + false;", "unknown operator: BOOLEAN + BOOLEAN"},
		{"5; true + false; 5", "unknown operator: BOOLEAN + BOOLEAN"},
		{"if (10 > 1) {true + false; }", "unknown operator: BOOLEAN + BOOLEAN"},
		{`
if (10 > 1){
if (10 > 1){
return true + false;
}
return 1;
}
`, "unknown operator: BOOLEAN + BOOLEAN"},
		{"foobar", "identifier not found: foobar"},
	}
	for _, tt := range tests {
		evaluated := test_eval(tt.input)
		error_object, ok := evaluated.(*object.Error)
		if !ok {
			l_test.Errorf("no error object returned, got=%T(%+v)", evaluated, evaluated)
			continue
		}
		if error_object.Message != tt.expected_message {
			l_test.Errorf("wrong error message, expected=%q, got=%q", tt.expected_message, error_object.Message)
		}
	}
}
// TestEvalBooleanExpression checks boolean literals, integer comparisons,
// and boolean equality (which relies on the TRUE/FALSE singletons).
func TestEvalBooleanExpression(l_test *testing.T) {
	tests := []struct {
		input    string
		expected bool
	}{
		{"true", true},
		{"false", false},
		{"1 < 2", true},
		{"1 > 2", false},
		{"1 < 1", false},
		{"1 > 1", false},
		{"1 == 1", true},
		{"1 != 1", false},
		{"1 == 2", false},
		{"1 != 2", true},
		{"true == true", true},
		{"false == false", true},
		{"true == false", false},
		{"true != false", true},
		{"false != true", true},
		{"(1 < 2) == true", true},
		{"(1 < 2) == false", false},
		{"(1 > 2) == true", false},
		{"(1 > 2) == false", true},
	}
	for _, tt := range tests {
		evaluated := test_eval(tt.input)
		test_boolean_object(l_test, evaluated, tt.expected)
	}
}
// TestBangOperator checks `!` truthiness negation: any non-false, non-null
// value (e.g. 5) is truthy, so !5 is false and !!5 is true.
func TestBangOperator(l_test *testing.T) {
	tests := []struct {
		input    string
		expected bool
	}{
		{"!true", false},
		{"!false", true},
		{"!5", false},
		{"!!true", true},
		{"!!false", false},
		{"!!5", true},
	}
	for _, tt := range tests {
		evaluated := test_eval(tt.input)
		test_boolean_object(l_test, evaluated, tt.expected)
	}
}
// TestIfElseExpressions checks conditional evaluation; an if with no taken
// branch evaluates to NULL (expected == nil rows).
func TestIfElseExpressions(l_test *testing.T) {
	tests := []struct {
		input    string
		expected interface{}
	}{
		{"if (true) { 10 }", 10},
		{"if (false) { 10 }", nil},
		{"if (1) { 10 }", 10},
		{"if (1 < 2) { 10 }", 10},
		{"if (1 > 2) { 10 }", nil},
		{"if (1 > 2) { 10 } else {20}", 20},
		{"if (1 < 2) { 10 } else {20}", 10},
		{"if(10 > 1){if(10 > 1){ return 10;} return 1;}", 10},
	}
	for _, tt := range tests {
		evaluated := test_eval(tt.input)
		// int expectation → integer assertion; nil expectation → NULL assertion.
		integer, ok := tt.expected.(int)
		if ok {
			test_integer_object(l_test, evaluated, int64(integer))
		} else {
			test_null_object(l_test, evaluated)
		}
	}
}
// TestReturnStatements checks that return halts evaluation and yields the
// returned value, even with trailing statements.
func TestReturnStatements(l_test *testing.T) {
	tests := []struct {
		input    string
		expected int64
	}{
		{"return 10;", 10},
		{"return 10; 9;", 10},
		{"return 2 * 5; 9;", 10},
		{"9; return 2 * 5; 9;", 10},
	}
	for _, tt := range tests {
		evaluated := test_eval(tt.input)
		test_integer_object(l_test, evaluated, tt.expected)
	}
}
// TestLetStatements checks binding and re-reading names via the environment.
func TestLetStatements(l_test *testing.T) {
	tests := []struct {
		input    string
		expected int64
	}{
		{"let a = 5; a;", 5},
		{"let a = 5 * 5; a;", 25},
		{"let a = 5; let b = a; b", 5},
		{"let a = 5; let b = a; let c = a + b + 5; c;", 15},
	}
	for _, tt := range tests {
		test_integer_object(l_test, test_eval(tt.input), tt.expected)
	}
}
// TestFunctionObject checks that evaluating a function literal produces an
// *object.Function carrying the parameter list and (re-rendered) body.
func TestFunctionObject(l_test *testing.T) {
	input := "fn(x) { x + 2;};"
	evaluated := test_eval(input)
	fn, ok := evaluated.(*object.Function)
	if !ok {
		l_test.Fatalf("object is not Function, got=%T (%+v)", evaluated, evaluated)
	}
	if len(fn.Parameters) != 1 {
		l_test.Fatalf("function has wrong parameters, Parameters=%+v", fn.Parameters)
	}
	if fn.Parameters[0].String() != "x" {
		l_test.Fatalf("parameter is not 'x', got=%q", fn.Parameters[0])
	}
	// Body renders with the parenthesization String() adds to infix nodes.
	expected_body := "(x + 2)"
	if fn.Body.String() != expected_body {
		l_test.Fatalf("body is not %q, got=%q", expected_body, fn.Body.String())
	}
}
// TestFunctionApplication checks calling named, multi-parameter, nested and
// immediately-invoked functions, with both implicit and explicit returns.
func TestFunctionApplication(l_test *testing.T) {
	tests := []struct {
		input    string
		expected int64
	}{
		{"let identity = fn(x) { x; }; identity(5);", 5},
		{"let identity = fn(x) { return x; }; identity(5);", 5},
		{"let double = fn(x) { x * 2; }; double(5);", 10},
		{"let add = fn(x, y) { x + y; }; add(5, 5);", 10},
		{"let add = fn(x, y) { x + y; }; add(5 + 5, add(5, 5));", 20},
		{"fn(x) { x; }(5)", 5},
	}
	for _, tt := range tests {
		test_integer_object(l_test, test_eval(tt.input), tt.expected)
	}
}
// TestClosures checks that an inner function captures its defining
// environment: addTwo remembers x=2 after newAdder has returned.
func TestClosures(l_test *testing.T) {
	input := `
let newAdder = fn(x) {
fn(y) { x + y };
};
let addTwo = newAdder(2);
addTwo(2);
`
	test_integer_object(l_test, test_eval(input), 4)
}
// Helpers
// test_eval runs the full pipeline (lexer → parser → evaluator) on input
// with a fresh environment and returns the resulting object.
func test_eval(input string) object.Object {
	program := parser.New(lexer.New(input)).ParseProgram()
	return Eval(program, object.NewEnvironment())
}
// test_integer_object asserts l_object is an *object.Integer with the
// expected value; reports via Errorf and returns false on mismatch.
func test_integer_object(l_test *testing.T, l_object object.Object, expected int64) bool {
	integer, ok := l_object.(*object.Integer)
	if !ok {
		l_test.Errorf("object is not integer, got=%T (%+v)", l_object, l_object)
		return false
	}
	if integer.Value != expected {
		l_test.Errorf("object has wrong value, got=%d, want=%d", integer.Value, expected)
		return false
	}
	return true
}
// test_boolean_object asserts l_object is an *object.Boolean with the
// expected value; reports via Errorf and returns false on mismatch.
func test_boolean_object(l_test *testing.T, l_object object.Object, expected bool) bool {
	result, ok := l_object.(*object.Boolean)
	if !ok {
		l_test.Errorf("object is not Boolean, got=%T (%+v)", l_object, l_object)
		return false
	}
	if result.Value != expected {
		// Fixed format verb: the original used %T (type) for the got value,
		// which printed "bool" instead of the actual true/false.
		l_test.Errorf("object has wrong value, got=%t, want=%t", result.Value, expected)
		return false
	}
	return true
}
// test_null_object asserts obj is the NULL singleton.
//
// The parameter was renamed from `object` to `obj` — the old name shadowed
// the imported object package inside the function body.
func test_null_object(l_test *testing.T, obj object.Object) bool {
	if obj != NULL {
		l_test.Errorf("object is not NULL, got=%T (%+v)", obj, obj)
		return false
	}
	return true
}

View File

@ -1,3 +0,0 @@
module monkey
go 1.18

View File

@ -1,136 +0,0 @@
package lexer
import "monkey/token"
// Lexer turns a Monkey source string into tokens. It works byte-wise (ASCII
// only — multi-byte UTF-8 input is not handled).
type Lexer struct {
	input         string
	position      int // index of current_char in input
	read_position int // index of the next byte to read (position + 1)
	current_char  byte // byte under examination; 0 signals end of input
}
// New creates a Lexer for input, primed so current_char holds the first byte.
func New(input string) *Lexer {
	l_lexer := &Lexer{input: input}
	l_lexer.read_char()
	return l_lexer
}
// NextToken consumes and returns the next token, skipping any leading
// whitespace. Identifier/number branches return early because read_identifier
// and read_number already advance past their last character.
func (l_lexer *Lexer) NextToken() token.Token {
	var tok token.Token
	l_lexer.skip_whitespace()
	switch l_lexer.current_char {
	case '=':
		// One-byte lookahead distinguishes '==' from '='.
		if l_lexer.peek_char() == '=' {
			ch := l_lexer.current_char
			l_lexer.read_char()
			literal := string(ch) + string(l_lexer.current_char)
			tok = token.Token{Type: token.EQ, Literal: literal}
		} else {
			tok = new_token(token.ASSIGN, l_lexer.current_char)
		}
	case '!':
		// Same lookahead for '!=' versus '!'.
		if l_lexer.peek_char() == '=' {
			ch := l_lexer.current_char
			l_lexer.read_char()
			literal := string(ch) + string(l_lexer.current_char)
			tok = token.Token{Type: token.NOT_EQ, Literal: literal}
		} else {
			tok = new_token(token.BANG, l_lexer.current_char)
		}
	case ';':
		tok = new_token(token.SEMICOLON, l_lexer.current_char)
	case '(':
		tok = new_token(token.LPAREN, l_lexer.current_char)
	case ')':
		tok = new_token(token.RPAREN, l_lexer.current_char)
	case '{':
		tok = new_token(token.LBRACE, l_lexer.current_char)
	case '}':
		tok = new_token(token.RBRACE, l_lexer.current_char)
	case ',':
		tok = new_token(token.COMMA, l_lexer.current_char)
	case '+':
		tok = new_token(token.PLUS, l_lexer.current_char)
	case '-':
		tok = new_token(token.MINUS, l_lexer.current_char)
	case '/':
		tok = new_token(token.SLASH, l_lexer.current_char)
	case '*':
		tok = new_token(token.ASTERISK, l_lexer.current_char)
	case '<':
		tok = new_token(token.LT, l_lexer.current_char)
	case '>':
		tok = new_token(token.GT, l_lexer.current_char)
	case 0:
		// 0 is the end-of-input sentinel set by read_char.
		tok.Literal = ""
		tok.Type = token.EOF
	default:
		if is_letter(l_lexer.current_char) {
			tok.Literal = l_lexer.read_identifier()
			tok.Type = token.LookupIdentifier(tok.Literal)
			return tok // read_identifier already advanced; skip read_char below
		} else if is_digit(l_lexer.current_char) {
			tok.Type = token.INT
			tok.Literal = l_lexer.read_number()
			return tok // read_number already advanced; skip read_char below
		} else {
			tok = new_token(token.ILLEGAL, l_lexer.current_char)
		}
	}
	l_lexer.read_char()
	return tok
}
// new_token builds a single-character token of the given type.
func new_token(token_type token.TokenType, ch byte) token.Token {
	return token.Token{
		Type:    token_type,
		Literal: string(ch),
	}
}
// read_char advances the lexer one byte: current_char receives the next
// input byte, or 0 once the input is exhausted.
func (l_lexer *Lexer) read_char() {
	l_lexer.current_char = 0 // end-of-input sentinel
	if l_lexer.read_position < len(l_lexer.input) {
		l_lexer.current_char = l_lexer.input[l_lexer.read_position]
	}
	l_lexer.position = l_lexer.read_position
	l_lexer.read_position++
}
// peek_char returns the next byte without advancing, or 0 at end of input.
func (l_lexer *Lexer) peek_char() byte {
	if l_lexer.read_position < len(l_lexer.input) {
		return l_lexer.input[l_lexer.read_position]
	}
	return 0
}
// read_identifier consumes a run of letters/underscores and returns it;
// current_char is left on the first non-letter byte.
func (l_lexer *Lexer) read_identifier() string {
	start := l_lexer.position
	for is_letter(l_lexer.current_char) {
		l_lexer.read_char()
	}
	return l_lexer.input[start:l_lexer.position]
}
// is_letter reports whether ch may appear in an identifier: ASCII letters
// and underscore.
func is_letter(ch byte) bool {
	switch {
	case ch >= 'a' && ch <= 'z':
		return true
	case ch >= 'A' && ch <= 'Z':
		return true
	default:
		return ch == '_'
	}
}
// skip_whitespace advances past spaces, tabs, newlines and carriage returns.
func (l_lexer *Lexer) skip_whitespace() {
	for {
		switch l_lexer.current_char {
		case ' ', '\t', '\n', '\r':
			l_lexer.read_char()
		default:
			return
		}
	}
}
// read_number consumes a run of ASCII digits and returns it; current_char
// is left on the first non-digit byte. Only integer literals are supported.
func (l_lexer *Lexer) read_number() string {
	start := l_lexer.position
	for is_digit(l_lexer.current_char) {
		l_lexer.read_char()
	}
	return l_lexer.input[start:l_lexer.position]
}
// is_digit reports whether ch is an ASCII decimal digit.
func is_digit(ch byte) bool {
	return ch >= '0' && ch <= '9'
}

View File

@ -1,127 +0,0 @@
package lexer
import (
"testing"
"monkey/token"
)
// TestNextToken drives the lexer over a snippet exercising every token type
// (keywords, identifiers, integers, all operators including two-char == and
// !=, delimiters, and EOF) and checks each emitted token in order.
func TestNextToken(t *testing.T) {
	input := `let five = 5;
let ten = 10;
let add = fn(x, y){
x + y;
};
let result = add(five, ten);
!-/*5;
5 < 10 > 5;
if(5 < 10){
return true;
} else {
return false;
}
10 == 10;
10 != 9;
`
	tests := []struct {
		expectedType    token.TokenType
		expectedLiteral string
	}{
		{token.LET, "let"},
		{token.IDENT, "five"},
		{token.ASSIGN, "="},
		{token.INT, "5"},
		{token.SEMICOLON, ";"},
		{token.LET, "let"},
		{token.IDENT, "ten"},
		{token.ASSIGN, "="},
		{token.INT, "10"},
		{token.SEMICOLON, ";"},
		{token.LET, "let"},
		{token.IDENT, "add"},
		{token.ASSIGN, "="},
		{token.FUNCTION, "fn"},
		{token.LPAREN, "("},
		{token.IDENT, "x"},
		{token.COMMA, ","},
		{token.IDENT, "y"},
		{token.RPAREN, ")"},
		{token.LBRACE, "{"},
		{token.IDENT, "x"},
		{token.PLUS, "+"},
		{token.IDENT, "y"},
		{token.SEMICOLON, ";"},
		{token.RBRACE, "}"},
		{token.SEMICOLON, ";"},
		{token.LET, "let"},
		{token.IDENT, "result"},
		{token.ASSIGN, "="},
		{token.IDENT, "add"},
		{token.LPAREN, "("},
		{token.IDENT, "five"},
		{token.COMMA, ","},
		{token.IDENT, "ten"},
		{token.RPAREN, ")"},
		{token.SEMICOLON, ";"},
		{token.BANG, "!"},
		{token.MINUS, "-"},
		{token.SLASH, "/"},
		{token.ASTERISK, "*"},
		{token.INT, "5"},
		{token.SEMICOLON, ";"},
		{token.INT, "5"},
		{token.LT, "<"},
		{token.INT, "10"},
		{token.GT, ">"},
		{token.INT, "5"},
		{token.SEMICOLON, ";"},
		{token.IF, "if"},
		{token.LPAREN, "("},
		{token.INT, "5"},
		{token.LT, "<"},
		{token.INT, "10"},
		{token.RPAREN, ")"},
		{token.LBRACE, "{"},
		{token.RETURN, "return"},
		{token.TRUE, "true"},
		{token.SEMICOLON, ";"},
		{token.RBRACE, "}"},
		{token.ELSE, "else"},
		{token.LBRACE, "{"},
		{token.RETURN, "return"},
		{token.FALSE, "false"},
		{token.SEMICOLON, ";"},
		{token.RBRACE, "}"},
		{token.INT, "10"},
		{token.EQ, "=="},
		{token.INT, "10"},
		{token.SEMICOLON, ";"},
		{token.INT, "10"},
		{token.NOT_EQ, "!="},
		{token.INT, "9"},
		{token.SEMICOLON, ";"},
		{token.EOF, ""},
	}
	l := New(input)
	for i, tt := range tests {
		tok := l.NextToken()
		if tok.Type != tt.expectedType {
			t.Fatalf("test[%d] - tokentype wrong. expected=%q, got=%q", i, tt.expectedType, tok.Type)
		}
		if tok.Literal != tt.expectedLiteral {
			t.Fatalf("tests[%d] - literal wrong. expected=%q, got=%q", i, tt.expectedLiteral, tok.Literal)
		}
	}
}

View File

@ -1,20 +0,0 @@
/* TODO(tijani):
- add quit command to the repl.
- remove semicolons from the language to mark the end of a statement.
- add line and position reporting in the lexer when a lexing or parsing error occurs.
*/
package main
import (
"fmt"
"os"
"monkey/repl"
)
// main prints a greeting and hands stdin/stdout to the REPL loop.
func main() {
	fmt.Println("Welcome to the Monk programming language!")
	repl.Start(os.Stdin, os.Stdout)
}

View File

@ -1,65 +0,0 @@
/*
Environment
An environment in this interpreter is what is used to keep track of values by associating them with a name.
Under the hood, the environment is basically an hash map that associates strings with objects.
*/
package object
// Environment maps names to objects; outer points at the enclosing scope
// (nil at the top level) and is consulted by Get on a local miss.
type Environment struct {
	store map[string]Object
	outer *Environment
}

// NewEnvironment creates an empty top-level environment (no enclosing scope).
func NewEnvironment() *Environment {
	return &Environment{store: make(map[string]Object)}
}
/*
Enclosing Environments
Here is a problem case, lets say in monkey I would want to type this:
```
let i = 5;
let print_num = fn(i) {
puts(i);
}
print_num(10);
puts(i);
```
The ideal result of the above code in the monkey programming language is for 10 and 5 to be the outputs respectively.
In a situation where enclosed environments do not exist, both outputs will be 10 because the current value of i
would be overwritten. The ideal situation would be to preserve the previous binding to 'i' while also making a new
one.
This works by creating a new instance of object.Environment with a pointer to the environment it should extend; doing this
encloses a fresh and empty environment with an existing one. When the Get method is called and it itself doesn't have the value
associated with the given name, it calls the Get of the enclosing environment. That's the environment it's extending. If that
enclosing environment can't find the value, it calls its own enclosing environment and so on until there is no enclosing environment
anymore and it will error out to an unknown identifier.
*/
// NewEnclosedEnvironment creates a fresh, empty environment whose lookups
// fall back to outer — used to bind function parameters per call.
func NewEnclosedEnvironment(outer *Environment) *Environment {
	return &Environment{
		store: make(map[string]Object),
		outer: outer,
	}
}
// Get looks name up locally, then recursively through the enclosing
// environments; the bool reports whether a binding was found anywhere.
func (l_environment *Environment) Get(name string) (Object, bool) {
	if obj, ok := l_environment.store[name]; ok {
		return obj, true
	}
	if l_environment.outer != nil {
		return l_environment.outer.Get(name)
	}
	return nil, false
}
// Set binds name to value in THIS environment only (it never writes to an
// outer scope) and returns the value for convenience.
func (l_environment *Environment) Set(name string, value Object) Object {
	l_environment.store[name] = value
	return value
}

View File

@ -1,103 +0,0 @@
package object
import (
"bytes"
"fmt"
"monkey/ast"
"strings"
)
// ObjectType tags every runtime object with a printable type name, used in
// evaluator error messages and type checks.
type ObjectType string

// The complete set of runtime object types.
const (
	INTEGER_OBJECT      = "INTEGER"
	BOOLEAN_OBJECT      = "BOOLEAN"
	NULL_OBJECT         = "NULL"
	RETURN_VALUE_OBJECT = "RETURN_VALUE"
	ERROR_OBJECT        = "ERROR"
	FUNCTION_OBJECT     = "FUNCTION"
)

// Object is the interface every Monkey runtime value implements. Inspect
// renders the value for display in the REPL.
type Object interface {
	Type() ObjectType
	Inspect() string
}
// Integer
type Integer struct {
Value int64
}
func (i *Integer) Type() ObjectType {
return INTEGER_OBJECT
}
func (i *Integer) Inspect() string {
return fmt.Sprintf("%d", i.Value)
}
// Booleans
type Boolean struct {
Value bool
}
func (b *Boolean) Type() ObjectType {
return BOOLEAN_OBJECT
}
func (b *Boolean) Inspect() string {
return fmt.Sprintf("%t", b.Value)
}
// Null
type Null struct{}
func (n *Null) Type() ObjectType {
return NULL_OBJECT
}
func (n *Null) Inspect() string {
return "null"
}
// Return
type ReturnValue struct {
Value Object
}
func (rv *ReturnValue) Type() ObjectType { return RETURN_VALUE_OBJECT }
func (rv *ReturnValue) Inspect() string { return rv.Value.Inspect() }
// Error
type Error struct {
Message string
}
func (err *Error) Type() ObjectType { return ERROR_OBJECT }
func (err *Error) Inspect() string { return "ERROR: " + err.Message }
// Function
type Function struct {
Parameters []*ast.Identifier
Body *ast.BlockStatement
Env *Environment
}
func (f *Function) Type() ObjectType { return FUNCTION_OBJECT }
func (f *Function) Inspect() string {
var out bytes.Buffer
params := []string{}
for _, p := range f.Parameters {
params = append(params, p.String())
}
out.WriteString("fn")
out.WriteString("(")
out.WriteString(strings.Join(params, ", "))
out.WriteString(") {\n")
out.WriteString(f.Body.String())
out.WriteString("\n}")
return out.String()
}

View File

@ -1,433 +0,0 @@
package parser
import (
"fmt"
"monkey/ast"
"monkey/lexer"
"monkey/token"
"strconv"
)
// Precedence of operations
// Operator precedence levels, lowest to highest. The parser compares these
// to decide how tightly an infix operator binds; the blank first member
// keeps LOWEST at 1 so the zero value is never a valid precedence.
const (
	_ int = iota // iota means start from 0, hence _ starts from 0
	LOWEST
	EQUALS      // ==
	LESSGREATER // > OR <
	SUM         // +
	PRODUCT     // *
	PREFIX      // -x OR !x
	CALL        // simple_function(x)
)
// Precedence Table
// precedences maps each infix-capable token type to its binding power.
// LPAREN gets CALL so `add(...)` parses as a call expression; tokens not in
// the map default to LOWEST.
var precedences = map[token.TokenType]int{
	token.EQ:       EQUALS,
	token.NOT_EQ:   EQUALS,
	token.LT:       LESSGREATER,
	token.GT:       LESSGREATER,
	token.PLUS:     SUM,
	token.MINUS:    SUM,
	token.SLASH:    PRODUCT,
	token.ASTERISK: PRODUCT,
	token.LPAREN:   CALL,
}
// peek_precedence returns the binding power of the upcoming token, or
// LOWEST when the token has no infix meaning.
//
// The local result variable was renamed — the original `if l_parser, ok :=`
// shadowed the receiver of the same name inside the if body.
func (l_parser *Parser) peek_precedence() int {
	if precedence, ok := precedences[l_parser.peek_token.Type]; ok {
		return precedence
	}
	return LOWEST
}
// current_precedence returns the binding power of the current token, or
// LOWEST when the token has no infix meaning.
//
// The local result variable was renamed — the original `if l_parser, ok :=`
// shadowed the receiver of the same name inside the if body.
func (l_parser *Parser) current_precedence() int {
	if precedence, ok := precedences[l_parser.current_token.Type]; ok {
		return precedence
	}
	return LOWEST
}
// Pratt Parsing
// Pratt-parser callback types: a prefix function parses a token appearing at
// the start of an expression; an infix function receives the already-parsed
// left operand.
type (
	prefix_parse_function func() ast.Expression
	infix_parse_function  func(ast.Expression) ast.Expression
)

// register_prefix associates a token type with its prefix parse callback.
func (l_parser *Parser) register_prefix(l_token_type token.TokenType, l_function prefix_parse_function) {
	l_parser.prefix_parse_functions[l_token_type] = l_function
}

// register_infix associates a token type with its infix parse callback.
func (l_parser *Parser) register_infix(l_token_type token.TokenType, l_function infix_parse_function) {
	l_parser.infix_parse_functions[l_token_type] = l_function
}
// Parser consumes tokens from the lexer with one token of lookahead
// (current_token / peek_token) and dispatches through the Pratt callback
// tables. Parse errors accumulate in errors rather than aborting.
type Parser struct {
	lexer         *lexer.Lexer
	current_token token.Token
	peek_token    token.Token
	errors        []string

	prefix_parse_functions map[token.TokenType]prefix_parse_function
	infix_parse_functions  map[token.TokenType]infix_parse_function
}
// New builds a Parser over l_lexer, primes the two-token lookahead, and
// registers every prefix and infix parse callback.
func New(l_lexer *lexer.Lexer) *Parser {
	l_parser := &Parser{lexer: l_lexer, errors: []string{}}
	// Read two tokens so current_token and peek_token are both set
	l_parser.next_token()
	l_parser.next_token()
	// Prefix Operations
	l_parser.prefix_parse_functions = make(map[token.TokenType]prefix_parse_function)
	l_parser.register_prefix(token.IDENT, l_parser.parse_identifier)
	l_parser.register_prefix(token.INT, l_parser.parse_integer_literal)
	l_parser.register_prefix(token.BANG, l_parser.parse_prefix_expression)
	l_parser.register_prefix(token.MINUS, l_parser.parse_prefix_expression)
	// Infix Operation
	l_parser.infix_parse_functions = make(map[token.TokenType]infix_parse_function)
	l_parser.register_infix(token.PLUS, l_parser.parse_infix_expression)
	l_parser.register_infix(token.MINUS, l_parser.parse_infix_expression)
	l_parser.register_infix(token.SLASH, l_parser.parse_infix_expression)
	l_parser.register_infix(token.ASTERISK, l_parser.parse_infix_expression)
	l_parser.register_infix(token.EQ, l_parser.parse_infix_expression)
	l_parser.register_infix(token.NOT_EQ, l_parser.parse_infix_expression)
	l_parser.register_infix(token.LT, l_parser.parse_infix_expression)
	l_parser.register_infix(token.GT, l_parser.parse_infix_expression)
	// Boolean
	l_parser.register_prefix(token.TRUE, l_parser.parse_boolean)
	l_parser.register_prefix(token.FALSE, l_parser.parse_boolean)
	// Grouped Expression
	l_parser.register_prefix(token.LPAREN, l_parser.parse_grouped_expression)
	// IF Expression
	l_parser.register_prefix(token.IF, l_parser.parse_if_expression)
	// Function Literals
	l_parser.register_prefix(token.FUNCTION, l_parser.parse_function_literal)
	// Call Expression: '(' after an expression is treated as an infix call operator
	l_parser.register_infix(token.LPAREN, l_parser.parse_call_expression)
	return l_parser
}
// Errors returns every parse error message collected so far.
func (l_parser *Parser) Errors() []string {
	return l_parser.errors
}
// ParseProgram consumes tokens until EOF, parsing one statement per
// iteration, and returns the AST root. Statements that fail to parse
// (nil) are dropped; the failure is reported via Errors().
func (l_parser *Parser) ParseProgram() *ast.Program {
	program := &ast.Program{Statements: []ast.Statement{}}
	for !l_parser.current_token_is(token.EOF) {
		if statement := l_parser.parse_statement(); statement != nil {
			program.Statements = append(program.Statements, statement)
		}
		l_parser.next_token()
	}
	return program
}
// peek_error records an "unexpected token" error: l_token was expected
// but the lookahead token has a different type.
func (l_parser *Parser) peek_error(l_token token.TokenType) {
	l_parser.errors = append(l_parser.errors,
		fmt.Sprintf("expected next token to be %s, got %s", l_token, l_parser.peek_token.Type))
}
// current_token_is reports whether the current token has type l_token.
func (l_parser *Parser) current_token_is(l_token token.TokenType) bool {
	return l_token == l_parser.current_token.Type
}
// peek_token_is reports whether the lookahead token has type l_token.
func (l_parser *Parser) peek_token_is(l_token token.TokenType) bool {
	return l_token == l_parser.peek_token.Type
}
// next_token slides the lookahead window: peek becomes current and a
// fresh token is pulled from the lexer into peek.
func (l_parser *Parser) next_token() {
	l_parser.current_token, l_parser.peek_token = l_parser.peek_token, l_parser.lexer.NextToken()
}
// expect_peek advances onto the lookahead token and returns true when
// it has the expected type; otherwise it leaves the parser in place,
// records a peek_error, and returns false.
func (l_parser *Parser) expect_peek(l_token token.TokenType) bool {
	if l_parser.peek_token_is(l_token) {
		l_parser.next_token()
		return true
	}
	// Idiom fix: no else after a terminating return.
	l_parser.peek_error(l_token)
	return false
}
// parse_statement dispatches on the current token: `let` and `return`
// have dedicated parsers; everything else is an expression statement.
func (l_parser *Parser) parse_statement() ast.Statement {
	switch l_parser.current_token.Type {
	case token.RETURN:
		return l_parser.parse_return_statement()
	case token.LET:
		return l_parser.parse_let_statement()
	default:
		return l_parser.parse_expression_statement()
	}
}
// parse_let_statement parses `let <identifier> = <expression>;`.
// Returns nil (after expect_peek has recorded an error) when the
// identifier or '=' is missing; the trailing semicolon is optional.
func (l_parser *Parser) parse_let_statement() *ast.LetStatement {
	//defer untrace(trace("parse_let_statement"))
	statement := &ast.LetStatement{Token: l_parser.current_token}
	if !l_parser.expect_peek(token.IDENT) {
		return nil
	}
	// expect_peek advanced onto the IDENT token.
	statement.Name = &ast.Identifier{Token: l_parser.current_token, Value: l_parser.current_token.Literal}
	if !l_parser.expect_peek(token.ASSIGN) {
		return nil
	}
	// Step past '=' onto the first token of the value expression.
	l_parser.next_token()
	statement.Value = l_parser.parse_expression(LOWEST)
	if l_parser.peek_token_is(token.SEMICOLON) {
		l_parser.next_token()
	}
	return statement
}
// parse_return_statement parses `return <expression>` with an optional
// trailing semicolon.
func (l_parser *Parser) parse_return_statement() *ast.ReturnStatement {
	//defer untrace(trace("parse_return_statement"))
	stmt := &ast.ReturnStatement{Token: l_parser.current_token}
	// Step past `return` onto the first token of the value expression.
	l_parser.next_token()
	stmt.ReturnValue = l_parser.parse_expression(LOWEST)
	if l_parser.peek_token_is(token.SEMICOLON) {
		l_parser.next_token()
	}
	return stmt
}
// parse_expression_statement wraps a bare expression (e.g. `x + 1;`) in
// an ast.ExpressionStatement; the trailing semicolon is optional so the
// REPL can accept `x + 1` as-is.
func (l_parser *Parser) parse_expression_statement() *ast.ExpressionStatement {
	//defer untrace(trace("parse_expression_statement"))
	stmt := &ast.ExpressionStatement{Token: l_parser.current_token}
	stmt.Expression = l_parser.parse_expression(LOWEST)
	if l_parser.peek_token_is(token.SEMICOLON) {
		l_parser.next_token()
	}
	return stmt
}
// parse_identifier turns the current IDENT token into an
// *ast.Identifier expression.
func (l_parser *Parser) parse_identifier() ast.Expression {
	tok := l_parser.current_token
	return &ast.Identifier{Token: tok, Value: tok.Literal}
}
// parse_integer_literal parses the current INT token into an
// *ast.IntegerLiteral. Base 0 lets ParseInt accept 0x/0o/0b prefixes.
// Records an error and returns nil when the literal does not fit in an
// int64.
func (l_parser *Parser) parse_integer_literal() ast.Expression {
	//defer untrace(trace("parse_integer_literal"))
	literal := &ast.IntegerLiteral{Token: l_parser.current_token}
	// Fix: the original named this variable `error`, shadowing the
	// builtin error interface type within this function.
	value, err := strconv.ParseInt(l_parser.current_token.Literal, 0, 64)
	if err != nil {
		message := fmt.Sprintf("could not parse %q as integer", l_parser.current_token.Literal)
		l_parser.errors = append(l_parser.errors, message)
		return nil
	}
	literal.Value = value
	return literal
}
// Here lies the heart of Pratt Parsing
//
// parse_expression parses an expression whose operators all bind more
// tightly than `precedence`. It applies the prefix function for the
// current token, then keeps folding infix operators into the left
// operand for as long as the lookahead operator binds more tightly than
// the caller's precedence (and no ';' is reached).
func (l_parser *Parser) parse_expression(precedence int) ast.Expression {
	//defer untrace(trace("parse_expression"))
	prefix := l_parser.prefix_parse_functions[l_parser.current_token.Type]
	if prefix == nil {
		l_parser.no_prefix_parse_function_error(l_parser.current_token.Type)
		return nil
	}
	left_expression := prefix()
	for !l_parser.peek_token_is(token.SEMICOLON) && precedence < l_parser.peek_precedence() {
		infix := l_parser.infix_parse_functions[l_parser.peek_token.Type]
		if infix == nil {
			// Next token has a precedence entry but no infix handler.
			return left_expression
		}
		// Advance onto the operator; infix consumes the right side.
		l_parser.next_token()
		left_expression = infix(left_expression)
	}
	return left_expression
}
// parse_prefix_expression parses `<operator><operand>`, e.g. !ok or -5.
// The operand is parsed at PREFIX precedence so `-a * b` groups as
// `(-a) * b`.
func (l_parser *Parser) parse_prefix_expression() ast.Expression {
	//defer untrace(trace("parse_prefix_expression"))
	operator := l_parser.current_token
	l_parser.next_token()
	return &ast.PrefixExpression{
		Token:    operator,
		Operator: operator.Literal,
		Right:    l_parser.parse_expression(PREFIX),
	}
}
// parse_infix_expression parses `<left> <operator> <right>` where the
// operator is the current token. Parsing the right side with the
// operator's own precedence makes operators left-associative.
func (l_parser *Parser) parse_infix_expression(left ast.Expression) ast.Expression {
	//defer untrace(trace("parse_infix_expression"))
	expression := &ast.InfixExpression{
		Token:    l_parser.current_token,
		Operator: l_parser.current_token.Literal,
		Left:     left,
	}
	// Capture the operator's precedence before advancing past it.
	precedence := l_parser.current_precedence()
	l_parser.next_token()
	expression.Right = l_parser.parse_expression(precedence)
	return expression
}
// no_prefix_parse_function_error records that no prefix parse function
// is registered for l_token_type (i.e. the token cannot begin an
// expression).
func (l_parser *Parser) no_prefix_parse_function_error(l_token_type token.TokenType) {
	// Fix: the old message read "for %s, found" — a misplaced comma.
	message := fmt.Sprintf("no prefix parse function for %s found", l_token_type)
	l_parser.errors = append(l_parser.errors, message)
}
// parse_boolean parses a `true` or `false` token into an *ast.Boolean.
func (l_parser *Parser) parse_boolean() ast.Expression {
	// defer untrace(trace("parse_boolean"))
	tok := l_parser.current_token
	return &ast.Boolean{Token: tok, Value: tok.Type == token.TRUE}
}
// parse_grouped_expression parses `( <expression> )`. Parentheses only
// reset precedence; they produce no AST node of their own. Returns nil
// when the closing ')' is missing.
func (l_parser *Parser) parse_grouped_expression() ast.Expression {
	//defer untrace(trace("parse_grouped_expression"))
	l_parser.next_token()
	inner := l_parser.parse_expression(LOWEST)
	if !l_parser.expect_peek(token.RPAREN) {
		return nil
	}
	return inner
}
// parse_if_expression parses `if (<condition>) { <consequence> }` with
// an optional `else { <alternative> }`. The parentheses and braces are
// mandatory; Alternative stays nil when there is no else branch.
// Returns nil on any missing delimiter (expect_peek records the error).
func (l_parser *Parser) parse_if_expression() ast.Expression {
	//defer untrace(trace("parse_if_expression"))
	expression := &ast.IfExpression{
		Token: l_parser.current_token,
	}
	if !l_parser.expect_peek(token.LPAREN) {
		return nil
	}
	// Step past '(' onto the first token of the condition.
	l_parser.next_token()
	expression.Condition = l_parser.parse_expression(LOWEST)
	if !l_parser.expect_peek(token.RPAREN) {
		return nil
	}
	if !l_parser.expect_peek(token.LBRACE) {
		return nil
	}
	expression.Consequence = l_parser.parse_block_statement()
	if l_parser.peek_token_is(token.ELSE) {
		l_parser.next_token()
		if !l_parser.expect_peek(token.LBRACE) {
			return nil
		}
		expression.Alternative = l_parser.parse_block_statement()
	}
	return expression
}
// parse_block_statement parses statements until '}' or EOF; it is
// called with the current token on '{'. An unterminated block simply
// ends at EOF.
func (l_parser *Parser) parse_block_statement() *ast.BlockStatement {
	// defer untrace(trace("parse_block_statement"))
	block := &ast.BlockStatement{Token: l_parser.current_token, Statements: []ast.Statement{}}
	l_parser.next_token()
	for !l_parser.current_token_is(token.RBRACE) && !l_parser.current_token_is(token.EOF) {
		if stmt := l_parser.parse_statement(); stmt != nil {
			block.Statements = append(block.Statements, stmt)
		}
		l_parser.next_token()
	}
	return block
}
// parse_function_literal parses `fn(<parameters>) { <body> }`. Returns
// nil when the '(' or '{' is missing.
func (l_parser *Parser) parse_function_literal() ast.Expression {
	// defer untrace(trace("parse_function_literal"))
	fn := &ast.FunctionLiteral{Token: l_parser.current_token}
	if !l_parser.expect_peek(token.LPAREN) {
		return nil
	}
	fn.Parameters = l_parser.parse_function_parameters()
	if !l_parser.expect_peek(token.LBRACE) {
		return nil
	}
	fn.Body = l_parser.parse_block_statement()
	return fn
}
// parse_function_parameters parses the comma-separated identifier list
// of a function literal; called with the current token on '('. Returns
// an empty slice for `fn()` and nil when the closing ')' is missing.
func (l_parser *Parser) parse_function_parameters() []*ast.Identifier {
	// defer untrace(trace("parse_function_parameters"))
	identifiers := []*ast.Identifier{}
	// Empty parameter list: consume ')' and stop.
	if l_parser.peek_token_is(token.RPAREN) {
		l_parser.next_token()
		return identifiers
	}
	// First parameter.
	l_parser.next_token()
	ident := &ast.Identifier{Token: l_parser.current_token, Value: l_parser.current_token.Literal}
	identifiers = append(identifiers, ident)
	// Each iteration consumes a ',' and the identifier following it.
	for l_parser.peek_token_is(token.COMMA) {
		l_parser.next_token()
		l_parser.next_token()
		ident := &ast.Identifier{Token: l_parser.current_token, Value: l_parser.current_token.Literal}
		identifiers = append(identifiers, ident)
	}
	if !l_parser.expect_peek(token.RPAREN) {
		return nil
	}
	return identifiers
}
// parse_call_expression parses `<callee>(<arguments>)`; `function` is
// the already-parsed callee (identifier or fn literal) and the current
// token is the '(' that triggered this infix handler.
func (l_parser *Parser) parse_call_expression(function ast.Expression) ast.Expression {
	// defer untrace(trace("parse_call_expression"))
	call := &ast.CallExpression{Token: l_parser.current_token, Function: function}
	call.Arguments = l_parser.parse_call_arguments()
	return call
}
// parse_call_arguments parses the comma-separated argument expressions
// of a call; called with the current token on '('. Returns an empty
// slice for `f()` and nil when the closing ')' is missing. Mirrors
// parse_function_parameters but for arbitrary expressions.
func (l_parser *Parser) parse_call_arguments() []ast.Expression {
	// defer untrace(trace("parse_call_arguments"))
	args := []ast.Expression{}
	// Empty argument list: consume ')' and stop.
	if l_parser.peek_token_is(token.RPAREN) {
		l_parser.next_token()
		return args
	}
	// First argument.
	l_parser.next_token()
	args = append(args, l_parser.parse_expression(LOWEST))
	// Each iteration consumes a ',' and the expression following it.
	for l_parser.peek_token_is(token.COMMA) {
		l_parser.next_token()
		l_parser.next_token()
		args = append(args, l_parser.parse_expression(LOWEST))
	}
	if !l_parser.expect_peek(token.RPAREN) {
		return nil
	}
	return args
}

View File

@ -1,797 +0,0 @@
package parser
import (
"fmt"
"monkey/ast"
"monkey/lexer"
"testing"
)
func TestLetStatement(l_test *testing.T) {
input := `
let x = 4;
let y = 19;
let foobar = 8948398493;
`
l_lexer := lexer.New(input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
if program == nil {
l_test.Fatalf("ParseProgram() returned nil")
}
if len(program.Statements) != 3 {
l_test.Fatalf("program.Statements does not contain 3 statements, got=%d", len(program.Statements))
}
tests := []struct {
expected_identifier string
}{
{"x"},
{"y"},
{"foobar"},
}
for i, tt := range tests {
statement := program.Statements[i]
if !testLetStatement(l_test, statement, tt.expected_identifier) {
return
}
}
}
// TestReturnStatement checks that `return <expr>;` statements parse
// into *ast.ReturnStatement nodes with the "return" token literal.
func TestReturnStatement(l_test *testing.T) {
	input := `
return 6;
return 10;
return 8419849;
`
	l_lexer := lexer.New(input)
	l_parser := New(l_lexer)
	program := l_parser.ParseProgram()
	check_parser_errors(l_test, l_parser)
	if len(program.Statements) != 3 {
		l_test.Fatalf("program.Statements does not contain 3 statements, got=%d", len(program.Statements))
	}
	for _, statement := range program.Statements {
		return_statement, ok := statement.(*ast.ReturnStatement)
		if !ok {
			// Fix: message read "statment ... got =%T".
			l_test.Errorf("statement not *ast.ReturnStatement, got=%T", statement)
			continue
		}
		if return_statement.TokenLiteral() != "return" {
			l_test.Errorf("return_statement.TokenLiteral() not 'return', got %q", return_statement.TokenLiteral())
		}
	}
}
// TestIdentifierExpression checks that a bare identifier parses into an
// *ast.Identifier wrapped in an expression statement.
func TestIdentifierExpression(l_test *testing.T) {
	input := "foobar;"
	l_lexer := lexer.New(input)
	l_parser := New(l_lexer)
	program := l_parser.ParseProgram()
	check_parser_errors(l_test, l_parser)
	if len(program.Statements) != 1 {
		// Fix: message read "staments".
		l_test.Fatalf("program does not have enough statements, got=%d", len(program.Statements))
	}
	statement, ok := program.Statements[0].(*ast.ExpressionStatement)
	if !ok {
		l_test.Fatalf("program.Statements[0] is not ast.ExpressionStatement, got=%T", program.Statements[0])
	}
	identifier, ok := statement.Expression.(*ast.Identifier)
	if !ok {
		l_test.Fatalf("expression not *ast.Identifier, got=%T", statement.Expression)
	}
	if identifier.Value != "foobar" {
		l_test.Errorf("identifier.Value not %s, got=%s", "foobar", identifier.Value)
	}
	if identifier.TokenLiteral() != "foobar" {
		l_test.Errorf("identifier.TokenLiteral not %s, got=%s", "foobar", identifier.TokenLiteral())
	}
}
func TestIntegerLiteralExpressions(l_test *testing.T) {
input := "5;"
l_lexer := lexer.New(input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
if len(program.Statements) != 1 {
l_test.Fatalf("program does not have enough statements, got=%d", len(program.Statements))
}
statement, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("program.Statements[0] is not ast.ExpressionStatement, got=%T", program.Statements[0])
}
literal, ok := statement.Expression.(*ast.IntegerLiteral)
if !ok {
l_test.Fatalf("expression not *ast.IntegerLiteral, got=%T", statement.Expression)
}
if literal.Value != 5 {
l_test.Errorf("literal.Value not %d, got=%d", 5, literal.Value)
}
if literal.TokenLiteral() != "5" {
l_test.Errorf("literal.TokenLiteral not %s, got=%s", "5", literal.TokenLiteral())
}
}
func TestParsingPrefixExpressions(l_test *testing.T) {
prefix_tests := []struct {
input string
operator string
value interface{}
}{
{"!5;", "!", 5},
{"-15", "-", 15},
{"!true;", "!", true},
{"!false;", "!", false},
}
for _, tt := range prefix_tests {
l_lexer := lexer.New(tt.input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
if len(program.Statements) != 1 {
l_test.Fatalf("program.Statements does not contain %d statements, got=%d\n", 1, len(program.Statements))
}
statement, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("program.Statements[0] is not ast.ExpressionStatement, got=%T", program.Statements[0])
}
expression, ok := statement.Expression.(*ast.PrefixExpression)
if !ok {
l_test.Fatalf("program.Statements[0] is not ast.PrefixEXpression, got=%T", statement.Expression)
}
if expression.Operator != tt.operator {
l_test.Fatalf("exp.Operator is not '%s', got %s", tt.operator, expression.Operator)
}
if !testLiteralExpression(l_test, expression.Right, tt.value) {
return
}
}
}
func TestParsingInfixExpressions(l_test *testing.T) {
infix_tests := []struct {
input string
left_value interface{}
operator string
right_value interface{}
}{
{"5 + 5;", 5, "+", 5},
{"5 - 5;", 5, "-", 5},
{"5 * 5;", 5, "*", 5},
{"5 / 5;", 5, "/", 5},
{"5 > 5;", 5, ">", 5},
{"5 < 5;", 5, "<", 5},
{"5 == 5;", 5, "==", 5},
{"5 != 5;", 5, "!=", 5},
{"true == true", true, "==", true},
{"true != false", true, "!=", false},
{"false == false", false, "==", false},
}
for _, tt := range infix_tests {
l_lexer := lexer.New(tt.input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
if len(program.Statements) != 1 {
l_test.Fatalf("program.Statements does not contain %d statements, got=%d\n", 1, len(program.Statements))
}
statement, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("program.Statements[0] is not ast.ExpressionStatement, got=%T", program.Statements[0])
}
if !testInfixExpression(l_test, statement.Expression, tt.left_value, tt.operator, tt.right_value) {
return
}
}
}
func TestOperatorPrecedenceParsing(l_test *testing.T) {
tests := []struct {
input string
expected string
}{
{
"-a * b",
"((-a) * b)",
},
{
"!-a",
"(!(-a))",
},
{
"a + b + c",
"((a + b) + c)",
},
{
"a + b - c",
"((a + b) - c)",
},
{
"a * b * c",
"((a * b) * c)",
},
{
"a * b / c",
"((a * b) / c)",
},
{
"a + b / c",
"(a + (b / c))",
},
{
"a + b * c + d / e - f",
"(((a + (b * c)) + (d / e)) - f)",
},
{
"3 + 4; -5 * 5",
"(3 + 4)((-5) * 5)",
},
{
"5 > 4 == 3 < 4",
"((5 > 4) == (3 < 4))",
},
{
"5 < 4 != 3 > 4",
"((5 < 4) != (3 > 4))",
},
{
"3 + 4 * 5 == 3 * 1 + 4 * 5",
"((3 + (4 * 5)) == ((3 * 1) + (4 * 5)))",
},
{
"true",
"true",
},
{
"false",
"false",
},
{
"3 > 5 == false",
"((3 > 5) == false)",
},
{
"3 < 5 == true",
"((3 < 5) == true)",
},
{
"3 < 5 == true",
"((3 < 5) == true)",
},
{
"(5 + 5) * 2",
"((5 + 5) * 2)",
},
{
"2 / (5 + 5)",
"(2 / (5 + 5))",
},
{
"-(5 + 5)",
"(-(5 + 5))",
},
{
"!(true == true)",
"(!(true == true))",
},
{
"a + add(b * c) + d",
"((a + add((b * c))) + d)",
},
{
"add(a, b, 1, 2 * 3, 4 + 5, add(6, 7 * 8))",
"add(a, b, 1, (2 * 3), (4 + 5), add(6, (7 * 8)))",
},
{
"add(a + b + c * d / f + g)",
"add((((a + b) + ((c * d) / f)) + g))",
},
}
for _, tt := range tests {
l_lexer := lexer.New(tt.input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
actual := program.String()
if actual != tt.expected {
l_test.Errorf("expected=%q, got=%q", tt.expected, actual)
}
}
}
func TestBooleanExpression(l_test *testing.T) {
tests := []struct {
input string
expected_boolean bool
}{
{"true;", true},
{"false;", false},
}
for _, tt := range tests {
l_lexer := lexer.New(tt.input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
if len(program.Statements) != 1 {
l_test.Fatalf("program.Statements does not have enough statements, got=%d", len(program.Statements))
}
statement, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("program.Statements[0] is not ast.ExpressionStatement, got=%T", program.Statements[0])
}
boolean, ok := statement.Expression.(*ast.Boolean)
if !ok {
l_test.Fatalf("exp not *ast.Boolean, got=%T", statement.Expression)
}
if boolean.Value != tt.expected_boolean {
l_test.Errorf("boolean.Value not %t, got=%t", tt.expected_boolean, boolean.Value)
}
}
}
func TestIfExpression(l_test *testing.T) {
input := `if (x < y) { x }`
l_lexer := lexer.New(input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
if len(program.Statements) != 1 {
l_test.Fatalf("program.Statements does not contain %d statements, got=%d\n", 1, len(program.Statements))
}
statement, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("program.Statements[0] is not ast.ExpressionStatement, got=%T", program.Statements[0])
}
expression, ok := statement.Expression.(*ast.IfExpression)
if !ok {
l_test.Fatalf("statement.Expression is not ast.IfExpression, got=%T", statement.Expression)
}
if !testInfixExpression(l_test, expression.Condition, "x", "<", "y") {
return
}
if len(expression.Consequence.Statements) != 1 {
l_test.Errorf("consequence is not 1 statements, got=%d\n", len(expression.Consequence.Statements))
}
consequence, ok := expression.Consequence.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("Statements[0] is not ast.ExpressionStatement, got=%T", expression.Consequence.Statements[0])
}
if !testIdentifier(l_test, consequence.Expression, "x") {
return
}
if expression.Alternative != nil {
l_test.Errorf("expression.Alternative.Statements was not nil, got=%+v", expression.Alternative)
}
}
func TestIfElseExpression(l_test *testing.T) {
input := `if (x < y) { x } else { y }`
l_lexer := lexer.New(input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
if len(program.Statements) != 1 {
l_test.Fatalf("program.Statements does not contain %d statements, got=%d\n", 1, len(program.Statements))
}
statement, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("program.Statements[0] is not an ast.ExpressionStatement, got=%T", program.Statements[0])
}
expression, ok := statement.Expression.(*ast.IfExpression)
if !ok {
l_test.Fatalf("statement.Expression is not ast.IfExpression, got=%T", statement.Expression)
}
if !testInfixExpression(l_test, expression.Condition, "x", "<", "y") {
return
}
if len(expression.Consequence.Statements) != 1 {
l_test.Errorf("consequence is not 1 statements, got=%d\n", len(expression.Consequence.Statements))
}
consequence, ok := expression.Consequence.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("Statements[0] is not ast.ExpressionStatement, got=%T", expression.Consequence.Statements[0])
}
if !testIdentifier(l_test, consequence.Expression, "x") {
return
}
if len(expression.Alternative.Statements) != 1 {
l_test.Errorf("expression.Alterative.Statements does not contain 1 statement, got=%d\n", len(expression.Alternative.Statements))
}
alternative, ok := expression.Alternative.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("Statements[0] is not ast.ExpressionStatement, got=%T", expression.Alternative.Statements[0])
}
if !testIdentifier(l_test, alternative.Expression, "y") {
return
}
}
func TestFunctionLiteralParsing(l_test *testing.T) {
input := `fn(x, y) { x + y; }`
l_lexer := lexer.New(input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
if len(program.Statements) != 1 {
l_test.Fatalf("program.Statements does not contain %d statements, got=%d\n", 1, len(program.Statements))
}
statement, ok := program.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("program.Statements[0] is not ast.ExpressionStatement, got=%T", program.Statements[0])
}
function, ok := statement.Expression.(*ast.FunctionLiteral)
if !ok {
l_test.Fatalf("statement.Expression is not ast.FunctionLiteral, got=%T", statement.Expression)
}
if len(function.Parameters) != 2 {
l_test.Fatalf("function literal parameters wrong, want 2, got=%d\n", len(function.Parameters))
}
testLiteralExpression(l_test, function.Parameters[0], "x")
testLiteralExpression(l_test, function.Parameters[1], "y")
if len(function.Body.Statements) != 1 {
l_test.Fatalf("function.Body.Statements does not have 1 statement, got=%d\n", len(function.Body.Statements))
}
body_statement, ok := function.Body.Statements[0].(*ast.ExpressionStatement)
if !ok {
l_test.Fatalf("function body statement is not ast.ExpressionStatemes, got=%T", function.Body.Statements[0])
}
testInfixExpression(l_test, body_statement.Expression, "x", "+", "y")
}
func TestFunctionParameterParsing(l_test *testing.T) {
tests := []struct {
input string
expected_params []string
}{
{input: "fn() {};", expected_params: []string{}},
{input: "fn(x) {};", expected_params: []string{"x"}},
{input: "fn(x, y, z) {};", expected_params: []string{"x", "y", "z"}},
}
for _, tt := range tests {
l_lexer := lexer.New(tt.input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
statement := program.Statements[0].(*ast.ExpressionStatement)
function := statement.Expression.(*ast.FunctionLiteral)
if len(function.Parameters) != len(tt.expected_params) {
l_test.Errorf("length of parameters is wrong, want %d, got=%d\n",
len(tt.expected_params), len(function.Parameters))
}
for i, identifier := range tt.expected_params {
testLiteralExpression(l_test, function.Parameters[i], identifier)
}
}
}
// TestCallExpressionParsing checks that a call with mixed literal and
// infix arguments parses into an *ast.CallExpression with the right
// callee and argument expressions.
func TestCallExpressionParsing(l_test *testing.T) {
	input := "add(1, 2 * 3, 4 + 5);"
	l_lexer := lexer.New(input)
	l_parser := New(l_lexer)
	program := l_parser.ParseProgram()
	check_parser_errors(l_test, l_parser)
	if len(program.Statements) != 1 {
		l_test.Fatalf("program.Statements does not contain %d statements, got=%d\n", 1, len(program.Statements))
	}
	statement, ok := program.Statements[0].(*ast.ExpressionStatement)
	if !ok {
		l_test.Fatalf("statement is not ast.ExpressionStatement, got=%T", program.Statements[0])
	}
	expression, ok := statement.Expression.(*ast.CallExpression)
	if !ok {
		// Fix: message read "statemnt.Expression".
		l_test.Fatalf("statement.Expression is not ast.CallExpression, got=%T", statement.Expression)
	}
	if !testIdentifier(l_test, expression.Function, "add") {
		return
	}
	if len(expression.Arguments) != 3 {
		l_test.Fatalf("wrong length of arguments, got=%d", len(expression.Arguments))
	}
	testLiteralExpression(l_test, expression.Arguments[0], 1)
	testInfixExpression(l_test, expression.Arguments[1], 2, "*", 3)
	testInfixExpression(l_test, expression.Arguments[2], 4, "+", 5)
}
func TestCallExpressionParameterParsing(l_test *testing.T) {
tests := []struct {
input string
expected_ident string
expected_args []string
}{
{
input: "add();",
expected_ident: "add",
expected_args: []string{},
},
{
input: "add(1);",
expected_ident: "add",
expected_args: []string{"1"},
},
{
input: "add(1, 2 * 3, 4 + 5);",
expected_ident: "add",
expected_args: []string{"1", "(2 * 3)", "(4 + 5)"},
},
}
for _, tt := range tests {
l_lexer := lexer.New(tt.input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
statement := program.Statements[0].(*ast.ExpressionStatement)
expression, ok := statement.Expression.(*ast.CallExpression)
if !ok {
l_test.Fatalf("statement.Expression is not ast.CallExpression, got=%T",
statement.Expression)
}
if !testIdentifier(l_test, expression.Function, tt.expected_ident) {
return
}
if len(expression.Arguments) != len(tt.expected_args) {
l_test.Fatalf("wrong number of arguments, want=%d, got=%d",
len(tt.expected_args), len(expression.Arguments))
}
for i, arg := range tt.expected_args {
if expression.Arguments[i].String() != arg {
l_test.Errorf("argument %d wrong. want=%q, got=%q", i,
arg, expression.Arguments[i].String())
}
}
}
}
func TestLetStatements(l_test *testing.T) {
tests := []struct {
input string
expected_identifier string
expected_value interface{}
}{
{"let x = 5;", "x", 5},
{"let y = true;", "y", true},
{"let foobar = y;", "foobar", "y"},
}
for _, tt := range tests {
l_lexer := lexer.New(tt.input)
l_parser := New(l_lexer)
program := l_parser.ParseProgram()
check_parser_errors(l_test, l_parser)
if len(program.Statements) != 1 {
l_test.Fatalf("program.Statements does not contain 1 statements, got=%d",
len(program.Statements))
}
statement := program.Statements[0]
if !testLetStatement(l_test, statement, tt.expected_identifier) {
return
}
val := statement.(*ast.LetStatement).Value
if !testLiteralExpression(l_test, val, tt.expected_value) {
return
}
}
}
// Helpers
// check_parser_errors fails the test immediately when the parser has
// accumulated any errors, printing each one first.
func check_parser_errors(l_test *testing.T, l_parser *Parser) {
	errs := l_parser.Errors()
	if len(errs) == 0 {
		return
	}
	l_test.Errorf("parser has %d errors", len(errs))
	for _, msg := range errs {
		l_test.Errorf("parser error: %q", msg)
	}
	l_test.FailNow()
}
func testLetStatement(l_test *testing.T, statement ast.Statement, name string) bool {
if statement.TokenLiteral() != "let" {
l_test.Errorf("statement.TokenLiteral not let, got=%q", statement.TokenLiteral())
return false
}
let_statement, ok := statement.(*ast.LetStatement)
if !ok {
l_test.Errorf("statement not *ast.LetStatement, got=%T", statement)
return false
}
if let_statement.Name.Value != name {
l_test.Errorf("let_statement.name.Value not %s, got=%s", name, let_statement.Name.Value)
return false
}
if let_statement.Name.TokenLiteral() != name {
l_test.Errorf("let_statement.name.TokenLiteral() not %s, got=%s", name, let_statement.Name.TokenLiteral())
return false
}
return true
}
func testIdentifier(l_test *testing.T, exp ast.Expression, value string) bool {
identifier, ok := exp.(*ast.Identifier)
if !ok {
l_test.Errorf("exp not *ast.Identifier, got=%T", exp)
return false
}
if identifier.Value != value {
l_test.Errorf("identifier.Value not %s, got=%s", value, identifier.Value)
return false
}
if identifier.TokenLiteral() != value {
l_test.Errorf("identifier.TokenLiteral not %s, got=%s", value, identifier.TokenLiteral())
return false
}
return true
}
func testIntegerLiteral(l_test *testing.T, il ast.Expression, value int64) bool {
integer, ok := il.(*ast.IntegerLiteral)
if !ok {
l_test.Errorf("il not *ast.IntegerLiteral, got=%T", il)
return false
}
if integer.Value != value {
l_test.Errorf("integer.Value not %d, got=%d", value, integer.Value)
return false
}
if integer.TokenLiteral() != fmt.Sprintf("%d", value) {
l_test.Errorf("integer.TokenLiteral not %d, got=%s", value, integer.TokenLiteral())
return false
}
return true
}
// testLiteralExpression dispatches on the dynamic type of `expected`
// and delegates to the matching typed helper; ints are widened to int64
// and strings are treated as identifier names.
func testLiteralExpression(l_test *testing.T, exp ast.Expression, expected interface{}) bool {
	switch v := expected.(type) {
	case int:
		return testIntegerLiteral(l_test, exp, int64(v))
	case int64:
		return testIntegerLiteral(l_test, exp, v)
	case string:
		return testIdentifier(l_test, exp, v)
	case bool:
		return testBooleanLiteral(l_test, exp, v)
	}
	// Fix: the switch is on expected's type, so report expected's type;
	// the old message printed exp's type, which was misleading.
	l_test.Errorf("type of expected not handled, got=%T", expected)
	return false
}
func testInfixExpression(l_test *testing.T, exp ast.Expression, left interface{}, operator string, right interface{}) bool {
operator_expression, ok := exp.(*ast.InfixExpression)
if !ok {
l_test.Errorf("exp is not ast.InfixExpression, got=%T(%s)", exp, exp)
return false
}
if !testLiteralExpression(l_test, operator_expression.Left, left) {
return false
}
if operator_expression.Operator != operator {
l_test.Errorf("exp.Operator is not '%s', got=%q", operator, operator_expression.Operator)
return false
}
if !testLiteralExpression(l_test, operator_expression.Right, right) {
return false
}
return true
}
func testBooleanLiteral(l_test *testing.T, exp ast.Expression, value bool) bool {
boolean, ok := exp.(*ast.Boolean)
if !ok {
l_test.Errorf("exp not *ast.Boolean, got=%T", exp)
return false
}
if boolean.Value != value {
l_test.Errorf("boolean.Value is not %t, got=%t", value, boolean.Value)
return false
}
if boolean.TokenLiteral() != fmt.Sprintf("%t", value) {
l_test.Errorf("boolean.TokenLiteral is not %t, got=%s", value, boolean.TokenLiteral())
return false
}
return true
}

View File

@ -1,32 +0,0 @@
package parser
import (
"fmt"
"strings"
)
// Parser tracing helpers. Intended usage inside a parse function:
//
//	defer untrace(trace("parse_expression"))
//
// Each BEGIN/END pair prints one tab deeper than its caller's pair.
var traceLevel int

const traceIdentPlaceholder string = "\t"

// identLevel returns the indentation for the current nesting depth.
func identLevel() string {
	return strings.Repeat(traceIdentPlaceholder, traceLevel-1)
}

// tracePrint writes fs at the current indentation level.
func tracePrint(fs string) {
	fmt.Printf("%s%s\n", identLevel(), fs)
}

func incIdent() { traceLevel++ }
func decIdent() { traceLevel-- }

// trace prints "BEGIN msg" one level deeper and returns msg so it can
// be threaded straight into untrace via defer.
func trace(msg string) string {
	incIdent()
	tracePrint("BEGIN " + msg)
	return msg
}

// untrace prints "END msg" and restores the previous nesting level.
func untrace(msg string) {
	tracePrint("END " + msg)
	decIdent()
}

View File

@ -1,7 +0,0 @@
The Monkey Programming Language.
To run a specific test function, use
```
go test -v -run 'TestFunctionName' ./test_dir/
```

View File

@ -1,64 +0,0 @@
package repl
import (
"bufio"
"fmt"
"io"
"monkey/evaluator"
"monkey/lexer"
"monkey/object"
"monkey/parser"
)
const MONKEY_FACE = ` __,__
.--. .-" "-. .--.
/ .. \/ .-. .-. \/ .. \
| | '| / Y \ |' | |
| \ \ \ 0 | 0 / / / |
\ '- ,\.-"""""""-./, -' /
''-' /_ ^ ^ _\ '-''
| \._ _./ |
\ \ '~' / /
'._ '-=-' _.'
'-----'
`
const PROMPT = ">> "
// Start runs the REPL: it reads one line at a time from in, lexes,
// parses, and evaluates it, and writes the result (or the parser
// errors) to out. A single Environment is shared across lines so
// definitions persist for the whole session.
func Start(in io.Reader, out io.Writer) {
	scanner := bufio.NewScanner(in)
	env := object.NewEnvironment()
	for {
		fmt.Fprintf(out, PROMPT)
		scanned := scanner.Scan()
		if !scanned {
			// EOF or read error: leave the REPL.
			return
		}
		line := scanner.Text()
		l_lexer := lexer.New(line)
		l_parser := parser.New(l_lexer)
		program := l_parser.ParseProgram()
		if len(l_parser.Errors()) != 0 {
			// Report parse errors and skip evaluation for this line.
			print_parser_errors(out, l_parser.Errors())
			continue
		}
		evaluated := evaluator.Eval(program, env)
		if evaluated != nil {
			io.WriteString(out, evaluated.Inspect())
			io.WriteString(out, "\n")
		}
	}
}
// print_parser_errors writes the monkey-face banner followed by every
// parser error, one per tab-indented line.
func print_parser_errors(out io.Writer, errs []string) {
	io.WriteString(out, MONKEY_FACE)
	io.WriteString(out, "Woops! I ran into some monkey business here!\n")
	io.WriteString(out, " parser errors:\n")
	for _, msg := range errs {
		io.WriteString(out, "\t"+msg+"\n")
	}
}

View File

@ -1,65 +0,0 @@
package token
// TokenType identifies a token's lexical category; its values are the
// string constants declared below.
type TokenType string

// Token is a single lexeme: its category plus the literal source text.
type Token struct {
	Type    TokenType
	Literal string
}
// Token type constants. The value doubles as a readable name (and, for
// operators and delimiters, the literal itself).
const (
	ILLEGAL = "ILLEGAL" // character the lexer does not recognize
	EOF     = "EOF"     // end of input
	COMMENT = "COMMENT" // TODO(tijani): Implement this!!
	// Identifiers and basic type literals
	IDENT = "IDENT"
	INT   = "INT"
	// Operators
	ASSIGN   = "="
	PLUS     = "+"
	MINUS    = "-"
	BANG     = "!"
	ASTERISK = "*"
	SLASH    = "/"
	EQ     = "=="
	NOT_EQ = "!="
	LT = "<"
	GT = ">"
	// Delimiters
	COMMA     = ","
	SEMICOLON = ";"
	LPAREN = "("
	RPAREN = ")"
	LBRACE = "{"
	RBRACE = "}"
	// Keywords
	FUNCTION = "FUNCTION"
	LET      = "LET"
	IF       = "IF"
	ELSE     = "ELSE"
	TRUE     = "TRUE"
	FALSE    = "FALSE"
	RETURN   = "RETURN"
)
// keywords maps reserved words to their keyword token types.
var keywords = map[string]TokenType{
	"fn":     FUNCTION,
	"let":    LET,
	"if":     IF,
	"else":   ELSE,
	"true":   TRUE,
	"false":  FALSE,
	"return": RETURN,
}

// LookupIdentifier returns the keyword token type for ident, or IDENT
// when it is a user-defined name.
func LookupIdentifier(ident string) TokenType {
	if keyword, ok := keywords[ident]; ok {
		return keyword
	}
	return IDENT
}

View File

@ -85,6 +85,10 @@ func Eval(node ast.Node, env *object.Environment) object.Object {
return args[0] return args[0]
} }
return apply_function(function, args) return apply_function(function, args)
case *ast.StringLiteral:
return &object.String { Value: node.Value }
} }
return nil return nil

View File

@ -237,6 +237,21 @@ func TestClosures(l_test *testing.T) {
test_integer_object(l_test, test_eval(input), 4) test_integer_object(l_test, test_eval(input), 4)
} }
// TestStringLiteral verifies that evaluating a string literal yields an
// *object.String carrying the literal's text.
func TestStringLiteral(l_test *testing.T) {
	input := `"Hello World!";`
	evaluated := test_eval(input)

	// Named str rather than string: the original shadowed the builtin
	// `string` type inside this test.
	str, ok := evaluated.(*object.String)
	if !ok {
		l_test.Fatalf("object is not String, got =%T (%+v)", evaluated, evaluated)
	}
	if str.Value != "Hello World!" {
		l_test.Errorf("String has wrong value, got=%q", str.Value)
	}
}
// Helpers // Helpers
func test_eval(input string) object.Object { func test_eval(input string) object.Object {
l_lexer := lexer.New(input) l_lexer := lexer.New(input)

View File

@ -62,6 +62,10 @@ func (l_lexer *Lexer) NextToken() token.Token {
tok = new_token(token.LT, l_lexer.current_char) tok = new_token(token.LT, l_lexer.current_char)
case '>': case '>':
tok = new_token(token.GT, l_lexer.current_char) tok = new_token(token.GT, l_lexer.current_char)
case '"':
tok.Literal = l_lexer.read_string()
tok.Type = token.STRING
case 0: case 0:
tok.Literal = "" tok.Literal = ""
tok.Type = token.EOF tok.Type = token.EOF
@ -134,3 +138,14 @@ func (l_lexer *Lexer) read_number() string {
func is_digit(ch byte) bool { func is_digit(ch byte) bool {
return '0' <= ch && ch <= '9' return '0' <= ch && ch <= '9'
} }
// read_string consumes characters until the closing double quote (or end
// of input) and returns the text between the quotes. Note: no escape
// sequences are supported, and an unterminated string at EOF simply
// yields everything read so far.
func (l_lexer *Lexer) read_string() string {
	start := l_lexer.position + 1 // skip the opening quote
	for l_lexer.read_char(); l_lexer.current_char != '"' && l_lexer.current_char != 0; l_lexer.read_char() {
	}
	return l_lexer.input[start:l_lexer.position]
}

View File

@ -25,6 +25,8 @@ func TestNextToken(t *testing.T) {
10 == 10; 10 == 10;
10 != 9; 10 != 9;
"foobar"
"foo bar"
` `
tests := []struct { tests := []struct {
expectedType token.TokenType expectedType token.TokenType
@ -111,6 +113,8 @@ func TestNextToken(t *testing.T) {
{token.INT, "9"}, {token.INT, "9"},
{token.SEMICOLON, ";"}, {token.SEMICOLON, ";"},
{token.STRING, "foobar"},
{token.STRING, "foo bar"},
{token.EOF, ""}, {token.EOF, ""},
} }

View File

@ -16,6 +16,7 @@ const (
RETURN_VALUE_OBJECT = "RETURN_VALUE" RETURN_VALUE_OBJECT = "RETURN_VALUE"
ERROR_OBJECT = "ERROR" ERROR_OBJECT = "ERROR"
FUNCTION_OBJECT = "FUNCTION" FUNCTION_OBJECT = "FUNCTION"
STRING_OBJECT = "STRING"
) )
type Object interface { type Object interface {
@ -101,3 +102,11 @@ func (f *Function) Inspect() string {
return out.String() return out.String()
} }
// String is the object-system representation of a Monkey string value.
type String struct {
	Value string // the string contents (without the surrounding quotes)
}

// Type reports the object's type tag, STRING_OBJECT.
func (s *String) Type() ObjectType { return STRING_OBJECT }

// Inspect returns the raw string value for display in the REPL.
func (s *String) Inspect() string { return s.Value }

View File

@ -85,6 +85,7 @@ func New(l_lexer *lexer.Lexer) *Parser {
l_parser.register_prefix(token.INT, l_parser.parse_integer_literal) l_parser.register_prefix(token.INT, l_parser.parse_integer_literal)
l_parser.register_prefix(token.BANG, l_parser.parse_prefix_expression) l_parser.register_prefix(token.BANG, l_parser.parse_prefix_expression)
l_parser.register_prefix(token.MINUS, l_parser.parse_prefix_expression) l_parser.register_prefix(token.MINUS, l_parser.parse_prefix_expression)
l_parser.register_prefix(token.STRING, l_parser.parse_string_literal)
// Infix Operation // Infix Operation
l_parser.infix_parse_functions = make(map[token.TokenType]infix_parse_function) l_parser.infix_parse_functions = make(map[token.TokenType]infix_parse_function)
@ -285,6 +286,13 @@ func (l_parser *Parser) parse_infix_expression(left ast.Expression) ast.Expressi
return expression return expression
} }
// parse_string_literal builds an *ast.StringLiteral from the current
// STRING token; registered as the prefix parse function for token.STRING.
func (l_parser *Parser) parse_string_literal() ast.Expression {
	tok := l_parser.current_token
	return &ast.StringLiteral{Token: tok, Value: tok.Literal}
}
func (l_parser *Parser) no_prefix_parse_function_error(l_token_type token.TokenType) { func (l_parser *Parser) no_prefix_parse_function_error(l_token_type token.TokenType) {
message := fmt.Sprintf("no prefix parse function for %s, found", l_token_type) message := fmt.Sprintf("no prefix parse function for %s, found", l_token_type)
l_parser.errors = append(l_parser.errors, message) l_parser.errors = append(l_parser.errors, message)

View File

@ -658,6 +658,25 @@ func TestLetStatements(l_test *testing.T) {
} }
} }
// TestStringLiteralExpression checks that the parser turns a quoted
// string into an *ast.StringLiteral expression with the right value.
func TestStringLiteralExpression(l_test *testing.T) {
	input := `"Hello world";`
	l_lexer := lexer.New(input)
	l_parser := New(l_lexer)
	program := l_parser.ParseProgram()
	check_parser_errors(l_test, l_parser)

	// Guard the index and the type assertion: the original used an
	// unchecked assertion, which panics instead of failing the test.
	if len(program.Statements) != 1 {
		l_test.Fatalf("program has wrong number of statements, got=%d", len(program.Statements))
	}
	statement, ok := program.Statements[0].(*ast.ExpressionStatement)
	if !ok {
		l_test.Fatalf("statement not *ast.ExpressionStatement, got=%T", program.Statements[0])
	}
	literal, ok := statement.Expression.(*ast.StringLiteral)
	if !ok {
		l_test.Fatalf("expression not *ast.StringLiteral, got=%T", statement.Expression)
	}
	if literal.Value != "Hello world" {
		l_test.Errorf("literal.Value not %q, got=%q", "Hello world", literal.Value)
	}
}
// Helpers // Helpers
func check_parser_errors(l_test *testing.T, l_parser *Parser) { func check_parser_errors(l_test *testing.T, l_parser *Parser) {

View File

@ -45,6 +45,8 @@ const (
TRUE = "TRUE" TRUE = "TRUE"
FALSE = "FALSE" FALSE = "FALSE"
RETURN = "RETURN" RETURN = "RETURN"
STRING = "STRING"
) )
var keywords = map[string]TokenType{ var keywords = map[string]TokenType{