From 949368ff0715db6afd44517afbc93cd2bdbee518 Mon Sep 17 00:00:00 2001
From: Robert Lowry
Date: Thu, 21 Nov 2024 22:34:50 -0600
Subject: [PATCH] Implement assert (#86)

* add logic and comparison operators
* merge overlapping tokExprOp and tokAddressMode to tokSymbol
* implement assert testing
---
 compile.go                 | 47 +++++++++++++++++++++++--
 compile_test.go            | 28 +++++++++++++++
 expr.go                    | 14 +++++---
 expr_test.go               | 39 ++++++++++++++++-----
 lex.go                     | 72 +++++++++++++++++++++++++++++++-------
 lex_test.go                | 69 +++++++++++++++++++++++++++++++-----
 parser.go                  |  9 ++---
 parser_test.go             |  8 ++---
 token.go                   | 27 ++++++++++----
 warriors/88/imp.red        |  1 +
 warriors/94/bombspiral.red |  1 +
 warriors/94/imp.red        |  1 +
 12 files changed, 266 insertions(+), 50 deletions(-)

diff --git a/compile.go b/compile.go
index 2242098..8e8e598 100644
--- a/compile.go
+++ b/compile.go
@@ -125,7 +125,7 @@ func (c *compiler) expandExpression(expr []token, line int) ([]token, error) {
 		if labelOk {
 			val := (label - line) % int(c.m)
 			if val < 0 {
-				output = append(output, token{tokExprOp, "-"}, token{tokNumber, fmt.Sprintf("%d", -val)})
+				output = append(output, token{tokSymbol, "-"}, token{tokNumber, fmt.Sprintf("%d", -val)})
 			} else {
 				output = append(output, token{tokNumber, fmt.Sprintf("%d", val)})
 			}
@@ -140,6 +140,43 @@ func (c *compiler) expandExpression(expr []token, line int) ([]token, error) {
 	return output, nil
 }
 
+func (c *compiler) evaluateAssertion(assertText string) error {
+
+	assertTokens, err := LexInput(strings.NewReader(assertText))
+	if err != nil {
+		return err
+	}
+	assertTokens = assertTokens[:len(assertTokens)-1]
+	exprTokens, err := c.expandExpression(assertTokens, 0)
+	if err != nil {
+		return err
+	}
+	exprVal, err := evaluateExpression(exprTokens)
+	if err != nil {
+		return err
+	}
+	if exprVal == 0 {
+		return fmt.Errorf("assertion '%s' failed", assertText)
+	}
+	return nil
+}
+
+func (c *compiler) evaluateAssertions() error {
+	for _, line := range c.lines {
+		if line.typ != lineComment {
+			continue
+		}
+		if strings.HasPrefix(line.comment, ";assert") {
+			assertText := line.comment[7:]
+			err := c.evaluateAssertion(assertText)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
 func (c *compiler) assembleLine(in sourceLine) (Instruction, error) {
 	opLower := strings.ToLower(in.op)
 	var aMode, bMode AddressMode
@@ -289,7 +326,7 @@ func (c *compiler) expandFor(start, end int) error {
 				if j == 1 {
 					newValue = []token{{tokNumber, "0"}}
 				} else {
-					newValue = []token{{tokExprOp, "-"}, {tokNumber, fmt.Sprintf("%d", -(1 - j))}}
+					newValue = []token{{tokSymbol, "-"}, {tokNumber, fmt.Sprintf("%d", -(1 - j))}}
 				}
 			}
 			thisLine = thisLine.subSymbol(label, newValue)
@@ -351,9 +388,13 @@ func (c *compiler) expandForLoops() error {
 }
 
 func (c *compiler) compile() (WarriorData, error) {
-
 	c.loadSymbols()
 
+	err := c.evaluateAssertions()
+	if err != nil {
+		return WarriorData{}, err
+	}
+
 	graph := buildReferenceGraph(c.values)
 	cyclic, cyclicKey := graphContainsCycle(graph)
 	if cyclic {
diff --git a/compile_test.go b/compile_test.go
index 190b89a..1d6628d 100644
--- a/compile_test.go
+++ b/compile_test.go
@@ -173,3 +173,31 @@ func TestCompileDoubleForLoop(t *testing.T) {
 	}, w.Code)
 	assert.Equal(t, 7, w.Start)
 }
+
+func TestAssertPositive(t *testing.T) {
+	config := ConfigNOP94
+
+	input := `
+;assert CORESIZE == 8000
+dat.f $123, $123
+`
+
+	w, err := CompileWarrior(strings.NewReader(input), config)
+	require.NoError(t, err)
+	assert.Equal(t, []Instruction{
+		{Op: DAT, OpMode: F, AMode: DIRECT, A: 123, BMode: DIRECT, B: 123},
+	}, w.Code)
+}
+
+func TestAssertNegative(t *testing.T) {
+	config := ConfigNOP94
+
+	input := `
+;assert CORESIZE == 8192
+dat.f $123, $123
+`
+
+	w, err := CompileWarrior(strings.NewReader(input), config)
+	require.Error(t, err)
+	require.Equal(t, WarriorData{}, w)
+}
diff --git a/expr.go b/expr.go
index fa03a01..b7281ee 100644
--- a/expr.go
+++ b/expr.go
@@ -75,11 +75,11 @@ func expandExpressions(values map[string][]token, graph map[string][]string) (ma
 
 func combineSigns(expr []token) []token {
 	out := make([]token, 0, len(expr))
-	lastOut := token{tokEOF, ""}
+	lastOut := token{typ: tokEOF}
 
 	// please forgive me for this lol
 	for i := 0; i < len(expr); i++ {
-		if lastOut.typ == tokExprOp {
+		if lastOut.typ == tokSymbol {
 			negativeFound := false
 			for ; i < len(expr); i++ {
 				if !(expr[i].val == "-" || expr[i].val == "+") {
@@ -90,7 +90,7 @@ func combineSigns(expr []token) []token {
 				}
 			}
 			if negativeFound {
-				out = append(out, token{tokExprOp, "-"})
+				out = append(out, token{tokSymbol, "-"})
 			}
 			if i < len(expr) {
 				out = append(out, expr[i])
@@ -111,7 +111,7 @@ func flipDoubleNegatives(expr []token) []token {
 	for i := 0; i < len(expr); i++ {
 		if expr[i].val == "-" {
 			if i+1 < len(expr) && expr[i+1].val == "-" {
-				out = append(out, token{tokExprOp, "+"})
+				out = append(out, token{tokSymbol, "+"})
 				i += 1
 				continue
 			}
@@ -122,6 +122,12 @@ func flipDoubleNegatives(expr []token) []token {
 }
 
 func evaluateExpression(expr []token) (int, error) {
+	for _, tok := range expr {
+		if tok.typ == tokText || !tok.IsExpressionTerm() {
+			return 0, fmt.Errorf("unexpected token in expression: '%s'", tok)
+		}
+	}
+
 	combinedExpr := combineSigns(expr)
 	flippedExpr := flipDoubleNegatives(combinedExpr)
 
diff --git a/expr_test.go b/expr_test.go
index 47a8d86..e4be977 100644
--- a/expr_test.go
+++ b/expr_test.go
@@ -11,8 +11,8 @@ import (
 func TestExpandExpressions(t *testing.T) {
 	values := map[string][]token{
 		"a": {{tokNumber, "1"}},
-		"c": {{tokText, "a"}, {tokExprOp, "*"}, {tokText, "b"}},
-		"b": {{tokText, "a"}, {tokExprOp, "+"}, {tokNumber, "2"}},
+		"c": {{tokText, "a"}, {tokSymbol, "*"}, {tokText, "b"}},
+		"b": {{tokText, "a"}, {tokSymbol, "+"}, {tokNumber, "2"}},
 	}
 	graph := map[string][]string{
 		"b": {"a"},
@@ -23,8 +23,8 @@ func TestExpandExpressions(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, map[string][]token{
 		"a": {{tokNumber, "1"}},
-		"b": {{tokNumber, "1"}, {tokExprOp, "+"}, {tokNumber, "2"}},
-		"c": {{tokNumber, "1"}, {tokExprOp, "*"}, {tokNumber, "1"}, {tokExprOp, "+"}, {tokNumber, "2"}},
+		"b": {{tokNumber, "1"}, {tokSymbol, "+"}, {tokNumber, "2"}},
+		"c": {{tokNumber, "1"}, {tokSymbol, "*"}, {tokNumber, "1"}, {tokSymbol, "+"}, {tokNumber, "2"}},
 	}, output)
 }
 
@@ -37,8 +37,8 @@ func TestCombineSigns(t *testing.T) {
 			input: "1++-2",
 			output: []token{
 				{tokNumber, "1"},
-				{tokExprOp, "+"},
-				{tokExprOp, "-"},
+				{tokSymbol, "+"},
+				{tokSymbol, "-"},
 				{tokNumber, "2"},
 			},
 		},
@@ -46,8 +46,8 @@ func TestCombineSigns(t *testing.T) {
 			input: "1-+-2",
 			output: []token{
 				{tokNumber, "1"},
-				{tokExprOp, "-"},
-				{tokExprOp, "-"},
+				{tokSymbol, "-"},
+				{tokSymbol, "-"},
 				{tokNumber, "2"},
 			},
 		},
@@ -73,7 +73,7 @@ func TestFlipDoubleNegatives(t *testing.T) {
 			input: "1--1",
 			output: []token{
 				{tokNumber, "1"},
-				{tokExprOp, "+"},
+				{tokSymbol, "+"},
 				{tokNumber, "1"},
 			},
 		},
@@ -103,6 +103,23 @@ func TestEvaluateExpressionPositive(t *testing.T) {
 
 		// handle signs
 		"1 - -1": 2,
+
+		// logic
+		"1 > 2": 0,
+		"2 > 1": 1,
+		"1 < 2": 1,
+		"2 < 1": 0,
+		"1 >= 1": 1,
+		"2 <= 2": 1,
+		"8000 == 8000": 1,
+		"8000 == 800": 0,
+		// hmmm, these need to be fixed
+		// "1 && 1": 1,
+		// "1 && 0": 0,
+		// "1 || 1": 1,
+		// "1 || 0": 0,
+		"2 == 1 || 2 == 2": 1,
+		"2 == 1 || 2 == 3": 0,
 	}
 
 	for input, expected := range testCases {
@@ -110,6 +127,9 @@ func TestEvaluateExpressionPositive(t *testing.T) {
 		tokens, err := lexer.Tokens()
 		require.NoError(t, err)
 
+		// trim EOF from input
+		tokens = tokens[:len(tokens)-1]
+
 		val, err := evaluateExpression(tokens)
 		require.NoError(t, err)
 		assert.Equal(t, expected, val)
@@ -120,6 +140,7 @@ func TestEvaluateExpressionNegative(t *testing.T) {
 	cases := []string{
 		")21",
 		"2^3",
+		"2{2",
 	}
 
 	for _, input := range cases {
diff --git a/lex.go b/lex.go
index 964151c..34e9cff 100644
--- a/lex.go
+++ b/lex.go
@@ -65,6 +65,11 @@ func newLexer(r io.Reader) *lexer {
 	return lex
 }
 
+func LexInput(r io.Reader) ([]token, error) {
+	lexer := newLexer(r)
+	return lexer.Tokens()
+}
+
 func (l *lexer) next() (rune, bool) {
 	if l.atEOF {
 		return '\x00', true
@@ -155,15 +160,12 @@ func lexInput(l *lexer) lexStateFn {
 		return lexNumber
 	}
 
-	// handle comments
-	if l.nextRune == ';' {
-		return lexComment
-	}
-
 	// dispatch based on next rune, or error
 	switch l.nextRune {
 	case '\x00':
 		l.tokens <- token{tokEOF, ""}
+	case ';':
+		return lexComment
 	case ',':
 		return l.emitConsume(token{tokComma, ","}, lexInput)
 	case '(':
@@ -179,7 +181,7 @@ func lexInput(l *lexer) lexStateFn {
 	case '/':
 		fallthrough
 	case '%':
-		return l.emitConsume(token{tokExprOp, string(l.nextRune)}, lexInput)
+		return l.emitConsume(token{tokSymbol, string(l.nextRune)}, lexInput)
 	case '$':
 		fallthrough
 	case '#':
@@ -189,13 +191,19 @@ func lexInput(l *lexer) lexStateFn {
 	case '{':
 		fallthrough
 	case '}':
-		fallthrough
+		return l.emitConsume(token{tokSymbol, string(l.nextRune)}, lexInput)
 	case '<':
-		fallthrough
+		return l.consume(lexLt)
 	case '>':
-		return l.emitConsume(token{tokAddressMode, string(l.nextRune)}, lexInput)
+		return l.consume(lexGt)
 	case ':':
 		return l.emitConsume(token{tokColon, ":"}, lexInput)
+	case '=':
+		return l.consume(lexEquals)
+	case '|':
+		return l.consume(lexPipe)
+	case '&':
+		return l.consume(lexAnd)
 	case '\x1a':
 		return l.consume(lexInput)
 	default:
@@ -276,7 +284,47 @@ func lexComment(l *lexer) lexStateFn {
 	return lexInput
 }
 
-func LexInput(r io.Reader) ([]token, error) {
-	lexer := newLexer(r)
-	return lexer.Tokens()
+func lexEquals(l *lexer) lexStateFn {
+	if l.nextRune == '=' {
+		return l.emitConsume(token{tokSymbol, "=="}, lexInput)
+	} else {
+		l.tokens <- token{tokError, fmt.Sprintf("expected '=' after '=', got '%s'", string(l.nextRune))}
+		return nil
+	}
+}
+
+func lexPipe(l *lexer) lexStateFn {
+	if l.nextRune == '|' {
+		return l.emitConsume(token{tokSymbol, "||"}, lexInput)
+	} else {
+		l.tokens <- token{tokError, fmt.Sprintf("expected '|' after '|', got '%s'", string(l.nextRune))}
+		return nil
+	}
+}
+
+func lexAnd(l *lexer) lexStateFn {
+	if l.nextRune == '&' {
+		return l.emitConsume(token{tokSymbol, "&&"}, lexInput)
+	} else {
+		l.tokens <- token{tokError, fmt.Sprintf("expected '&' after '&', got '%s'", string(l.nextRune))}
+		return nil
+	}
+}
+
+func lexGt(l *lexer) lexStateFn {
+	if l.nextRune == '=' {
+		return l.emitConsume(token{tokSymbol, ">="}, lexInput)
+	} else {
+		l.tokens <- token{tokSymbol, ">"}
+		return lexInput
+	}
+}
+
+func lexLt(l *lexer) lexStateFn {
+	if l.nextRune == '=' {
+		return l.emitConsume(token{tokSymbol, "<="}, lexInput)
+	} else {
+		l.tokens <- token{tokSymbol, "<"}
+		return lexInput
+	}
 }
diff --git a/lex_test.go b/lex_test.go
index 1b10660..b0532bc 100644
--- a/lex_test.go
+++ b/lex_test.go
@@ -12,14 +12,20 @@ import (
 type lexTestCase struct {
 	input    string
 	expected []token
+	err      bool
 }
 
 func runLexTests(t *testing.T, setName string, testCases []lexTestCase) {
 	for i, test := range testCases {
 		l := newLexer(strings.NewReader(test.input))
 		out, err := l.Tokens()
-		require.NoError(t, err, fmt.Errorf("%s test %d: error: %s", setName, i, err))
-		assert.Equal(t, test.expected, out, fmt.Sprintf("%s test %d", setName, i))
+		if test.err {
+			require.Error(t, err, fmt.Sprintf("%s test %d", setName, i))
+			require.Equal(t, test.expected, out, fmt.Sprintf("%s test %d", setName, i))
+		} else {
+			require.NoError(t, err, fmt.Errorf("%s test %d: error: %s", setName, i, err))
+			assert.Equal(t, test.expected, out, fmt.Sprintf("%s test %d", setName, i))
+		}
 	}
 }
 
@@ -43,11 +49,11 @@ func TestLexer(t *testing.T) {
 			expected: []token{
 				{tokText, "start"},
 				{tokText, "mov"},
-				{tokAddressMode, "#"},
-				{tokExprOp, "-"},
+				{tokSymbol, "#"},
+				{tokSymbol, "-"},
 				{tokNumber, "1"},
 				{tokComma, ","},
-				{tokAddressMode, "$"},
+				{tokSymbol, "$"},
 				{tokNumber, "2"},
 				{tokComment, "; comment"},
 				{tokNewline, ""},
@@ -61,10 +67,10 @@ func TestLexer(t *testing.T) {
 				{tokText, "equ"},
 				{tokParenL, "("},
 				{tokNumber, "1"},
-				{tokExprOp, "+"},
+				{tokSymbol, "+"},
 				{tokNumber, "3"},
 				{tokParenR, ")"},
-				{tokExprOp, "-"},
+				{tokSymbol, "-"},
 				{tokText, "start"},
 				{tokNewline, ""},
 				{tokEOF, ""},
@@ -94,7 +100,7 @@ func TestLexer(t *testing.T) {
 		{
 			input: "#",
 			expected: []token{
-				{tokAddressMode, "#"},
+				{tokSymbol, "#"},
 				{tokEOF, ""},
 			},
 		},
@@ -122,11 +128,58 @@ func TestLexer(t *testing.T) {
 				{tokEOF, ""},
 			},
 		},
+		{
+			input: "for CORESIZE==1\n",
+			expected: []token{
+				{tokText, "for"},
+				{tokText, "CORESIZE"},
+				{tokSymbol, "=="},
+				{tokNumber, "1"},
+				{tokNewline, ""},
+				{tokEOF, ""},
+			},
+		},
+		{
+			input: "CORESIZE==8000||CORESIZE==800\n",
+			expected: []token{
+				{tokText, "CORESIZE"},
+				{tokSymbol, "=="},
+				{tokNumber, "8000"},
+				{tokSymbol, "||"},
+				{tokText, "CORESIZE"},
+				{tokSymbol, "=="},
+				{tokNumber, "800"},
+				{tokNewline, ""},
+				{tokEOF, ""},
+			},
+		},
+		{
+			input: "1&&2\n",
+			expected: []token{
+				{tokNumber, "1"},
+				{tokSymbol, "&&"},
+				{tokNumber, "2"},
+				{tokNewline, ""},
+				{tokEOF, ""},
+			},
+		},
 	}
 
 	runLexTests(t, "TestLexer", testCases)
 }
 
+func TestLexNegative(t *testing.T) {
+	inputs := []string{
+		"1 =! 0",
+	}
+
+	for _, input := range inputs {
+		tokens, err := LexInput(strings.NewReader(input))
+		require.NoError(t, err)
+		require.Equal(t, tokError, tokens[len(tokens)-1].typ)
+	}
+}
+
 func TestLexEnd(t *testing.T) {
 	l := newLexer(strings.NewReader("test mov 0, 1\n"))
 
diff --git a/parser.go b/parser.go
index 734cd6b..44e56d5 100644
--- a/parser.go
+++ b/parser.go
@@ -396,14 +396,15 @@ func parseOp(p *parser) parseStateFn {
 
 	p.next()
 
+	if p.nextToken.IsAddressMode() {
+		return parseModeA
+	}
 	if p.nextToken.IsExpressionTerm() && p.nextToken.val != "*" {
 		return parseExprA
 	}
 
 	switch p.nextToken.typ {
-	case tokAddressMode:
-		return parseModeA
-	case tokExprOp:
+	case tokSymbol:
 		if p.nextToken.val == "*" {
 			return parseModeA
 		}
@@ -471,7 +472,7 @@ func parseExprA(p *parser) parseStateFn {
 func parseComma(p *parser) parseStateFn {
 	p.next()
 
-	if p.nextToken.typ == tokAddressMode || (p.nextToken.typ == tokExprOp && p.nextToken.val == "*") {
+	if p.nextToken.IsAddressMode() {
 		return parseModeB
 	} else if p.nextToken.IsExpressionTerm() {
 		return parseExprB
diff --git a/parser_test.go b/parser_test.go
index 656f128..4c965f5 100644
--- a/parser_test.go
+++ b/parser_test.go
@@ -101,13 +101,13 @@ func TestParserPositive(t *testing.T) {
 			op:    "mov",
 			amode: "$",
 			a: []token{
-				{typ: tokExprOp, val: "-"},
+				{typ: tokSymbol, val: "-"},
 				{typ: tokNumber, val: "1"},
 			},
 			bmode: "$",
 			b: []token{
 				{typ: tokNumber, val: "2"},
-				{typ: tokExprOp, val: "+"},
+				{typ: tokSymbol, val: "+"},
 				{typ: tokNumber, val: "2"},
 			},
 			comment: "",
@@ -122,12 +122,12 @@ func TestParserPositive(t *testing.T) {
 			op:    "mov",
 			amode: "*",
 			a: []token{
-				{typ: tokExprOp, val: "-"},
+				{typ: tokSymbol, val: "-"},
 				{typ: tokNumber, val: "1"},
 			},
 			bmode: "*",
 			b: []token{
-				{typ: tokExprOp, val: "-"},
+				{typ: tokSymbol, val: "-"},
 				{typ: tokNumber, val: "1"},
 			},
 			comment: "",
diff --git a/token.go b/token.go
index 05ddb2c..029733f 100644
--- a/token.go
+++ b/token.go
@@ -5,11 +5,10 @@ import "strings"
 type tokenType uint8
 
 const (
-	tokError tokenType = iota // returned when an error is encountered
-	tokText                   // used for labels, symbols, and opcodes
-	tokAddressMode            // $ # { } < >
-	tokNumber                 // (optionally) signed integer
-	tokExprOp                 // + - * / % ==
+	tokError tokenType = iota // returned when an error is encountered
+	tokText                   // used for labels, symbols, and opcodes
+	tokNumber                 // (optionally) signed integer
+	tokSymbol                 // address modes and symbols for arithmetic, comparison, and logic
 	tokComma
 	tokColon
 	tokParenL
@@ -50,6 +49,16 @@ func (t token) IsOp() bool {
 	return t.IsPseudoOp()
 }
 
+func (t token) IsAddressMode() bool {
+	if t.typ != tokSymbol {
+		return false
+	}
+	if t.val == "$" || t.val == "#" || t.val == "@" || t.val == "*" || t.val == "{" || t.val == "<" || t.val == "}" || t.val == ">" {
+		return true
+	}
+	return false
+}
+
 func (t token) NoOperandsOk() bool {
 	lower := strings.ToLower(t.val)
 	return lower == "end" || lower == "rof"
@@ -73,7 +82,13 @@ func (t token) IsPseudoOp() bool {
 }
 
 func (t token) IsExpressionTerm() bool {
-	if t.typ == tokExprOp || t.typ == tokNumber || t.typ == tokText || t.typ == tokParenL || t.typ == tokParenR {
+	if t.typ == tokSymbol {
+		if t.val == "}" || t.val == "{" || t.val == "#" || t.val == "$" || t.val == "@" {
+			return false
+		}
+		return true
+	}
+	if t.typ == tokNumber || t.typ == tokText || t.typ == tokParenL || t.typ == tokParenR {
 		return true
 	}
 	return false
diff --git a/warriors/88/imp.red b/warriors/88/imp.red
index b60b717..ae02590 100644
--- a/warriors/88/imp.red
+++ b/warriors/88/imp.red
@@ -3,6 +3,7 @@
 ;author A K Dewdney
 ;strategy this is the simplest program
 ;strategy it was described in the initial articles
+;assert 1
 
 MOV $ 0, $ 1
 END 0
diff --git a/warriors/94/bombspiral.red b/warriors/94/bombspiral.red
index 0172980..bb69a76 100644
--- a/warriors/94/bombspiral.red
+++ b/warriors/94/bombspiral.red
@@ -2,6 +2,7 @@
 ;name bomb spiral
 ;author Robert Lowry
 ;strategy stone and imp launcher
+;assert CORESIZE == 8000
 
 spl istart
 jmp bstart
diff --git a/warriors/94/imp.red b/warriors/94/imp.red
index 2b74dac..4dd32e7 100644
--- a/warriors/94/imp.red
+++ b/warriors/94/imp.red
@@ -3,6 +3,7 @@
 ;author A K Dewdney
 ;strategy this is the simplest program
 ;strategy it was described in the initial articles
+;assert 1
 
 ORG     0
 MOV.I   # 0, $ 1
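
Usage sketch, not part of the diff: a failed ;assert surfaces to callers as an ordinary compile error, since evaluateAssertions() runs at the start of compile(). The snippet below shows that caller-visible contract using only names that appear in the tests above (CompileWarrior, ConfigNOP94, dat.f, CORESIZE); the file and package names are assumptions, not taken from the repository.

// assert_usage_test.go (hypothetical file name; the package clause is assumed
// to match the rest of the repository, which this diff does not show)
package gmars

import (
	"strings"
	"testing"
)

// TestAssertUsage sketches the caller-visible behaviour of ;assert:
// a true expression compiles normally, a false one becomes a compile error.
func TestAssertUsage(t *testing.T) {
	// CORESIZE is 8000 under ConfigNOP94, so this assertion holds.
	ok := "\n;assert CORESIZE == 8000 || CORESIZE == 800\ndat.f $123, $123\n"
	if _, err := CompileWarrior(strings.NewReader(ok), ConfigNOP94); err != nil {
		t.Fatalf("expected assertion to pass: %v", err)
	}

	// A false assertion aborts compilation with an "assertion ... failed" error.
	bad := "\n;assert CORESIZE == 8192\ndat.f $123, $123\n"
	if _, err := CompileWarrior(strings.NewReader(bad), ConfigNOP94); err == nil {
		t.Fatal("expected failing assertion to abort compilation")
	}
}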