From 028a685f7c19dee1af0482cf5ed142e751d36a62 Mon Sep 17 00:00:00 2001
From: Benedikt Böhm
Date: Thu, 21 May 2009 18:21:20 +0200
Subject: implement AST interfaces, which also supersedes parser tokens

---
 src/front/lexer.py | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/src/front/lexer.py b/src/front/lexer.py
index 5605fe3..ff67e6d 100644
--- a/src/front/lexer.py
+++ b/src/front/lexer.py
@@ -11,18 +11,18 @@ class Lexer:
         self.currentLine = ''

         # reservierte Wörter initialisieren
-        self.reservedWords = {'True': LeafToken(Tag.TRUE),
-                              'False': LeafToken(Tag.FALSE),
-                              '[': LeafToken(Tag.LBRAK),
-                              ']': LeafToken(Tag.RBRAK),
-                              '(': LeafToken(Tag.LPAREN),
-                              ')': LeafToken(Tag.RPAREN),
-                              ',': LeafToken(Tag.COMMA),
-                              'while': LeafToken(Tag.WHILE),
-                              'if': LeafToken(Tag.IF),
-                              'else': LeafToken(Tag.ELSE),
-                              'fun': LeafToken(Tag.FUN),
-                              'end': LeafToken(Tag.END)}
+        self.reservedWords = {'True': Token(Tag.BOOL, True),
+                              'False': Token(Tag.BOOL, False),
+                              '[': Token(Tag.LBRAK),
+                              ']': Token(Tag.RBRAK),
+                              '(': Token(Tag.LPAREN),
+                              ')': Token(Tag.RPAREN),
+                              ',': Token(Tag.COMMA),
+                              'while': Token(Tag.WHILE),
+                              'if': Token(Tag.IF),
+                              'else': Token(Tag.ELSE),
+                              'fun': Token(Tag.FUN),
+                              'end': Token(Tag.END)}
         return

     def reserve(self, word, token):
@@ -43,7 +43,7 @@
             # newline zurückgeben
             if self.doubleNewlineCheck:
                 self.doubleNewlineCheck = False
-                return LeafToken(Tag.NEWLINE)
+                return Token(Tag.NEWLINE)

         # leerzeichen entfernen
         self.currentLine = self.currentLine.strip()
@@ -59,7 +59,7 @@
         # Token parsen
         if self.currentLine.startswith('@'):
             self.currentLine = self.currentLine[1:]
-            return LeafToken(Tag.RETURN)
+            return Token(Tag.RETURN)

         # reservierte Wörter (da stehen auch schon erkannte Identifyer drine)
         for reservedWord, token in self.reservedWords.iteritems():
@@ -74,26 +74,26 @@
         match = re.match(r"^([0-9]+)", self.currentLine)
         if match:
             self.currentLine = self.currentLine[match.end(0):]
-            return LeafToken(Tag.NUMBER, int(match.group(0)))
+            return Token(Tag.NUMBER, int(match.group(0)))

         # operatoren matchen
         match = re.match(r"^(<=|==|>=|&&|\|\||<|>|\+|-|\*|/)", self.currentLine)
         if match:
             self.currentLine = self.currentLine[match.end(0):]
-            return LeafToken(Tag.OPERATOR, match.group(0))
+            return Token(Tag.OPERATOR, match.group(0))

         # idents matchen
         match = re.match(r"^([a-zA-Z][a-zA-Z0-9]*)", self.currentLine)
         if match:
             self.currentLine = self.currentLine[match.end(0):]
-            token = LeafToken(Tag.IDENT, match.group(0))
+            token = Token(Tag.IDENT, match.group(0))
             self.reserve(match.group(0), token)
             return token

         # assignments
         if self.currentLine.startswith('='):
             self.currentLine = self.currentLine[1:]
-            return LeafToken(Tag.ASSIGNMENT)
+            return Token(Tag.ASSIGNMENT)

         # wenn die programmausführung hier ist,
         # ist ein syntaxfehler aufgetreten
--
cgit v1.2.3
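The change above swaps every LeafToken(...) construction for Token(...), with literal values (booleans, numbers, operator strings, identifier names) passed as a second argument, e.g. Token(Tag.BOOL, True) and Token(Tag.NUMBER, int(match.group(0))). The Token and Tag definitions themselves are not part of this diff, so the following is only a minimal, hypothetical sketch of the interface the patched lexer appears to rely on: the class names come from the diff, but the field layout, the concrete Tag values, and the __repr__ are assumptions, not the repository's actual AST implementation.

    # Hypothetical sketch only -- the real Tag/Token classes live elsewhere in
    # the repository (the AST interfaces named in the subject) and may differ.

    class Tag:
        # Assumed enumeration of the token kinds referenced by lexer.py.
        (BOOL, NUMBER, IDENT, OPERATOR, NEWLINE, RETURN, ASSIGNMENT,
         LBRAK, RBRAK, LPAREN, RPAREN, COMMA,
         WHILE, IF, ELSE, FUN, END) = range(17)

    class Token:
        """A token carrying a tag plus an optional literal value."""
        def __init__(self, tag, value=None):
            self.tag = tag
            self.value = value

        def __repr__(self):
            if self.value is None:
                return 'Token(%s)' % self.tag
            return 'Token(%s, %r)' % (self.tag, self.value)

    # Tokens the patched lexer would now return:
    print(Token(Tag.BOOL, True))     # Token(0, True)
    print(Token(Tag.NUMBER, 42))     # Token(1, 42)
    print(Token(Tag.NEWLINE))        # Token(4)

Folding True and False into a single Tag.BOOL with a boolean value is consistent with the diff: the old Tag.TRUE/Tag.FALSE pair disappears, and the value field carries the distinction instead, the same way numbers, operators and identifiers already carry theirs.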