from __future__ import annotations

import re
from enum import Enum
from typing import List, Tuple

from dfa_tree import create_dfa_tree, State
from symbol_table import SymbolTable
from utils import add_tokens_to_file, add_lexical_errors_to_file


class ErrorFound(Exception):
    """Raised internally after a lexical error has been recorded."""


class TokenType(Enum):
    COMMENT = r"(\/\*(\*(?!\/)|[^*])*\*\/)|(\/\/.*\n)"
    ID = r"^[A-Za-z][A-Za-z0-9]*$"
    KEYWORD = r"^(if|endif|else|void|int|repeat|break|until|return)$"
    NUM = r"^[0-9]+$"
    SYMBOL = r"^(;|:|,|\[|\]|\(|\)|{|}|\+|-|\*|=|<|==)$"
    WHITESPACE = r"^(\x09|\x0A|\x0B|\x0C|\x20)$"
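
# Illustrative classifications under the patterns above (hypothetical lexemes):
#   "repeat" -> KEYWORD, "x1" -> ID, "42" -> NUM, "==" -> SYMBOL
# ("repeat" also matches ID, but KEYWORD wins; see get_type_or_error below.)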


class LexicalError(Enum):
    UNMATCHED_COMMENT = (r"\*/", "Unmatched comment")
    INVALID_NUMBER = (r"^[0-9]+[^0-9]+$", "Invalid number")
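
# Illustrative errors (hypothetical lexemes): "3d" matches INVALID_NUMBER,
# and a stray "*/" outside any comment matches UNMATCHED_COMMENT.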


class Token:
    def __init__(self, _type: str | None, lexeme: str):
        self.type = _type
        self.lexeme = lexeme

    @property
    def parse_name(self):
        if self.type in ["ID", "NUM"]:
            return self.type
        if self.type == "SYMBOL":
            return self.lexeme[0]
        return self.lexeme

    @property
    def ilegal_token_message(self):
        if self.lexeme == "$":
            return "Unexpected EOF"
        return f'illegal {self.lexeme if self.type not in ["ID", "NUM"] else self.type}'

    def __str__(self):
        return f"({self.type}, {self.lexeme})"


# global vars: tokens and lexical errors collected per line number
token_dict: dict[int, List[Token]] = {}
error_dict: dict[int, List[Tuple]] = {}


class Scanner:
    def __init__(self, symbol_table: SymbolTable):
        self.dfa_mother_state = create_dfa_tree()
        self.buffer = []
        self.line_number = 1
        self.char_pointer = 0
        self.char = ""
        self.next_char = ""
        self.last_comment_line_number = 0
        self.next_selected_state = None
        self.symbol_table = symbol_table

    def get_next_token(self):
        # Fill char and next_char from the buffer; otherwise read them from the input file
        if not self.buffer:
            self.char = self.get_next_char()
            self.next_char = self.get_next_char()
        else:
            if len(self.buffer) >= 2:
                self.char = self.buffer[-2]
                self.next_char = self.buffer[-1]
            else:
                self.char = self.buffer[-1]
                self.next_char = self.get_next_char()
        # next line
        if self.char == "\n":
            self.line_number += 1
        # update last_comment_line_number
        if self.char == "/" and self.next_char in ["/", "*"]:
            self.last_comment_line_number = self.line_number
        # pick the DFA state to continue from: either a fresh start state for
        # the current character, or the state carried over from the last call
        if self.next_selected_state is None:
            selected_state: State = self.dfa_mother_state.next_dfa_tree_state(self.char)
        else:
            selected_state = self.next_selected_state
            self.next_selected_state = None
        # check whether the next character can extend the current match
        next_state = None
        if selected_state:
            next_state = selected_state.next_dfa_tree_state(self.next_char)
        can_be_continued = next_state is not None
        # end of file
        if self.char == "":
            return Token(None, "$")
        if can_be_continued:
            self.char = self.next_char
            self.next_char = self.get_next_char()
            self.next_selected_state = next_state
        elif self.buffer:
            self.next_selected_state = None
            token = self.add_token_to_array()
            if token is not None:
                return token
        return self.get_next_token()

    def get_next_char(self):
        # Note: re-opens and re-reads the file from the start on every call;
        # returns the character at char_pointer, or "" at end of file.
        self.char_pointer += 1
        with open("input.txt", "r") as file:
            char = file.read(self.char_pointer)
        self.buffer.append(char[self.char_pointer - 1:])
        return char[self.char_pointer - 1:]

    def add_token_to_array(self) -> Token | None:
        token = self.get_token_from_buffer()
        try:
            _type = self.get_type_or_error(token)
        except ErrorFound:
            return None
        # whitespace and comments are consumed but never reported
        if _type in ["", TokenType.WHITESPACE, TokenType.COMMENT]:
            return None
        token_dict.setdefault(self.line_number, []).append(Token(_type.name, token))
        if _type in [TokenType.ID, TokenType.KEYWORD]:
            if not self.symbol_table.include(token):
                self.symbol_table.insert(token)
        return Token(_type.name, token)

    def get_token_from_buffer(self):
        # everything except the last buffered character forms the lexeme;
        # the last character is kept as the start of the next token
        length = len(self.buffer) - 1
        token = self.buffer[:length]
        self.buffer = [self.buffer[length]]
        return "".join(token)

    def save_to_file(self):
        add_tokens_to_file(self.line_number, token_dict)
        self.symbol_table.save_to_file()
        add_lexical_errors_to_file(self.line_number, error_dict)

    def get_type_or_error(self, token):
        if token == "":
            raise ErrorFound
        if token[:2] == "/*" and token[-2:] != "*/":
            error_dict.setdefault(self.last_comment_line_number, []).append(
                ("Unclosed comment", token[:7] + "..." if len(token) > 7 else token)
            )
            raise ErrorFound
        for error_regex in LexicalError:
            if re.match(error_regex.value[0], token):
                error_dict.setdefault(self.line_number, []).append(
                    (error_regex.value[1], token)
                )
                raise ErrorFound
        _type = ""
        # the last matching pattern wins, so KEYWORD (declared after ID)
        # takes precedence over ID for reserved words
        for regex in TokenType:
            if re.search(regex.value, token):
                _type = regex
        if _type == "":
            error_dict.setdefault(self.line_number, []).append(("Invalid input", token))
            raise ErrorFound
        return _type
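

# A minimal driver sketch (assumptions: SymbolTable takes no constructor
# arguments and "input.txt" exists, since get_next_char reads it; neither is
# confirmed by this file alone):
if __name__ == "__main__":
    scanner = Scanner(SymbolTable())
    while True:
        token = scanner.get_next_token()
        if token.lexeme == "$":  # EOF sentinel produced by get_next_token
            break
        print(scanner.line_number, token)
    scanner.save_to_file()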