Generate truth tables for almost any propositional statement (quantifiers like "for all" and "there exists" are not supported)
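The parser is whitespace-based: separate every operand and operator with single spaces, parenthesize each operation as in the example statement below, and leave the outermost operation without a surrounding pair of parentheses. A minimal call (once the file has been run, so createTable is defined) might look like:

createTable("(p and q) or (not r)", detailed=True)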
import itertools

stmt = "((p and (not q)) or (q and (not p))) or r"
# Example output for a different statement, "(p = q) IMPLIES r", run with
# detailed=True (the stmt above prints a wider table):
# |p     |q     |r     |p = q |(p = q) IMPLIES r |
# |True  |True  |True  |True  |True              |
# |True  |True  |False |True  |False             |
# |True  |False |True  |False |True              |
# |True  |False |False |False |True              |
# |False |True  |True  |False |True              |
# |False |True  |False |False |True              |
# |False |False |True  |True  |True              |
# |False |False |False |True  |False             |
def tokenize(stmt):
    # Split the statement into tokens. Each parenthesized group is replaced by
    # a placeholder of the form ~id~ and stored separately, so every token
    # string ends up as either "OP X" (unary) or "X OP Y" (binary).
    # Note: operands and operators must be separated by single spaces, and the
    # outermost operation must not be wrapped in an extra pair of parentheses.
    tokenstrs = {}
    i = 0
    curtoken = ""
    curId = 0
    prevtokens = []
    while i < len(stmt):
        curchar = stmt[i]
        if curchar == '(':
            prevtokens.append(curtoken)
            curtoken = ""
        elif curchar == ')':
            tokenstrs[str(curId)] = curtoken
            curtoken = prevtokens.pop() + "~" + str(curId) + "~"
            curId += 1
        else:
            curtoken += curchar
        i += 1
    tokenstrs['BASE'] = curtoken
    tokens = {}
    tokens['vars'] = []
    for i in tokenstrs:
        t = tokenstrs[i].split(" ")
        parts = []
        operation = ""
        if len(t) == 2:  # unary operations in the form "OP X"
            if t[1][0] == "~":  # X is a token of its own
                parts.append({'type': 'token', 'id': t[1][1:-1]})
            else:
                if t[1] not in tokens['vars']:
                    tokens['vars'].append(t[1])
                parts.append({'type': 'var', 'id': t[1]})
            operation = t[0].upper()
        elif len(t) == 3:  # binary operations in the form "X OP Y"
            if t[0][0] == "~":  # X is a token of its own
                parts.append({'type': 'token', 'id': t[0][1:-1]})
            else:
                if t[0] not in tokens['vars']:
                    tokens['vars'].append(t[0])
                parts.append({'type': 'var', 'id': t[0]})
            operation = t[1].upper()
            if t[2][0] == "~":  # Y is a token of its own
                parts.append({'type': 'token', 'id': t[2][1:-1]})
            else:
                if t[2] not in tokens['vars']:
                    tokens['vars'].append(t[2])
                parts.append({'type': 'var', 'id': t[2]})
        else:
            raise Exception("Cannot handle " + str(len(t) - 1) + "-ary operations")
        tokens[i] = {"op": operation, "parts": parts}
    tokens['vars'].sort()
    return tokens
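# For illustration, the shape of what tokenize returns, traced by hand from the
# code above (treat it as a sketch, not canonical output):
#   tokenize("(not p) or q")
#   => {'vars': ['p', 'q'],
#       '0':    {'op': 'NOT', 'parts': [{'type': 'var', 'id': 'p'}]},
#       'BASE': {'op': 'OR',  'parts': [{'type': 'token', 'id': '0'},
#                                       {'type': 'var', 'id': 'q'}]}}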
# Truth-functional implementations of the supported connectives. The operator
# word is upper-cased by tokenize, so the keys here are upper-case.
ops_impls = {
    "IMPLIES": lambda x, y: (x and y) or (not x),
    "=": lambda x, y: x == y,
    "AND": lambda x, y: x and y,
    "OR": lambda x, y: x or y,
    "NOT": lambda x: not x
}
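# Because the connective is just the operator word upper-cased, new connectives
# only need an entry in ops_impls. Hypothetical additions (not part of the
# original gist) that would make "(p xor q)" and "(p nand q)" parse:
# ops_impls["XOR"] = lambda x, y: x != y
# ops_impls["NAND"] = lambda x, y: not (x and y)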
def evaluateToken(token, tokens, vars):
    # Recursively evaluate a token: resolve each part to a boolean, either by
    # evaluating the sub-token it references or by looking up the variable
    # assignment, then apply the token's connective.
    partvals = []
    for part in token['parts']:
        if part['type'] == "token":
            partvals.append(evaluateToken(tokens[part['id']], tokens, vars))
        elif part['type'] == 'var':
            if part['id'] in vars:
                partvals.append(vars[part['id']])
            else:
                raise Exception("Variable " + part['id'] + " has no assigned value.")
    if token['op'] in ops_impls:
        return ops_impls[token['op']](*partvals)
    raise Exception("Unknown operation " + token['op'])
def evaluate(tokens, vars):
    return evaluateToken(tokens['BASE'], tokens, vars)
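# Example, hand-checked against the functions above: evaluate one assignment
# instead of printing a whole table.
#   evaluate(tokenize("p and (not q)"), {'p': True, 'q': False})  # -> True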
def stringifyToken(token, tokens):
    # Render a token back into readable form; sub-tokens are parenthesized.
    partstrs = []
    for part in token['parts']:
        if part['type'] == "token":
            partstrs.append("(" + stringifyToken(tokens[part['id']], tokens) + ")")
        elif part['type'] == "var":
            partstrs.append(part['id'])
    if len(token['parts']) == 1:
        return token['op'] + " " + partstrs[0]
    elif len(token['parts']) == 2:
        return partstrs[0] + " " + token['op'] + " " + partstrs[1]
    else:
        raise Exception("Cannot handle " + str(len(token['parts'])) + "-ary token")
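# For instance, for the statement "p and (not q)", stringifyToken(tokens['BASE'],
# tokens) renders "p AND (NOT q)": operators come out upper-cased and sub-tokens
# are wrapped back in parentheses.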
def createTable(stmt, detailed=False):
    # Print a truth table: one column per variable, then (with detailed=True)
    # one column per sub-expression, and finally the full statement.
    tokens = tokenize(stmt)
    header_row = []
    for var in tokens['vars']:
        header_row.append(var)
    for token in tokens:
        if token == "vars": continue
        if token != "BASE" and not detailed: continue
        header_row.append(stringifyToken(tokens[token], tokens))
    row_format = "|"
    for p in header_row:
        rowlen = max(6, len(p) + 1)  # at least wide enough for "False "
        row_format += "{:<" + str(rowlen) + "}|"
    print(row_format.format(*header_row))
    # Enumerate every True/False assignment to the variables.
    for vals in itertools.product([True, False], repeat=len(tokens['vars'])):
        vars = {}
        for i in range(len(vals)):
            vars[tokens['vars'][i]] = vals[i]
        print_data = []
        for x in vars:
            print_data.append(str(vars[x]))
        for token in tokens:
            if token == "vars": continue
            if token != "BASE" and not detailed: continue
            print_data.append(str(evaluateToken(tokens[token], tokens, vars)))
        print(row_format.format(*print_data))
createTable(stmt, True)
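# With detailed=False only the variable columns and the final statement column
# are printed. For the stmt above, the statement reduces to (p xor q) or r, so
# the last column is False exactly on the rows (p, q, r) = (True, True, False)
# and (False, False, False), and True everywhere else.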