Commit a2fd033
Update: use ONA for reasoning properly
patham9 committed Apr 22, 2023
1 parent d3cfe3d commit a2fd033
Showing 4 changed files with 60 additions and 29 deletions.
15 changes: 8 additions & 7 deletions Control.py
@@ -24,7 +24,7 @@
 
 from Memory import *
 
-def Control_cycle(currentTime, memory, cmd, userQuestion, PrintMemoryUpdates, PrintTruthValues):
+def Control_cycle(inp, currentTime, memory, cmd, userQuestion, PrintMemoryUpdates, PrintTruthValues):
     AlreadyExecuted = set([])
     for x in cmd:
         if len(x) < 3:
@@ -51,9 +51,10 @@ def Control_cycle(currentTime, memory, cmd, userQuestion, PrintMemoryUpdates, PrintTruthValues):
         isInput = x.startswith("RelationClaim(") or x.startswith("PropertyClaim(")
         if isInput and ")" in x:
             sentence = x.split("(")[1].split(")")[0].replace('"','').replace("'","").replace(".", "").lower()
-            Memory_digest_sentence(currentTime, memory, sentence, truth, PrintMemoryUpdates)
-        printsentence = sentence if isInput else x
-        if PrintTruthValues:
-            print(f"{printsentence}. truth={truth}")
-        else:
-            print(printsentence)
+            digested = Memory_digest_sentence(inp, currentTime, memory, sentence, truth, PrintMemoryUpdates)
+            if digested:
+                printsentence = sentence if isInput else x
+                if PrintTruthValues:
+                    print(f"{printsentence}. truth={truth}")
+                else:
+                    print(printsentence)
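
For readers following along, here is a runnable standalone sketch of the parse-then-gate flow this hunk introduces; parse_claim and digest_stub are hypothetical stand-ins (the real Memory_digest_sentence needs ONA and the full memory state):

def parse_claim(x):
    # 'RelationClaim("Cats eat fish.")' -> "cats eat fish"
    return x.split("(")[1].split(")")[0].replace('"', '').replace("'", "").replace(".", "").lower()

def digest_stub(sentence):
    # the real digester only accepts three-word "subject verb object" sentences
    return len(sentence.split(" ")) == 3

for x in ['RelationClaim("Cats eat fish.")', 'PropertyClaim("The sky is very blue.")']:
    sentence = parse_claim(x)
    if digest_stub(sentence):  # with this commit, only digested claims are printed
        print(sentence)        # -> cats eat fish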
61 changes: 43 additions & 18 deletions Memory.py
@@ -26,9 +26,9 @@
 import json
 import sys
 import os
-sys.path.append("/home/tc/OpenNARS-for-Applications/misc/Python/")
 cwd = os.getcwd()
-os.chdir("/home/tc/OpenNARS-for-Applications/misc/Python/")
+sys.path.append(cwd + "/OpenNARS-for-Applications/misc/Python/")
+os.chdir(cwd + "/OpenNARS-for-Applications/misc/Python/")
 import NAR
 os.chdir(cwd)

@@ -72,7 +72,15 @@ def Memory_generate_prompt(memory, prompt_start, prompt_end, attention_buffer_size):
                 flags.append("Contradictory")
             certainty = Truth_Expectation((f,c))
             truthtype = '"' + " ".join(flags) + '"'
-            prompt_memory += f"i={i}: {x[0]}. truthtype={truthtype} certainty={certainty}\n"
+            term = x[0][1:-1]
+            if " * " in term:
+                arg1 = term.split("(")[1].split(" * ")[0].strip()
+                arg2 = term.split(")")[0].split(" * ")[1].strip()
+                relarg = term.split(" --> ")[1].strip()
+                term = arg1 + " " + relarg + " " + arg2
+            else:
+                term = term.replace(" --> ", " isa ")
+            prompt_memory += f"i={i}: {term}. truthtype={truthtype} certainty={certainty}\n"
     return prompt_start + prompt_memory + prompt_end
 
 from nltk import WordNetLemmatizer
@@ -83,23 +91,20 @@ def Lemmatize(word, tag):
     ret = lemma.lemmatize(word.lower(), pos = tag).strip().lower().replace(" ","_").replace("-","_")
     if tag == wordnet.VERB:
         if ret == "is" or ret == "isa" or ret == "is_a" or ret == "be" or ret == "are" or ret == "were":
-            return "IsA"
+            return "isa"
     return ret
 
 def ProcessInput(currentTime, memory, term, punctuation_tv, backups = ["input", "answers", "derivations"]):
     ret = NAR.AddInput(term + punctuation_tv, Print=False)
     for backup in backups:
         for derivation in ret[backup]:
-            #print(derivation["term"], " /1 " in derivation["term"])
             for forbidden in [" /1 ", " \1 ", " /2 ", " \2 ", " & ", " | ", " ~ ", " - ", " <=> ", " && ", " || ", " ==> ", " <-> "]:
                 if forbidden in derivation["term"]:
                     return
-            #print("GRANTED>", derivation["term"])
             if derivation["punctuation"] == "." and derivation["occurrenceTime"] == "eternal" and derivation["term"] != "None":
                 term = derivation["term"]
                 if term.startswith("dt="): #we don't need to store time deltas
                     term = " ".join(term.split(" ")[1:])
-                #query(term)
                 f2 = float(derivation["truth"]["frequency"])
                 c2 = float(derivation["truth"]["confidence"])
                 usefulnessAddition = 1000000 if "Priority" not in derivation or derivation["Priority"] == 1.0 else 1
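
The context lines above also show ProcessInput's operator filter, which this commit leaves in place: derivations containing compound Narsese operators are discarded before storage. A toy mirror of that guard (hypothetical function name; raw strings are used here so the backslash sequences stay literal):

FORBIDDEN = [" /1 ", r" \1 ", " /2 ", r" \2 ", " & ", " | ", " ~ ", " - ",
             " <=> ", " && ", " || ", " ==> ", " <-> "]

def acceptable(term):
    # reject any derived term that contains a compound operator
    return not any(op in term for op in FORBIDDEN)

print(acceptable("<cat --> animal>"))    # True
print(acceptable("<(a && b) --> c>"))    # False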
@@ -109,32 +114,52 @@
             memory[term] = (currentTime, usefulness + usefulnessAddition, (f2, c2)) #, #(f2, c2, usefulness + usefulnessAddition)
         else:
             memory[term] = (currentTime, usefulnessAddition, (f2, c2))
 
-def Relation(currentTime, memory, s, v, p, punctuation_tv):
+relations = set(["isa", "are", "hasproperty"])
+def Relation(inp, currentTime, memory, s, v, p, punctuation_tv):
+    global relations
     s = Lemmatize(s, wordnet.NOUN)
     p = Lemmatize(p, wordnet.NOUN)
     v = Lemmatize(v, wordnet.VERB)
+    relations.add(v)
+    if s not in inp or p not in inp:
+        #print("//!!!! filtered out", s, v, p)
+        return False
     if s == "" or v == "" or p == "":
-        return
-    if v == "IsA" or v == "are":
+        return False
+    if v == "isa" or v == "are":
         ProcessInput(currentTime, memory, f"<{s} --> {p}>", punctuation_tv)
     else:
         ProcessInput(currentTime, memory, f"<({s} * {p}) --> {v}>", punctuation_tv)
+    return True
 
-def Property(currentTime, memory, s, p, punctuation_tv):
+def Property(inp, currentTime, memory, s, p, punctuation_tv):
+    if s not in inp or p not in inp:
+        #print("//!!!! filtered out", s, "hasproperty", p)
+        return False
     s = Lemmatize(s, wordnet.NOUN)
     p = Lemmatize(p, wordnet.ADJ)
     if s == "" or p == "":
-        return
+        return False
     ProcessInput(currentTime, memory, f"<{s} --> [{p}]>", punctuation_tv)
+    return True
 
-def Memory_digest_sentence(currentTime, memory, sentence, truth, PrintMemoryUpdates):
+lastTime = 0
+hadRelation = set([])
+def Memory_digest_sentence(inp, currentTime, memory, sentence, truth, PrintMemoryUpdates):
+    global lastTime, hadRelation
+    if currentTime != lastTime:
+        hadRelation = set([])
+    if sentence in hadRelation:
+        return
+    lastTime = currentTime
     pieces = sentence.split(" ")
     punctuation_tv = f". {{{truth[0]} {truth[1]}}}"
     if len(pieces) == 3:
         if pieces[1] == "hasproperty":
-            Property(currentTime, memory, pieces[0], pieces[2], punctuation_tv)
+            return Property(inp, currentTime, memory, pieces[0], pieces[2], punctuation_tv)
         else:
-            Relation(currentTime, memory, *pieces, punctuation_tv)
-    #else:
-    #    print("!!!! omitted", pieces)
+            return Relation(inp, currentTime, memory, *pieces, punctuation_tv)
+    else:
+        #print("//!!!! Can't form relation:", pieces)
+        return False
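
To see what the new prompt rendering in Memory_generate_prompt does, here is a minimal extraction of just that logic (function name hypothetical; input forms assumed to be the two shown in the diff):

def render_term(narsese):
    term = narsese[1:-1]                        # strip the enclosing < >
    if " * " in term:                           # relation: <(subj * obj) --> verb>
        arg1 = term.split("(")[1].split(" * ")[0].strip()
        arg2 = term.split(")")[0].split(" * ")[1].strip()
        relarg = term.split(" --> ")[1].strip()
        return arg1 + " " + relarg + " " + arg2
    return term.replace(" --> ", " isa ")       # inheritance: <subj --> pred>

print(render_term("<(cat * fish) --> eat>"))    # cat eat fish
print(render_term("<cat --> animal>"))          # cat isa animal

Memory entries thus reach GPT as plain "cat eat fish" / "cat isa animal" lines rather than raw Narsese, matching the lowercase "isa" the lemmatizer now returns.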
10 changes: 6 additions & 4 deletions NarsGPT.py
@@ -43,11 +43,11 @@
 memory = {} #the NARS-style long-term memory
 currentTime = 1
 
-def PromptProcess(send_prompt, isQuestion):
+def PromptProcess(inp, send_prompt, isQuestion):
     if PrintGPTPrompt: print("vvvvSTART PROMPT", send_prompt, "\n^^^^END PROMPT")
     response = openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=[ {"role": "user", "content": send_prompt}], max_tokens=200, temperature=0)
     commands = response['choices'][0]['message']['content'].split("\n")
-    Control_cycle(currentTime, memory, commands, isQuestion, PrintMemoryUpdates, PrintTruthValues)
+    Control_cycle(inp, currentTime, memory, commands, isQuestion, PrintMemoryUpdates, PrintTruthValues)
 
 while True:
     try:
@@ -72,10 +72,12 @@ def PromptProcess(send_prompt, isQuestion):
         if inp.endswith("?"):
             send_prompt = Memory_generate_prompt(memory, Prompts_question_start, "\nThe question: ", attention_buffer_size) + inp[:-1] + \
                           (Prompts_question_end_alternative if IncludeGPTKnowledge else Prompts_question_end)
-            PromptProcess(send_prompt, True)
+            PromptProcess(inp, send_prompt, True)
         else:
             if len(inp) > 0 and inp != "1":
-                PromptProcess(Memory_generate_prompt(memory, Prompts_belief_start, "\nThe sentence: ", attention_buffer_size) + inp + Prompts_belief_end, False)
+                PromptProcess(inp, Memory_generate_prompt(memory, Prompts_belief_start, "\nThe sentence: ", attention_buffer_size) + inp + Prompts_belief_end, False)
+            else:
+                NAR.AddInput("1", Print=False)
         #PromptProcess(Memory_generate_prompt(memory, Prompts_inference_start, "\n", attention_buffer_size) + Prompts_inference_end, False)
         currentTime += 1
         #Memory_store(filename, memory, currentTime, evidentalBaseID)
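
The point of threading inp all the way down: Relation and Property now reject any GPT-proposed claim whose lemmatized arguments do not literally occur in the user's input, so invented terms never reach ONA. A toy version of that guard (hypothetical helper name):

def grounded(inp, s, p):
    # mirrors the new substring check in Relation/Property
    return s in inp and p in inp

inp = "garfield is a cat"
print(grounded(inp, "garfield", "cat"))      # True  -> claim is processed
print(grounded(inp, "garfield", "animal"))   # False -> claim is filtered out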
3 changes: 3 additions & 0 deletions build.sh
@@ -0,0 +1,3 @@
+git clone https://github.com/opennars/OpenNARS-for-Applications
+cd OpenNARS-for-Applications
+sh build.sh
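
Note that build.sh clones ONA into the repository's working directory and builds it there, which is what the new relative path in Memory.py (cwd + "/OpenNARS-for-Applications/misc/Python/") expects; presumably one runs sh build.sh once before starting NarsGPT.py.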
