# UIUX6.py (forked from pythaiml/automindx)
import os
import gradio as gr
import fire
from enum import Enum
from huggingface_hub import snapshot_download
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModel
from MASTERMIND import MASTERMIND
from logic import LogicTables
from reasoning import SocraticReasoning
from prediction import Predictor
from epistemic import AutoepistemicAgent
from bdi import Belief, Desire, Intention, Goal, Reward  # BDI components
from memory import save_conversation_memory


class Model_Type(Enum):
    gptq = 1
    ggml = 2
    full_precision = 3


class LlamaModel:
    def __init__(self, model_name, models_folder="./models"):
        self.model_name = model_name
        self.models_folder = models_folder
        self.model, self.tokenizer = self.initialize_model()

    def initialize_model(self):
        # If model_name is a local directory, load from it directly; otherwise treat
        # it as a Hugging Face Hub repo id and download a full snapshot so that the
        # config, tokenizer, and weights are all available locally.
        if os.path.isdir(self.model_name):
            model_path = self.model_name
        else:
            model_path = snapshot_download(repo_id=self.model_name)
        model = AutoModelForCausalLM.from_pretrained(model_path)
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        return model, tokenizer
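    def generate_contextual_output(self, prompt, max_new_tokens=256):
        # Called by the chat UI below but not defined in the original file; this is a
        # minimal sketch assuming a standard transformers tokenize -> generate ->
        # decode flow, and max_new_tokens=256 is an assumed default.
        inputs = self.tokenizer(prompt, return_tensors="pt")
        output_ids = self.model.generate(**inputs, max_new_tokens=max_new_tokens)
        # Strip the echoed prompt tokens and return only the newly generated text.
        new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True)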
def get_model_type(model_name):
    if "gptq" in model_name.lower():
        return Model_Type.gptq
    elif "ggml" in model_name.lower():
        return Model_Type.ggml
    else:
        return Model_Type.full_precision


def initialize_model(model_name, model_type):
    # Build the wrapper once and return both the model and its tokenizer.
    llama = LlamaModel(model_name)
    return llama.model, llama.tokenizer


def run_ui(model, tokenizer, is_chat_model, model_type, model_name, save_history=True):
    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        msg = gr.Textbox()
        clear = gr.Button("Clear")

        conversation_memory = []
        epistemic_agent = AutoepistemicAgent(initial_beliefs={'The sky is blue': True})
        belief = Belief("The sky is blue")  # Initialize the belief tracked by the UI
        # Build the aGLM wrapper once so every turn reuses the same loaded weights
        # instead of reloading the model on each message.
        aglm_model = LlamaModel(model_name, "./models")

        def user(user_message, memory):
            nonlocal conversation_memory
            # Process the belief and simulate adding conflicting information.
            belief.process_belief()
            epistemic_agent.add_information({'The sky is blue': False})
            epistemic_agent.revise_beliefs()
            current_beliefs = f"Processed Belief: {belief}"
            # Append the new turn; the placeholder reply is overwritten by bot().
            memory.append([user_message, current_beliefs])
            conversation_memory = memory
            return "", memory

        def bot(memory):
            nonlocal conversation_memory
            conversation_memory = memory
            instruction = memory[-1][0]
            response = aglm_model.generate_contextual_output(instruction)
            memory[-1][1] = f"Response: {response}"
            if save_history:
                save_conversation_memory(conversation_memory)
            return memory

        msg.submit(user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).then(
            bot, inputs=[chatbot], outputs=[chatbot]
        )
        clear.click(lambda: None, inputs=None, outputs=chatbot, queue=False)

    demo.launch(share=False, debug=True)


def main(model_name=None, file_name=None, save_history=True):
    assert model_name, "model_name argument is missing."
    model_type = get_model_type(model_name)
    model, tokenizer = initialize_model(model_name, model_type)
    run_ui(model, tokenizer, 'chat' in model_name.lower(), model_type, model_name,
           save_history=save_history)


if __name__ == '__main__':
    fire.Fire(main)
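# Example invocation via fire (a sketch; the model path below is hypothetical and
# should point at a local checkpoint directory or a Hugging Face Hub repo id):
#
#   python UIUX6.py --model_name ./models/llama-2-7b-chat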