ghost7 committed on
Commit 9f11d79 · verified · 1 Parent(s): c94298a

Create app.py

Files changed (1)
  1. app.py +29 -0
app.py ADDED
@@ -0,0 +1,29 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, pipeline
+
+ MODEL = "Gryphe/MythoMax-L2-13b"  # full-precision base; the GGUF quant (TheBloke/MythoMax-L2-13B-GGUF) is not loadable by a plain transformers pipeline
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL)
+ pipeline_chat = pipeline(
+     "text-generation",
+     model=MODEL,
+     tokenizer=tokenizer,
+     device_map="auto",
+     max_new_tokens=512,
+     do_sample=True,  # needed for temperature/top_p to take effect
+     temperature=0.9,
+     top_p=0.95,
+ )
+
+ def chat_fn(message, history):
+     prompt = ""  # rebuild the conversation as a plain-text prompt
+     for user, bot in history:
+         prompt += f"User: {user}\nAssistant: {bot}\n"
+     prompt += f"User: {message}\nAssistant:"
+
+     output = pipeline_chat(prompt)[0]["generated_text"]
+     reply = output.split("Assistant:")[-1].strip()  # keep only the new reply (the pipeline echoes the prompt)
+     return reply
+
+ demo = gr.ChatInterface(chat_fn, title="💖 MythoMax Virtual GF")
+ demo.launch()