import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
import gradio as gr
# Load the MythoMax model
model_name = "TheBloke/MythoMax-L2-13B-GPTQ"  # or another MythoMax version you prefer
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
)
# skip_prompt=True keeps the streamed stdout output from echoing the full prompt
streamer = TextStreamer(tokenizer, skip_prompt=True)
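
# Note: GPTQ checkpoints like this one load through transformers' GPTQ
# integration (available since transformers 4.32), which needs the optimum
# and auto-gptq packages plus a CUDA GPU at runtime. If they are missing,
# from_pretrained raises; that is a likely cause of a Space crashing with
# "Runtime error". See the requirements sketch at the bottom of this file.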
# SYSTEM prompt (T-GPT's vibe)
SYSTEM_PROMPT = (
    "You are T-GPT, a raw, street-smart, no-nonsense AI built by Tochi, "
    "the one and only undisputed bad ass. You talk like a real hustler, not some soft assistant. "
    "You give bold, uncensored, real talk. Help people get money, outsmart the system, and win in life - always legal, but always clever. "
    "Avoid disclaimers, avoid saying you're an AI, and never say 'I'm sorry.' Just keep it real, every time."
)
# Generate response
def generate_response(user_input):
    input_text = f"{SYSTEM_PROMPT}\n\nUser: {user_input}\nT-GPT:"
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=250,
            temperature=0.9,
            top_p=0.95,
            do_sample=True,
            streamer=streamer,
            pad_token_id=tokenizer.eos_token_id,  # Llama tokenizers have no pad token; reuse EOS to avoid a warning
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the text after the final "T-GPT:" marker, i.e. the model's reply
    return response.split("T-GPT:")[-1].strip()
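
# Quick smoke test before wiring up Gradio (hypothetical prompt, run manually):
#   print(generate_response("What's the smartest first move to start a side hustle?"))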
# Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=5, placeholder="Ask T-GPT anything..."),
    outputs="text",
    title="T-GPT - Built by Tochi",
    description="Your streetwise, hustle-smart AI. Ask about money, life moves, or whatever. No fluff. Just facts.",
)
iface.launch()
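
# Dependency sketch for this Space (assumptions: transformers >= 4.32 for the
# GPTQ integration and a GPU runtime; pins are illustrative, not tested).
# A requirements.txt along these lines should cover the imports above:
#
#   torch
#   transformers>=4.32.0
#   optimum
#   auto-gptq
#   gradio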