Spaces:
Paused
Paused
File size: 3,730 Bytes
d9f0669 c64d829 32a70d2 d9f0669 40b0f4a faf98f2 32a70d2 8335785 7bb92dd 32a70d2 7bb92dd 3e6b150 c64d829 f94ce29 c64d829 40b0f4a c64d829 40b0f4a c64d829 40b0f4a c64d829 40b0f4a 324ddc8 c64d829 324ddc8 faf98f2 3d02d30 3b46ac0 bca41b9 c64d829 40b0f4a c64d829 40b0f4a bca41b9 3d02d30 40b0f4a 32a70d2 3d02d30 a8c6d92 40b0f4a 3b46ac0 3d02d30 3a9a368 bca41b9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 |
import gradio as gr
import requests
import json
from transformers import pipeline
API_URL = "https://api.openai.com/v1/chat/completions"
# Initialize the Korean->English translation pipeline once at import time
# (original comment was mojibake-garbled Korean; loading here avoids a
# per-call model load).
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
def translate_and_generate_prompts(text):
    """Translate Korean *text* to English and build an image-generation prompt.

    The prompt is assembled from three pieces: fixed style keywords, an
    optional character-likeness keyword (when a known character name occurs
    in the raw Korean input), and the machine-translated text.
    """
    english = translator(text, max_length=512)[0]['translation_text']

    # NOTE(review): the character-name literals below are mojibake-corrupted
    # in the source file; they are reproduced byte-for-byte to keep behavior.
    parts = ["3d style, 4k"]
    if "ํ ๋ฆฌ" in text:
        parts.append("like Brad Pitt young boy")
    elif "์ค๋ฆฌ" in text:
        parts.append("like Emma Watson young girl")
    parts.append(english)

    return ", ".join(parts)
def predict(inputs, top_p, temperature, openai_api_key):
    """Generate a children's-animation narration script via the OpenAI chat API.

    Args:
        inputs: Topic or sentence (Korean) to base the script on.
        top_p: Nucleus-sampling parameter forwarded to the API.
        temperature: Sampling temperature forwarded to the API.
        openai_api_key: Bearer token for api.openai.com.

    Returns:
        The generated script text, or a fixed error string on any failure.
    """
    # NOTE(review): this Korean instruction prompt is mojibake-corrupted in the
    # source; the fragments are rejoined verbatim here — recover the original
    # Korean from version control if possible.
    narration_prompt = (
        f"์๋์ฉ ์ ๋๋ฉ์ด์"
        "๋์์์ ์ฌ์ฉํ ์คํฌ๋ฆฝํธ๋ฅผ ์์ฑํ๋ผ. ๋ฐ๋์ ํ๊ธ๋ก ์์ฑํ ๊ฒ. ์ผ์ฒด์ ์ง๋ฌธ์ด๋ ์ง์, ๊ธฐํธ ํ์, ๋ฐฐ๊ฒฝ ์ค๋ช"
        "๋ฑ์ ๋"
        "ธ์ถ ํ๊ฑฐ๋ ์ถ๋ ฅํ์ง ๋ง๊ณ ๊ธฐ์น์ ๊ฒฐ์ ๊ตฌ์กฐ๋ก ๋ชจํ์ ์ด์ /์๊ธฐ/๋์ /๋ฌธ์ ํด๊ฒฐ/๊ตํ์ ํฌํจํ์ฌ ์์ํ ๋๋ ์ด์"
        "๋ง 1์ค์ฉ ์ถ๋ ฅํ์ฌ ์ต๋ 10์ค ์ด๋ด๋ก ์ถ๋ ฅ. ์"
        f"๋ ฅ: '{inputs}'"
    )
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }
    payload = {
        "model": "gpt-4-1106-preview",
        "messages": [{"role": "system", "content": narration_prompt}],
        "temperature": temperature,
        "top_p": top_p,
        "n": 1,
        "max_tokens": 1000,
    }
    try:
        # Timeout keeps a stalled API call from hanging the Gradio worker;
        # network errors fall through to the same error string callers expect.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    except requests.RequestException:
        return "Error: Unable to generate response."
    if response.status_code == 200:
        response_data = response.json()
        return response_data['choices'][0]['message']['content']
    return "Error: Unable to generate response."
# Image-generation helper (original Korean comment was mojibake-garbled).
# Cache for the image pipeline: the original rebuilt the pipeline — a full
# model load — on EVERY call, which is prohibitively slow.
_image_pipeline = None


def generate_images(prompts):
    """Generate one image per non-blank line of *prompts*.

    Args:
        prompts: Newline-separated prompt strings.

    Returns:
        A list with the first pipeline output for each non-blank prompt line.
    """
    global _image_pipeline
    if _image_pipeline is None:
        # NOTE(review): "image-generation" is not a standard transformers
        # pipeline task name (text-to-image models are usually served via
        # diffusers) — verify this actually loads.
        _image_pipeline = pipeline("image-generation", model="models/goofyai/3d_render_style_xl")
    lines = [p for p in prompts.split('\n') if p.strip() != '']
    return [_image_pipeline(line)[0] for line in lines]
# Script-to-prompts step wired to the "Translate Prompts" button
# (original Korean comment was mojibake-garbled).
def generate_prompts(script):
    """Turn a generated script into image-generation prompts.

    NOTE(review): this is a stub — it ignores *script* and returns a fixed
    (garbled) placeholder string; the real processing is not implemented here.
    """
    # Placeholder: script-processing logic would go here.
    return "์ฌ๊ธฐ์ ์ฒ๋ฆฌ๋ ํ๋กฌํํธ ๋ฐํ"
# Assemble the Gradio UI: script generation -> prompt translation -> images.
# NOTE(review): the source's indentation was destroyed by encoding damage;
# the widget grouping under gr.Row below is reconstructed — verify layout.
# Korean UI strings are mojibake-corrupted and reproduced byte-for-byte.
with gr.Blocks() as demo:
    # Page title (garbled Korean HTML heading).
    gr.Markdown("<h1 align='center'>ํ ๋ฆฌ์ ๋ชจํ: 3D ์ ๋๋ฉ์ด์
์์ฑ๊ธฐ</h1>")
    with gr.Row():
        # The API key is collected from the user at runtime, masked as a password.
        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
        inputs = gr.Textbox(placeholder="์ฌ๊ธฐ์ ์
๋ ฅํ์ธ์.", label="์๋์ฉ ์ ๋๋ฉ์ด์
์คํฌ๋ฆฝํธ๋ฅผ ์์ฑํ๊ณ ์ถ์ ์ฃผ์ ์ด๋ ๋ฌธ์ฅ์ ์
๋ ฅํ์ธ์.")
        top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, label="Top-p (nucleus sampling)")
        temperature = gr.Slider(minimum=0, maximum=5.0, value=1.0, step=0.1, label="Temperature")
    # Output widgets; the unsupported `readonly` parameter was removed from
    # the Textbox constructor (translated from the garbled Korean comment).
    output = gr.Textbox(label="Generated Script")
    prompts_output = gr.TextArea(label="Translated Image Generation Prompts")
    images_output = gr.Gallery(label="Generated Images")
    submit_button = gr.Button("Generate Script")
    prompts_button = gr.Button("Translate Prompts")
    images_button = gr.Button("Generate Images")
    # Event wiring: each button feeds the previous stage's output forward.
    submit_button.click(fn=predict, inputs=[inputs, top_p, temperature, openai_api_key], outputs=output)
    prompts_button.click(fn=generate_prompts, inputs=[output], outputs=prompts_output)
    images_button.click(fn=generate_images, inputs=prompts_output, outputs=images_output)
demo.launch()