Upload app.py
app.py CHANGED
@@ -65,8 +65,8 @@ leaderboard_scores = load_leaderboard()
 # --- Save and push to HF Hub ---
 def save_leaderboard():
     try:
-        with open(HUB_JSON, "w") as f:
-            json.dump(leaderboard_scores, f)
+        with open(HUB_JSON, "w", encoding="utf-8") as f:
+            json.dump(leaderboard_scores, f, ensure_ascii=False)
 
         if HF_TOKEN is None:
             print("HF_TOKEN not set. Skipping push to hub.")
@@ -122,12 +122,12 @@ def compute_prompt_match(image: Image.Image, prompt: str) -> float:
 # --- Main prediction logic ---
 def detect_with_model(image: Image.Image, prompt: str, username: str):
     if not username.strip():
-        return "Please enter your name.", None, [], gr.update(visible=True), gr.update(visible=False)
+        return "Please enter your name.", None, [], gr.update(visible=True), gr.update(visible=False), username
 
     prompt_score = compute_prompt_match(image, prompt)
     if prompt_score < PROMPT_MATCH_THRESHOLD:
         message = f"⚠️ Prompt match too low ({round(prompt_score, 2)}%). Please generate an image that better matches the prompt."
-        return message, None, [], gr.update(visible=True), gr.update(visible=False)
+        return message, None, [], gr.update(visible=True), gr.update(visible=False), username
 
     image_tensor = transforms.Resize((224, 224))(image)
     image_tensor = transforms.ToTensor()(image_tensor).unsqueeze(0).numpy().astype(np.float32)
@@ -138,7 +138,7 @@ def detect_with_model(image: Image.Image, prompt: str, username: str):
     score = 1 if prediction == "Real" else 0
     confidence = round(prob * 100, 2) if prediction == "Real" else round((1 - prob) * 100, 2)
 
-    message = f"Prediction: {prediction} ({confidence}% confidence)\n🧠 Prompt match: {prompt_score}%"
+    message = f"Prediction: {prediction} ({round(confidence, 2)}% confidence)\n🧠 Prompt match: {prompt_score}%"
 
     if prediction == "Real":
         leaderboard_scores[username] = leaderboard_scores.get(username, 0) + score
@@ -156,7 +156,8 @@ def detect_with_model(image: Image.Image, prompt: str, username: str):
         image,
         leaderboard_table,
         gr.update(visible=False),
-        gr.update(visible=True)
+        gr.update(visible=True),
+        username
     )
 
 # --- UI Layout ---
@@ -164,17 +165,17 @@ def get_random_prompt():
     return random.choice(PROMPT_LIST) if PROMPT_LIST else "A synthetic scene with dramatic lighting"
 
 with gr.Blocks(css=".gr-button {font-size: 16px !important}") as demo:
-    gr.Markdown("##
+    gr.Markdown("## OpenFake Arena")
     gr.Markdown("Welcome to the OpenFake Arena!\n\n**Your mission:** Generate a synthetic image for the prompt, upload it, and try to fool the AI detector into thinking it's real.\n\n**Rules:**\n- Only synthetic images allowed!\n- No cheating with real photos.\n- Licensing is your responsibility.\n\nMake it wild. Make it weird. Most of all, make it fun.")
 
     with gr.Group(visible=True) as input_section:
-        username_input = gr.Textbox(label="Your Name", placeholder="Enter your name")
+        username_input = gr.Textbox(label="Your Name", placeholder="Enter your name", interactive=True)
 
         with gr.Row():
             prompt_input = gr.Textbox(
                 label="Suggested Prompt",
-                placeholder="e.g.,
-                value=
+                placeholder="e.g., ...",
+                value="",
                 lines=2
             )
 
@@ -198,7 +199,8 @@ with gr.Blocks(css=".gr-button {font-size: 16px !important}") as demo:
         headers=["Username", "Score"],
         datatype=["str", "number"],
         interactive=False,
-        row_count=5
+        row_count=5,
+        visible=True
     )
 
     submit_btn.click(
@@ -209,20 +211,29 @@ with gr.Blocks(css=".gr-button {font-size: 16px !important}") as demo:
             image_output,
             leaderboard,
             input_section,
-            try_again_btn
+            try_again_btn,
+            username_input
         ]
     )
 
     try_again_btn.click(
-        fn=lambda: ("", None, [], gr.update(visible=True), gr.update(visible=False)),
+        fn=lambda name: ("", None, [], gr.update(visible=True), gr.update(visible=False), name, gr.update(value=get_random_prompt())),
+        inputs=[username_input],
        outputs=[
            prediction_output,
            image_output,
            leaderboard,
            input_section,
-            try_again_btn
+            try_again_btn,
+            username_input,
+            prompt_input
        ]
    )
 
+    demo.load(
+        fn=lambda: gr.update(value=get_random_prompt()),
+        outputs=prompt_input
+    )
+
 if __name__ == "__main__":
     demo.launch()
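For reference, here is a minimal, runnable sketch of the wiring pattern this commit moves to: the handler returns the username as an extra output so the name survives a reset, the reset lambda receives the name as an input and passes it straight back, and demo.load seeds the prompt box when the page opens. Component and function names below are illustrative stand-ins, not the Space's actual code, and the detector is stubbed out.

```python
# Sketch only: illustrates the event wiring from this commit with placeholder names.
import random
import gradio as gr

PROMPTS = ["A neon-lit street market at dusk", "A polar bear steering a sailboat"]

def get_random_prompt():
    return random.choice(PROMPTS)

def fake_detect(image, prompt, username):
    # Stub for the real detector: return a message, the image, visibility updates,
    # and the username so the textbox keeps its value after submission.
    msg = f"Prediction: Real (stub)\nPrompt: {prompt}"
    return msg, image, gr.update(visible=False), gr.update(visible=True), username

with gr.Blocks() as demo:
    with gr.Group(visible=True) as input_section:
        username_input = gr.Textbox(label="Your Name")
        prompt_input = gr.Textbox(label="Suggested Prompt", value="", lines=2)
        image_input = gr.Image(type="pil")
        submit_btn = gr.Button("Submit")

    prediction_output = gr.Textbox(label="Result")
    image_output = gr.Image()
    try_again_btn = gr.Button("Try again", visible=False)

    # The outputs list must match the handler's return tuple element for element.
    submit_btn.click(
        fn=fake_detect,
        inputs=[image_input, prompt_input, username_input],
        outputs=[prediction_output, image_output, input_section, try_again_btn, username_input],
    )

    # Reset keeps the name and draws a fresh prompt, mirroring the new lambda.
    try_again_btn.click(
        fn=lambda name: ("", None, gr.update(visible=True), gr.update(visible=False),
                         name, gr.update(value=get_random_prompt())),
        inputs=[username_input],
        outputs=[prediction_output, image_output, input_section, try_again_btn,
                 username_input, prompt_input],
    )

    # Populate the prompt box once when the page loads.
    demo.load(fn=lambda: gr.update(value=get_random_prompt()), outputs=prompt_input)

if __name__ == "__main__":
    demo.launch()
```

The constraint driving the diff is that each outputs list must line up with the handler's return tuple, which is why username_input (and, for the reset path, prompt_input) is appended both to the returned values and to the corresponding outputs.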