aarodi committed on
Commit c4ae52a · 1 Parent(s): 16f3614

Upload 2 files

Files changed (2)
  1. app.py +135 -55
  2. requirements.txt +2 -0
app.py CHANGED
@@ -1,68 +1,156 @@
 import gradio as gr
 from PIL import Image
-import random
+import onnxruntime as ort
+import torchvision.transforms as transforms
 import json
 import os
+import numpy as np
+from huggingface_hub import snapshot_download, HfApi
+from transformers import CLIPTokenizer
+
+# --- Config ---
+HUB_REPO_ID = "aarodi/OpenArenaLeaderboard"
+HF_TOKEN = os.environ.get("HF_TOKEN")
+LOCAL_JSON = "leaderboard.json"
+HUB_JSON = "leaderboard.json"
+MODEL_PATH = "mobilenet_v2_fake_detector.onnx"
+CLIP_IMAGE_ENCODER_PATH = "clip_image_encoder.onnx"
+CLIP_TEXT_ENCODER_PATH = "clip_text_encoder.onnx"
+PROMPT_MATCH_THRESHOLD = 10  # percent
+
+# --- Download leaderboard + model checkpoint from HF Hub ---
+def load_assets():
+    try:
+        snapshot_download(
+            repo_id=HUB_REPO_ID,
+            local_dir=".",
+            repo_type="dataset",
+            token=HF_TOKEN,
+            allow_patterns=[HUB_JSON, MODEL_PATH, CLIP_IMAGE_ENCODER_PATH, CLIP_TEXT_ENCODER_PATH]
+        )
+    except Exception as e:
+        print(f"Failed to load assets from HF Hub: {e}")

-LEADERBOARD_FILE = "leaderboard.json"
+load_assets()

-# --- Load leaderboard from file if it exists ---
-if os.path.exists(LEADERBOARD_FILE):
-    with open(LEADERBOARD_FILE, "r") as f:
-        leaderboard_scores = json.load(f)
-else:
-    leaderboard_scores = {}
+# --- Load leaderboard ---
+def load_leaderboard():
+    try:
+        with open(HUB_JSON, "r") as f:
+            return json.load(f)
+    except Exception as e:
+        print(f"Failed to read leaderboard: {e}")
+        return {}

-# --- Save leaderboard to file ---
-def save_leaderboard():
-    with open(LEADERBOARD_FILE, "w") as f:
-        json.dump(leaderboard_scores, f)
+leaderboard_scores = load_leaderboard()

-# --- Main logic for prediction ---
-def dummy_deepfake_detector(image: Image.Image, prompt: str, username: str):
+# --- Save and push to HF Hub ---
+def save_leaderboard():
+    try:
+        with open(HUB_JSON, "w") as f:
+            json.dump(leaderboard_scores, f)
+
+        if HF_TOKEN is None:
+            print("HF_TOKEN not set. Skipping push to hub.")
+            return
+
+        api = HfApi()
+        api.upload_file(
+            path_or_fileobj=HUB_JSON,
+            path_in_repo=HUB_JSON,
+            repo_id=HUB_REPO_ID,
+            repo_type="dataset",
+            token=HF_TOKEN,
+            commit_message="Update leaderboard"
+        )
+    except Exception as e:
+        print(f"Failed to save leaderboard to HF Hub: {e}")
+
+# --- Load ONNX models ---
+session = ort.InferenceSession(MODEL_PATH, providers=["CPUExecutionProvider"])
+input_name = session.get_inputs()[0].name
+
+clip_image_sess = ort.InferenceSession(CLIP_IMAGE_ENCODER_PATH, providers=["CPUExecutionProvider"])
+clip_text_sess = ort.InferenceSession(CLIP_TEXT_ENCODER_PATH, providers=["CPUExecutionProvider"])
+clip_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
+
+transform = transforms.Compose([
+    transforms.Resize((224, 224)),
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711])
+])
+
+def compute_prompt_match(image: Image.Image, prompt: str) -> float:
+    try:
+        # Encode image
+        img_tensor = transform(image).unsqueeze(0).numpy().astype(np.float32)
+        image_features = clip_image_sess.run(None, {clip_image_sess.get_inputs()[0].name: img_tensor})[0][0]
+        image_features /= np.linalg.norm(image_features)  # Normalize
+
+        # Encode text
+        inputs = clip_tokenizer(prompt, return_tensors="np", padding="max_length", truncation=True, max_length=77)
+        input_ids = inputs["input_ids"]
+        attention_mask = inputs["attention_mask"]
+        text_features = clip_text_sess.run(None, {
+            clip_text_sess.get_inputs()[0].name: input_ids,
+            clip_text_sess.get_inputs()[1].name: attention_mask
+        })[0][0]
+        text_features /= np.linalg.norm(text_features)  # Normalize
+
+        # Cosine similarity
+        sim = np.dot(image_features, text_features)
+        return round(sim * 100, 2)
+    except Exception as e:
+        print(f"CLIP ONNX match failed: {e}")
+        return 0.0
+
+
+# --- Main prediction logic ---
+def detect_with_model(image: Image.Image, prompt: str, username: str):
     if not username.strip():
-        return "Please enter your name.", None, [], gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)
+        return "Please enter your name.", None, [], gr.update(visible=True), gr.update(visible=False)
+
+    prompt_score = compute_prompt_match(image, prompt)
+    if prompt_score < PROMPT_MATCH_THRESHOLD:
+        message = f"⚠️ Prompt match too low ({round(prompt_score, 2)}%). Please generate an image that better matches the prompt."
+        return message, None, [], gr.update(visible=True), gr.update(visible=False)
+
+    image_tensor = transforms.Resize((224, 224))(image)
+    image_tensor = transforms.ToTensor()(image_tensor).unsqueeze(0).numpy().astype(np.float32)
+    outputs = session.run(None, {input_name: image_tensor})
+    prob = round(1 / (1 + np.exp(-outputs[0][0][0])), 2)  # sigmoid
+    prediction = "Real" if prob > 0.5 else "Fake"

-    prediction = random.choice(["Real", "Fake"])
     score = 1 if prediction == "Real" else 0
+    confidence = round(prob * 100, 2) if prediction == "Real" else round((1 - prob) * 100, 2)
+
+    message = f"Prediction: {prediction} ({confidence}% confidence)\n🧠 Prompt match: {prompt_score}%"
+
+    if prediction == "Real":
+        leaderboard_scores[username] = leaderboard_scores.get(username, 0) + score
+        message += "\n🎉 Nice! You fooled the AI. +1 point!"
+    else:
+        message += "\n😅 The AI caught you this time. Try again!"

-    # Update and persist leaderboard
-    leaderboard_scores[username] = leaderboard_scores.get(username, 0) + score
     save_leaderboard()

-    # Create sorted leaderboard table
     sorted_scores = sorted(leaderboard_scores.items(), key=lambda x: x[1], reverse=True)
     leaderboard_table = [[name, points] for name, points in sorted_scores]

     return (
-        f"Prediction: {prediction}",
+        message,
         image,
         leaderboard_table,
-        gr.update(visible=False),  # hide image input
-        gr.update(visible=False),  # hide prompt input
-        gr.update(visible=False),  # hide upload button
-        gr.update(visible=True)  # show Try Again button
-    )
-
-# --- Reset app state ---
-def reset_app():
-    return (
-        "",  # Clear prediction text
-        None,  # Clear image output
-        [],  # Clear leaderboard
-        gr.update(visible=True, value=""),  # Show prompt input
-        gr.update(visible=True, value=None),  # Show image input
-        gr.update(visible=True),  # Show Upload button
-        gr.update(visible=False),  # Hide Try Again button
-        gr.update(visible=True, value="")  # Show username input
+        gr.update(visible=False),
+        gr.update(visible=True)
     )

-# --- Build Gradio UI ---
+# --- UI Layout ---
 with gr.Blocks(css=".gr-button {font-size: 16px !important}") as demo:
-    gr.Markdown("## 🕵️‍♂️ Fool the Deepfake Detector")
-    gr.Markdown("Upload an image and try to fool the AI model into thinking it’s real. Your score will be saved!")
+    gr.Markdown("## 🎭 OpenFake Arena")
+    gr.Markdown("Welcome to the OpenFake Arena!\n\n**Your mission:** Generate a synthetic image for the prompt, upload it, and try to fool the AI detector into thinking it’s real.\n\n**Rules:**\n- Only synthetic images allowed!\n- No cheating with real photos.\n- Licensing is your responsibility.\n\nMake it wild. Make it weird. Most of all — make it fun.")

-    with gr.Group():
+    with gr.Group(visible=True) as input_section:
         username_input = gr.Textbox(label="Your Name", placeholder="Enter your name")

         with gr.Row():
@@ -74,7 +162,7 @@ with gr.Blocks(css=".gr-button {font-size: 16px !important}") as demo:
         )

         with gr.Row():
-            image_input = gr.Image(type="pil", label="Upload Image")
+            image_input = gr.Image(type="pil", label="Upload Synthetic Image")

         with gr.Row():
             submit_btn = gr.Button("Upload")
@@ -96,36 +184,28 @@ with gr.Blocks(css=".gr-button {font-size: 16px !important}") as demo:
             row_count=5
         )

-    # Submit button logic
     submit_btn.click(
-        fn=dummy_deepfake_detector,
+        fn=detect_with_model,
         inputs=[image_input, prompt_input, username_input],
         outputs=[
             prediction_output,
             image_output,
             leaderboard,
-            image_input,
-            prompt_input,
-            submit_btn,
+            input_section,
             try_again_btn
         ]
     )

-    # Try Again button logic
     try_again_btn.click(
-        fn=reset_app,
+        fn=lambda: ("", None, [], gr.update(visible=True), gr.update(visible=False)),
         outputs=[
             prediction_output,
             image_output,
             leaderboard,
-            prompt_input,
-            image_input,
-            submit_btn,
-            try_again_btn,
-            username_input
+            input_section,
+            try_again_btn
         ]
     )

 if __name__ == "__main__":
     demo.launch()
-
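The new detector path above runs entirely on ONNX Runtime. Below is a minimal standalone sanity check of that path, not part of the commit: it assumes mobilenet_v2_fake_detector.onnx is already present locally (app.py fetches it from the aarodi/OpenArenaLeaderboard dataset via snapshot_download), that the model takes a 1x3x224x224 float32 input and returns a single logit exactly as detect_with_model feeds and reads it, and that test.png is a hypothetical local image.

# Standalone sanity check for the ONNX deepfake detector used in app.py (sketch, assumptions noted above).
import numpy as np
import onnxruntime as ort
import torchvision.transforms as transforms
from PIL import Image

# Assumes the model file was already downloaded next to this script.
session = ort.InferenceSession("mobilenet_v2_fake_detector.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name

image = Image.open("test.png").convert("RGB")  # hypothetical local test image
tensor = transforms.ToTensor()(transforms.Resize((224, 224))(image)).unsqueeze(0).numpy().astype(np.float32)

# Same post-processing as detect_with_model: sigmoid over the single raw logit.
logit = session.run(None, {input_name: tensor})[0][0][0]
prob_real = 1.0 / (1.0 + np.exp(-logit))
print(f"P(Real) = {prob_real:.2f} -> {'Real' if prob_real > 0.5 else 'Fake'}")

Note that this mirrors app.py in feeding the detector only a resized, un-normalized tensor; the CLIP-normalized transform is used solely for the prompt-match path.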
 
requirements.txt CHANGED
@@ -1,2 +1,4 @@
 gradio
 pillow
+onnxruntime
+scikit-image
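Because the leaderboard now lives in the aarodi/OpenArenaLeaderboard dataset repo instead of the Space's local disk, it can also be read back outside the app. A hedged sketch of doing so (repo id and file name taken from the diff above; network access is required, plus a token if the dataset is not public):

# Pull and print the leaderboard that app.py uploads via HfApi.upload_file.
import json
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="aarodi/OpenArenaLeaderboard",
    filename="leaderboard.json",
    repo_type="dataset",
)
with open(path) as f:
    scores = json.load(f)

# Same ordering the app uses for its leaderboard table: highest score first.
for name, points in sorted(scores.items(), key=lambda kv: kv[1], reverse=True):
    print(f"{name}: {points}")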