ZhouZJ36DL committed
Commit da068d4 · 1 Parent(s): eb86c26

modified: app.py

app.py CHANGED

@@ -54,8 +54,12 @@ if os.path.exists("history_gradio/history.safetensors"):
     os.remove("history_gradio/history.safetensors")
 
 out_root = 'src/gradio_utils/gradio_outputs'
+out_root_prompt = 'src/gradio_utils/gradio_prompts'
 if not os.path.exists(out_root):
     os.makedirs(out_root)
+if not os.path.exists(out_root_prompt):
+    os.makedirs(out_root_prompt)
+
 exp_folders = [d for d in os.listdir(out_root) if d.startswith("exp_") and d[4:].isdigit()]
 if exp_folders:
     max_idx = max(int(d[4:]) for d in exp_folders)
@@ -63,9 +67,12 @@ if exp_folders:
 else:
     name_dir = "exp_0"
 output_dir = os.path.join(out_root, name_dir)
+output_prompt = os.path.join(out_root_prompt, name_dir)
 
 if not os.path.exists(output_dir):
     os.makedirs(output_dir)
+if not os.path.exists(output_prompt):
+    os.makedirs(output_prompt)
 if not os.path.exists("heatmap"):
     os.makedirs("heatmap")
 if not os.path.exists("heatmap/average_heatmaps"):
@@ -74,7 +81,19 @@ source_image = None
 history_tensors = {
     "source img": torch.zeros((1, 1, 1)),
     "prev img": torch.zeros((1, 1, 1))}
-instructions = ['source']
+instructions = ['']
+
+
+def read_sorted_prompts(folder_path):
+    # List all .txt files and sort them
+    files = sorted([f for f in os.listdir(folder_path) if f.endswith('.txt')])
+    prompts = []
+    for filename in files:
+        file_path = os.path.join(folder_path, filename)
+        with open(file_path, 'r') as f:
+            prompt = f.read().strip()
+        prompts.append(prompt)
+    return prompts
 
 
 @torch.inference_mode()
@@ -84,9 +103,11 @@ def reset():
     if os.path.exists("history_gradio/history.safetensors"):
         os.remove("history_gradio/history.safetensors")
 
-    global out_root, output_dir, history_tensors, source_image, instructions
+    global out_root, out_root_prompt, output_dir, output_prompt, history_tensors, source_image, instructions
     if not os.path.exists(out_root):
         os.makedirs(out_root)
+    if not os.path.exists(out_root_prompt):
+        os.makedirs(out_root_prompt)
     exp_folders = [d for d in os.listdir(out_root) if d.startswith("exp_") and d[4:].isdigit()]
     if exp_folders:
         max_idx = max(int(d[4:]) for d in exp_folders)
@@ -94,14 +115,17 @@ def reset():
     else:
         name_dir = "exp_0"
     output_dir = os.path.join(out_root, name_dir)
+    output_prompt = os.path.join(out_root_prompt, name_dir)
 
     if not os.path.exists(output_dir):
         os.makedirs(output_dir)
+    if not os.path.exists(output_prompt):
+        os.makedirs(output_prompt)
     if not os.path.exists("heatmap"):
         os.makedirs("heatmap")
     if not os.path.exists("heatmap/average_heatmaps"):
         os.makedirs("heatmap/average_heatmaps")
-    instructions = ['source']
+    instructions = ['']
     source_image = None
     history_tensors = {
         "source img": torch.zeros((1, 1, 1)),
@@ -111,7 +135,8 @@ def reset():
     traget_prompt = "(Required) Describe the desired content of the edited image."
     gallery = None
     output_image = None
-    return source_prompt, traget_prompt, gallery, output_image
+    init_image = None
+    return source_prompt, traget_prompt, gallery, output_image, init_image
 
 
 @torch.inference_mode()
@@ -145,7 +170,7 @@ def generate_image(
     init_image=None,
     image2image_strength=0.0,
 ):
-    global ae, t5, clip, model, name, is_schnell, output_dir, add_sampling_metadata, offload, history_tensors
+    global ae, t5, clip, model, name, is_schnell, output_dir, output_prompt, add_sampling_metadata, offload, history_tensors
     device = "cuda" if torch.cuda.is_available() else "cpu"
     torch.cuda.empty_cache()
     seed = None
@@ -250,9 +275,14 @@ def generate_image(
     img.save(filename, format="jpeg", exif=exif_data, quality=95, subsampling=0)
     instructions = [prompt]
 
+    prompt_path = os.path.join(output_prompt, f"round_0000.txt")
+    with open(prompt_path, "w") as f:
+        f.write(prompt)
+
     #-------------------- 6.4 save editing prompt, update gradio component: gallery ----------------------#
     img_and_prompt = []
     history_imgs = sorted(os.listdir(output_dir))
+    instructions = read_sorted_prompts(output_prompt)
     for img_file, prompt_txt in zip(history_imgs, instructions):
         img_and_prompt.append((os.path.join(output_dir, img_file), prompt_txt))
     history_gallery = gr.Gallery(value=img_and_prompt, label="History Image", interactive=True, columns=3)
@@ -262,7 +292,7 @@ def generate_image(
 @spaces.GPU(duration=200)
 @torch.inference_mode()
 def edit(init_image, source_prompt, target_prompt, editing_strategy, denoise_strategy, num_steps, guidance, attn_guidance_start_block, inject_step, init_image_2=None):
-    global ae, t5, clip, model, name, is_schnell, output_dir, add_sampling_metadata, offload, source_image, history_tensors, instructions
+    global ae, t5, clip, model, name, is_schnell, output_dir, output_prompt, add_sampling_metadata, offload, source_image, history_tensors, instructions
 
     device = "cuda" if torch.cuda.is_available() else "cpu"
     torch.cuda.empty_cache()
@@ -276,6 +306,9 @@ def edit(init_image, source_prompt, target_prompt, editing_strategy, denoise_str
 
     if not any("round_0000" in fname for fname in os.listdir(output_dir)):
         Image.fromarray(init_image).save(os.path.join(output_dir,"round_0000_[source].jpg"))
+        prompt_path = os.path.join(output_prompt, f"round_0000.txt")
+        with open(prompt_path, "w") as f:
+            f.write('')
 
     init_image = init_image[:new_h, :new_w, :]
     width, height = init_image.shape[0], init_image.shape[1]
@@ -436,7 +469,7 @@ def edit(init_image, source_prompt, target_prompt, editing_strategy, denoise_str
     else:
         idx = 1
     formatted_idx = str(idx).zfill(4) # Format as a 4-digit string
-
+    os.makedirs(output_prompt, exist_ok=True)
     #-------------------- 6.3 output name ----------------------#
     if denoise_strategy == 'multi_turn_consistent':
         denoise_strategy = 'MTC'
@@ -458,9 +491,14 @@ def edit(init_image, source_prompt, target_prompt, editing_strategy, denoise_str
     instructions.append(target_prompt)
     print("End Edit")
 
+    prompt_path = os.path.join(output_prompt, f"round_{formatted_idx}.txt")
+    with open(prompt_path, "w") as f:
+        f.write(target_prompt)
+
     #-------------------- 6.4 save editing prompt, update gradio component: gallery ----------------------#
     img_and_prompt = []
     history_imgs = sorted(os.listdir(output_dir))
+    instructions = read_sorted_prompts(output_prompt)
     for img_file, prompt_txt in zip(history_imgs, instructions):
         img_and_prompt.append((os.path.join(output_dir, img_file), prompt_txt))
     history_gallery = gr.Gallery(value=img_and_prompt, label="History Image", interactive=True, columns=3)
@@ -486,7 +524,6 @@ def create_demo(model_name: str, device: str = "cuda" if torch.cuda.is_available
     # Pre-defined examples
     examples = [
         ["src/gradio_utils/gradio_examples/000000000011.jpg", "", "an eagle standing on the branch", ['attn_guidance'], 15, 3.5, 11, 0],
-        ["src/gradio_utils/gradio_examples/221000000002.jpg", "", "a cat wearing a hat standing on the fence", ['attn_guidance'], 15, 3.5, 11, 0],
     ]
 
     with gr.Blocks() as demo:
@@ -531,7 +568,7 @@ def create_demo(model_name: str, device: str = "cuda" if torch.cuda.is_available
             inputs=[init_image, source_prompt, target_prompt, editing_strategy, denoise_strategy, num_steps, guidance, attn_guidance_start_block, inject_step, init_image_2],
             outputs=[output_image, gallery]
         )
-        reset_btn.click(fn = reset, outputs=[source_prompt, target_prompt, gallery, output_image])
+        reset_btn.click(fn = reset, outputs=[source_prompt, target_prompt, gallery, output_image, init_image])
 
         # Add examples
         gr.Examples(
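
The functional change in app.py is that editing prompts are now persisted to disk, one round_XXXX.txt file per round under gradio_prompts/exp_N, and the history-gallery captions are rebuilt from those files via read_sorted_prompts() rather than only from the in-memory instructions list. Below is a minimal sketch of that round trip; the paths are illustrative and save_round_prompt is a hypothetical helper (only read_sorted_prompts mirrors code actually added by this commit).

    # Sketch of the prompt-persistence scheme: write one prompt file per round,
    # then re-read them in sorted order so captions stay aligned with the
    # lexicographically sorted round_XXXX_*.jpg output images.
    import os

    prompt_dir = "src/gradio_utils/gradio_prompts/exp_0"   # illustrative path
    image_dir = "src/gradio_utils/gradio_outputs/exp_0"    # illustrative path
    os.makedirs(prompt_dir, exist_ok=True)

    def save_round_prompt(folder, round_idx, prompt):
        # Zero-padded index keeps lexicographic order equal to round order.
        path = os.path.join(folder, f"round_{round_idx:04d}.txt")
        with open(path, "w") as f:
            f.write(prompt)

    def read_sorted_prompts(folder):
        # Mirrors the helper added in this commit: read every .txt file in sorted order.
        files = sorted(f for f in os.listdir(folder) if f.endswith(".txt"))
        prompts = []
        for filename in files:
            with open(os.path.join(folder, filename)) as f:
                prompts.append(f.read().strip())
        return prompts

    save_round_prompt(prompt_dir, 0, "")                        # source round keeps an empty prompt
    save_round_prompt(prompt_dir, 1, "an eagle on the branch")  # first edit round

    history_imgs = sorted(os.listdir(image_dir)) if os.path.isdir(image_dir) else []
    instructions = read_sorted_prompts(prompt_dir)
    gallery_items = list(zip(history_imgs, instructions))  # (image file, caption) pairs for gr.Gallery
    print(gallery_items)

Persisting prompts this way means the captions survive even when the in-memory globals are reset between GPU invocations, which is why both generate_image and edit now write a prompt file before rebuilding the gallery.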
src/flux/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/__init__.cpython-310.pyc and b/src/flux/__pycache__/__init__.cpython-310.pyc differ
 
src/flux/__pycache__/_version.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/_version.cpython-310.pyc and b/src/flux/__pycache__/_version.cpython-310.pyc differ
 
src/flux/__pycache__/math.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/math.cpython-310.pyc and b/src/flux/__pycache__/math.cpython-310.pyc differ
 
src/flux/__pycache__/model.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/model.cpython-310.pyc and b/src/flux/__pycache__/model.cpython-310.pyc differ
 
src/flux/__pycache__/sampling.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/sampling.cpython-310.pyc and b/src/flux/__pycache__/sampling.cpython-310.pyc differ
 
src/flux/__pycache__/util.cpython-310.pyc CHANGED
Binary files a/src/flux/__pycache__/util.cpython-310.pyc and b/src/flux/__pycache__/util.cpython-310.pyc differ
 
src/flux/modules/__pycache__/autoencoder.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc and b/src/flux/modules/__pycache__/autoencoder.cpython-310.pyc differ
 
src/flux/modules/__pycache__/conditioner.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/conditioner.cpython-310.pyc and b/src/flux/modules/__pycache__/conditioner.cpython-310.pyc differ
 
src/flux/modules/__pycache__/layers.cpython-310.pyc CHANGED
Binary files a/src/flux/modules/__pycache__/layers.cpython-310.pyc and b/src/flux/modules/__pycache__/layers.cpython-310.pyc differ
 
src/gradio_utils/gradio_examples/221000000002.jpg DELETED

Git LFS Details

  • SHA256: 81e88f4b4c7211b9dd8e29aaa7f31e38e474f148dcc8d47ba772a7b3c59bbbb1
  • Pointer size: 131 Bytes
  • Size of remote file: 161 kB