paulpanwang committed
Commit 7760d2d (verified) · Parent(s): 72c5e64

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -133,3 +133,4 @@ gradio_cached_examples/23/3D[[:space:]]Gaussians[[:space:]]ply[[:space:]]format/
 gradio_cached_examples/23/3D[[:space:]]Gaussians[[:space:]]ply[[:space:]]format/e611dc2e9fbf1acd1cfc/_a[[:space:]]toy[[:space:]]robot..._013020.ply filter=lfs diff=lfs merge=lfs -text
 tmp/input_image.png filter=lfs diff=lfs merge=lfs -text
 wheel/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl filter=lfs diff=lfs merge=lfs -text
+gradio_cached_examples/23/3D[[:space:]]Gaussians[[:space:]]ply[[:space:]]format/a6d4bba77a6f190743e6/_a[[:space:]]toy[[:space:]]robot..._013020.ply filter=lfs diff=lfs merge=lfs -text
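Note on the new rule: the `[[:space:]]` escapes are how git-lfs encodes spaces in paths when it writes tracking rules. For context, an entry like this is what the git-lfs CLI produces; a minimal sketch, assuming git-lfs is installed (the wildcard pattern here is illustrative, not from the commit):

    import subprocess

    # Sketch: `git lfs track` appends a "filter=lfs diff=lfs merge=lfs -text"
    # rule to .gitattributes, escaping spaces in the path as [[:space:]].
    subprocess.run(
        ["git", "lfs", "track",
         "gradio_cached_examples/23/3D Gaussians ply format/a6d4bba77a6f190743e6/*.ply"],
        check=True,
    )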
app.py CHANGED
@@ -13,63 +13,70 @@ current_path = os.path.dirname(os.path.abspath(__file__))
 MAX_SEED = np.iinfo(np.int32).max
 TMP_DIR = os.path.join(current_path, 'out')
 os.makedirs(TMP_DIR, exist_ok=True)
-TAG = "gsdiff_gobj83k_sd15__render"
-# os.system("cd extensions/RaDe-GS/submodules && pip3 install diff-gaussian-rasterization")
-# os.system("cd extensions/RaDe-GS/submodules/diff-gaussian-rasterization && python3 setup.py bdist_wheel ")
-
-# download checkpoints
-# subprocess.run(shlex.split("python3 download_ckpt.py --model_type pas")) # for txt condition
-# subprocess.run(shlex.split("python3 download_ckpt.py --model_type pas --image_cond")) # for img condition
-
-# img_commands = "PYTHONPATH=./ bash scripts/infer.sh src/infer_gsdiff_pas.py configs/gsdiff_pas.yaml {} \
-# --rembg_and_center --triangle_cfg_scaling --save_ply --output_video_type mp4 --guidance_scale {} \
-# --image_path {} --elevation {} --prompt {} --seed {}"
-
-# txt_commands = "PYTHONPATH=./ bash scripts/infer.sh src/infer_gsdiff_pas.py configs/gsdiff_pas.yaml \
+TAG = {
+    "SD15": ["gsdiff_gobj83k_sd15__render", "gsdiff_gobj83k_sd15_image__render"], # Best efficiency
+    "PixArt-Sigma": ["gsdiff_gobj83k_pas_fp16__render", "gsdiff_gobj83k_pas_fp16_image__render"],
+    "SD3": ["gsdiff_gobj83k_sd35m__render", "gsdiff_gobj83k_sd35m_image__render"] # Best performance
+}
+MODEL_TYPE = "PixArt-Sigma"
+
+# for PixArt-Sigma
+subprocess.run(shlex.split("python3 download_ckpt.py --model_type pas")) # for txt condition
+subprocess.run(shlex.split("python3 download_ckpt.py --model_type pas --image_cond")) # for img condition
+img_commands = "PYTHONPATH=./ bash scripts/infer.sh src/infer_gsdiff_pas.py configs/gsdiff_pas.yaml {} \
+    --rembg_and_center --triangle_cfg_scaling --save_ply --output_video_type mp4 --guidance_scale {} \
+    --image_path {} --elevation {} --prompt {} --seed {}"
+
+txt_commands = "PYTHONPATH=./ bash scripts/infer.sh src/infer_gsdiff_pas.py configs/gsdiff_pas.yaml {} \
+    --save_ply --output_video_type mp4 \
+    --prompt {} --seed {}"
+
+
+# for SD1.5
+# subprocess.run(shlex.split("python3 download_ckpt.py --model_type sd15")) # for txt condition
+# subprocess.run(shlex.split("python3 download_ckpt.py --model_type sd15 --image_cond")) # for img condition
+
+# img_commands = "PYTHONPATH=./ bash scripts/infer.sh src/infer_gsdiff_sd.py configs/gsdiff_sd15.yaml {} \
 # --rembg_and_center --triangle_cfg_scaling --save_ply --output_video_type mp4 --guidance_scale {} \
 # --image_path {} --elevation {} --prompt {} --seed {}"
 
-# SD1.5
-subprocess.run(shlex.split("python3 download_ckpt.py --model_type sd15")) # for txt condition
-# subprocess.run(shlex.split("python3 download_ckpt.py --model_type sd15 --image_cond")) # for img condition
-img_commands = "PYTHONPATH=./ bash scripts/infer.sh src/infer_gsdiff_sd.py configs/gsdiff_sd15.yaml {} \
-    --rembg_and_center --triangle_cfg_scaling --save_ply --output_video_type mp4 --guidance_scale {} \
-    --image_path {} --elevation {} --prompt {} --seed {}"
-
-txt_commands = "PYTHONPATH=./ bash scripts/infer.sh src/infer_gsdiff_sd.py configs/gsdiff_sd15.yaml {} \
-    --rembg_and_center --save_ply --output_video_type mp4 --guidance_scale {} \
-    --elevation {} --prompt {} --seed {}"
+# txt_commands = "PYTHONPATH=./ bash scripts/infer.sh src/infer_gsdiff_sd.py configs/gsdiff_sd15.yaml {} \
+# --save_ply --output_video_type mp4 --guidance_scale {} \
+# --elevation {} --prompt {} --seed {}"
 
 
 
 # process function
 @spaces.GPU
-def process(input_image, prompt='', prompt_neg='ugly, blurry, pixelated obscure, unnatural colors, poor lighting, dull, unclear, cropped, lowres, low quality, artifacts, duplicate', input_elevation=20, guidance_scale=2., input_seed=0):
-    subprocess.run("cd extensions/RaDe-GS/submodules && pip3 install diff-gaussian-rasterization", shell=True)
-    subprocess.run("cd extensions/RaDe-GS/submodules/diff-gaussian-rasterization && python3 setup.py bdist_wheel ", shell=True)
-
+def process(input_image, prompt='a_high_quality_3D_asset', prompt_neg='poor_quality', input_elevation=20, guidance_scale=2., input_seed=0):
+    # subprocess.run("cd extensions/RaDe-GS/submodules && pip3 install diff-gaussian-rasterization", shell=True)
+    # subprocess.run("cd extensions/RaDe-GS/submodules/diff-gaussian-rasterization && python3 setup.py bdist_wheel ", shell=True)
+
     if input_image is not None:
         image_path = os.path.join(TMP_DIR, "input_image.png")
         image_name = image_path.split('/')[-1].split('.')[0] + "_rgba"
         input_image.save(image_path)
-        full_command = img_commands.format(TAG, guidance_scale, image_path, input_elevation, prompt, input_seed)
+        TAG_DEST = TAG[MODEL_TYPE][1]
+        full_command = img_commands.format(TAG_DEST, guidance_scale, image_path, input_elevation, prompt, input_seed)
     else:
-        full_command = txt_commands.format(TAG, guidance_scale, input_elevation, prompt, input_seed)
+        TAG_DEST = TAG[MODEL_TYPE][0]
+        # without guidance_scale and input_elevation
+        full_command = txt_commands.format(TAG_DEST, prompt, input_seed)
         image_name = ""
 
     os.system(full_command)
 
     # save video and ply files
-    ckpt_dir = os.path.join(TMP_DIR, TAG, "checkpoints")
+    ckpt_dir = os.path.join(TMP_DIR, TAG_DEST, "checkpoints")
     infer_from_iter = int(sorted(os.listdir(ckpt_dir))[-1])
     MAX_NAME_LEN = 20 # TODO: make `20` configurable
     prompt = prompt.replace("_", " ")
     prompt_name = prompt[:MAX_NAME_LEN] + "..." if prompt[:MAX_NAME_LEN] != "" else prompt
     name = f"[{image_name}]_[{prompt_name}]_{infer_from_iter:06d}"
-    output_video_path = os.path.join(TMP_DIR, TAG, "inference", name + ".mp4")
-    output_ply_path = os.path.join(TMP_DIR, TAG, "inference", name + ".ply")
-    output_img_path = os.path.join(TMP_DIR, TAG, "inference", name + "_gs.png")
+    output_video_path = os.path.join(TMP_DIR, TAG_DEST, "inference", name + ".mp4")
+    output_ply_path = os.path.join(TMP_DIR, TAG_DEST, "inference", name + ".ply")
+    output_img_path = os.path.join(TMP_DIR, TAG_DEST, "inference", name + "_gs.png")
 
     logger.info(full_command, output_video_path, output_ply_path)
 
@@ -116,7 +123,7 @@ with block:
             guidance_scale = gr.Slider(label="guidance scale", minimum=1., maximum=7.5, step=0.5, value=2.0)
 
             # elevation
-            input_elevation = gr.Slider(label="elevation", minimum=-90, maximum=90, step=1, value=20)
+            input_elevation = gr.Slider(label="elevation", minimum=-90, maximum=90, step=1, value=10)
             # # inference steps
             # input_num_steps = gr.Slider(label="inference steps", minimum=1, maximum=100, step=1, value=30)
             # random seed
@@ -125,7 +132,7 @@ with block:
             button_gen = gr.Button("Generate")
 
 
-        with gr.Column(scale=1):
+        with gr.Column(scale=0.8):
            with gr.Tab("Video"):
                # final video results
                output_video = gr.Video(label="video")
@@ -153,8 +160,8 @@ with block:
        gr.Examples(
            examples=[
                "a_toy_robot",
-                # "a_cute_panda",
-                # "a_book"
+                "a_cute_panda",
+                "an_ancient_leather-bound_book"
            ],
            inputs=[input_text],
            outputs=[output_image, output_video, output_file],
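The core behavioral change in app.py: `TAG` goes from a single SD1.5 checkpoint tag to a per-backbone table, where index 0 is the text-conditioned variant and index 1 the image-conditioned one, selected per request. A minimal sketch of the lookup this diff introduces (all names taken from the diff):

    TAG = {
        "SD15": ["gsdiff_gobj83k_sd15__render", "gsdiff_gobj83k_sd15_image__render"],
        "PixArt-Sigma": ["gsdiff_gobj83k_pas_fp16__render", "gsdiff_gobj83k_pas_fp16_image__render"],
        "SD3": ["gsdiff_gobj83k_sd35m__render", "gsdiff_gobj83k_sd35m_image__render"],
    }
    MODEL_TYPE = "PixArt-Sigma"

    def pick_tag(image_conditioned: bool) -> str:
        # Index 0 = text-conditioned checkpoint tag, index 1 = image-conditioned.
        return TAG[MODEL_TYPE][1 if image_conditioned else 0]

    assert pick_tag(False) == "gsdiff_gobj83k_pas_fp16__render"
    assert pick_tag(True) == "gsdiff_gobj83k_pas_fp16_image__render"

Because the output paths are built from `TAG_DEST`, the video/ply lookup afterwards follows whichever checkpoint actually ran.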
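Also note the template arity: the new `txt_commands` has only three `{}` slots (tag, prompt, seed), matching the three-argument `.format()` call in the else-branch, while `img_commands` keeps six. Since the formatted string is executed via `os.system`, the prompt is interpolated unquoted; the Space sidesteps shell splitting by using underscore prompts such as `a_toy_robot` (underscores are swapped back to spaces later). A hypothetical hardening sketch, not part of the commit:

    import shlex

    # Hypothetical (not in the commit): quote values before interpolating them
    # into a shell string, so a prompt containing spaces or metacharacters
    # cannot split into extra CLI arguments.
    txt_commands = "PYTHONPATH=./ bash scripts/infer.sh ... {} --prompt {} --seed {}"  # 3 slots, as in the new template
    tag_dest = "gsdiff_gobj83k_pas_fp16__render"
    prompt = "a toy robot"
    print(txt_commands.format(tag_dest, shlex.quote(prompt), 0))  # --prompt 'a toy robot' --seed 0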
gradio_cached_examples/23/3D Gaussians ply format/a6d4bba77a6f190743e6/_a toy robot..._013020.ply ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3eb386a0bd35349d3271f7ac5431b26af1f8549b7a8a16c10af894877bb22c09
+size 1476719
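What is committed here is the Git LFS pointer, not the ~1.5 MB ply itself; the actual blob is content-addressed by the sha256 oid. A small sketch of reading such a pointer (format exactly as shown above):

    def parse_lfs_pointer(text: str) -> dict:
        # An LFS pointer is three "key value" lines: version, oid, size.
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        return {
            "version": fields["version"],
            "sha256": fields["oid"].split(":", 1)[1],
            "size_bytes": int(fields["size"]),
        }

    pointer = """version https://git-lfs.github.com/spec/v1
    oid sha256:3eb386a0bd35349d3271f7ac5431b26af1f8549b7a8a16c10af894877bb22c09
    size 1476719"""
    assert parse_lfs_pointer(pointer)["size_bytes"] == 1476719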
gradio_cached_examples/23/component 0/e4f43cec7e020a66d596/image.webp ADDED
gradio_cached_examples/23/log.csv CHANGED
@@ -1,4 +1,2 @@
 component 0,video,3D Gaussians (ply format),flag,username,timestamp
-"{""path"": ""gradio_cached_examples/23/component 0/60e43731ac393e3fd280/image.webp"", ""url"": ""/file=/tmp/gradio/dbf713c255683bb9a0817141f85a0a122fcfda997c9ee184c11cadc4ec699dbf/image.webp"", ""size"": null, ""orig_name"": ""image.webp"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}","{""video"": {""path"": ""gradio_cached_examples/23/video/a573fa23b5f2b0944aef/_a toy robot..._013020.mp4"", ""url"": ""/file=/tmp/gradio/93e4187e3359a1457f501416c2ee86ed9e0ab485007ff0784cd226ac2f6e8e0b/_a toy robot..._013020.mp4"", ""size"": null, ""orig_name"": ""[]_[a toy robot...]_013020.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}","{""path"": ""gradio_cached_examples/23/3D Gaussians ply format/e611dc2e9fbf1acd1cfc/_a toy robot..._013020.ply"", ""url"": ""/file=/tmp/gradio/3eb386a0bd35349d3271f7ac5431b26af1f8549b7a8a16c10af894877bb22c09/_a toy robot..._013020.ply"", ""size"": 1476719, ""orig_name"": ""[]_[a toy robot...]_013020.ply"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}",,,2025-02-05 17:53:34.441679
-"{""path"": ""gradio_cached_examples/23/component 0/c4591e8b87a757447a47/image.webp"", ""url"": ""/file=/tmp/gradio/7879af8e96c9236c48faefd790d084d712f2f5f45fbfe2dc54798540be16479f/image.webp"", ""size"": null, ""orig_name"": ""image.webp"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}","{""video"": {""path"": ""gradio_cached_examples/23/video/8e15e0239b0523adb9ea/_a cute panda..._013020.mp4"", ""url"": ""/file=/tmp/gradio/f9588f3f7218e15a80c4c8393b1335b3d70cbedc1f9ff91347daae9945325bd7/_a cute panda..._013020.mp4"", ""size"": null, ""orig_name"": ""[]_[a cute panda...]_013020.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}","{""path"": ""gradio_cached_examples/23/3D Gaussians ply format/743ad8490995e372c5f3/_a cute panda..._013020.ply"", ""url"": ""/file=/tmp/gradio/2ad3fb46683cdbfdb83050203762230a7a3d4889bb133f4376b8da0e3119fd1d/_a cute panda..._013020.ply"", ""size"": 2347854, ""orig_name"": ""[]_[a cute panda...]_013020.ply"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}",,,2025-02-05 17:54:24.100453
-"{""path"": ""gradio_cached_examples/23/component 0/31e3ecdad1b51eb1d11e/image.webp"", ""url"": ""/file=/tmp/gradio/0381c7b9d1a19b575d2568376ee9bfa18292716cc1e45ae95bb4528ca92557d6/image.webp"", ""size"": null, ""orig_name"": ""image.webp"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}","{""video"": {""path"": ""gradio_cached_examples/23/video/09491404fda7bfb0eab7/_a book..._013020.mp4"", ""url"": ""/file=/tmp/gradio/3d8e3fa54c906aff498f443284df7d5b2e9e9e8018558ea4f3199074aa342813/_a book..._013020.mp4"", ""size"": null, ""orig_name"": ""[]_[a book...]_013020.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}","{""path"": ""gradio_cached_examples/23/3D Gaussians ply format/1174ce83af0bcabb215a/_a book..._013020.ply"", ""url"": ""/file=/tmp/gradio/191f72e3e00089869450ba9fe70cab622e94089489c0cf5104a724f2fa98d5ee/_a book..._013020.ply"", ""size"": 2224072, ""orig_name"": ""[]_[a book...]_013020.ply"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}",,,2025-02-05 17:55:14.451860
+"{""path"": ""gradio_cached_examples/23/component 0/e4f43cec7e020a66d596/image.webp"", ""url"": ""/file=/tmp/gradio/dbf713c255683bb9a0817141f85a0a122fcfda997c9ee184c11cadc4ec699dbf/image.webp"", ""size"": null, ""orig_name"": ""image.webp"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}","{""video"": {""path"": ""gradio_cached_examples/23/video/3f82eb6a8d5e1cc88e28/_a toy robot..._013020.mp4"", ""url"": ""/file=/tmp/gradio/93e4187e3359a1457f501416c2ee86ed9e0ab485007ff0784cd226ac2f6e8e0b/_a toy robot..._013020.mp4"", ""size"": null, ""orig_name"": ""[]_[a toy robot...]_013020.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}","{""path"": ""gradio_cached_examples/23/3D Gaussians ply format/a6d4bba77a6f190743e6/_a toy robot..._013020.ply"", ""url"": ""/file=/tmp/gradio/3eb386a0bd35349d3271f7ac5431b26af1f8549b7a8a16c10af894877bb22c09/_a toy robot..._013020.ply"", ""size"": 1476719, ""orig_name"": ""[]_[a toy robot...]_013020.ply"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}",,,2025-02-06 12:39:16.678627
gradio_cached_examples/23/video/3f82eb6a8d5e1cc88e28/_a toy robot..._013020.mp4 ADDED
Binary file (68 kB).
 
requirements.txt CHANGED
@@ -29,4 +29,6 @@ spaces
 triton
 wandb
 opencv-python==4.11.0.86
+https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl?download=true
+
 
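pip treats a bare URL line in requirements.txt as a direct requirement, so this pins a prebuilt cp310/linux_x86_64 wheel of diff_gaussian_rasterization from the TRELLIS Space instead of compiling the CUDA extension at startup (the build step is now commented out in app.py's process()). An equivalent one-off install, for reference, assuming a matching Python 3.10 / linux_x86_64 environment:

    import subprocess
    import sys

    # One-off equivalent of the new requirements.txt line: install the
    # prebuilt rasterizer wheel directly from its URL.
    WHEEL_URL = (
        "https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/"
        "wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl"
    )
    subprocess.run([sys.executable, "-m", "pip", "install", WHEEL_URL], check=True)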
src/infer_gsdiff_sd.py CHANGED
@@ -347,13 +347,6 @@ def main():
         level=logging.INFO
     )
     logger = logging.getLogger(__name__)
-    # file_handler = logging.FileHandler(os.path.join(args.output_dir, args.tag, "log_infer.txt")) # output to file
-    # file_handler.setFormatter(logging.Formatter(
-    #     fmt="%(asctime)s - %(message)s",
-    #     datefmt="%Y/%m/%d %H:%M:%S"
-    # ))
-    # logger.addHandler(file_handler)
-    # logger.propagate = True # propagate to the root logger (console)
 
     # Set the random seed
     if args.seed >= 0:
@@ -368,7 +361,7 @@ def main():
     if (args.image_path is not None or args.image_dir is not None) and args.load_pretrained_controlnet is None:
         opt.prediction_type = "v_prediction"
         opt.view_concat_condition = True
-        opt.input_concat_binary_mask = False
+        opt.input_concat_binary_mask = True
         if args.guidance_scale > 3.:
             logger.info(
                 f"WARNING: guidance scale ({args.guidance_scale}) is too large for image-conditioned models. " +
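Two independent changes here: the long-dead commented-out file logging handler is deleted outright, and image-conditioned inference now sets `opt.input_concat_binary_mask = True`. A compact, self-contained restatement of the override logic (attribute names from the diff; the comment on the mask flag is my hedged reading, since such a flag typically changes the UNet's input channel count and must match how the checkpoint was trained):

    from types import SimpleNamespace

    def apply_image_cond_overrides(args: SimpleNamespace, opt: SimpleNamespace) -> None:
        # Restatement of the hunk above for image-conditioned runs without ControlNet.
        image_conditioned = args.image_path is not None or args.image_dir is not None
        if image_conditioned and args.load_pretrained_controlnet is None:
            opt.prediction_type = "v_prediction"   # scheduler predicts v, not epsilon
            opt.view_concat_condition = True       # concatenate the conditioning view
            # Changed False -> True in this commit: presumably the released
            # image-conditioned checkpoints expect the extra binary-mask channel.
            opt.input_concat_binary_mask = True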
src/models/gs_render/gs_renderer.py CHANGED
@@ -104,9 +104,9 @@ class GaussianRenderer:
                     self.opt.vis_coords or self.opt.vis_normals, # whether render depth & normal
                 )
                 images[i, j] = render_results["image"]
-                alphas[i, j] = render_results["alpha"]
-                depths[i, j] = render_results["depth"]
-                normals[i, j] = render_results["normal"]
+                # alphas[i, j] = render_results["alpha"]
+                # depths[i, j] = render_results["depth"]
+                # normals[i, j] = render_results["normal"]
 
         if not isinstance(bg_color, Tensor):
             bg_color = torch.tensor(list(bg_color), dtype=torch.float32, device=device)
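These writes are disabled because the swapped-in rasterizer (see the gs_util.py hunk below) returns only the color image and screen-space radii, so alpha, depth, and normal are no longer produced. If anything downstream still reads those buffers, a defensive copy is one option; an illustrative sketch only, not in the commit (it assumes `render_results` is dict-like, as its indexing above suggests):

    # Illustrative guard: copy auxiliary render outputs only when the
    # rasterizer produced them, so missing alpha/depth/normal entries do not
    # raise when written into the preallocated batch tensors.
    def copy_aux(render_results: dict, dest: dict, i: int, j: int) -> None:
        for key in ("alpha", "depth", "normal"):
            value = render_results.get(key)
            if value is not None:
                dest[key][i, j] = value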
src/models/gs_render/gs_util.py CHANGED
@@ -179,12 +179,13 @@ def render(
     bg_color = bg_color.to(C2W.device, dtype=torch.float32)
 
     pc = pc.to(dtype=torch.float32)
-
+    subpixel_offset = torch.zeros((int(viewpoint_camera.h), int(viewpoint_camera.w), 2), dtype=torch.float32, device="cuda")
     raster_settings = GaussianRasterizationSettings(
         image_height=int(viewpoint_camera.h),
         image_width=int(viewpoint_camera.w),
         tanfovx=viewpoint_camera.tanfovX,
         tanfovy=viewpoint_camera.tanfovY,
+        subpixel_offset=subpixel_offset,
         kernel_size=0., # cf. Mip-Splatting; not used
         bg=bg_color,
         scale_modifier=scaling_modifier,
@@ -194,13 +195,13 @@ def render(
         campos=viewpoint_camera.camera_center,
         prefiltered=False,
         debug=False,
-        # cf. RaDe-GS
-        require_depth=render_dn,
-        require_coord=False,
+
     )
+    alpha = normal = depth = None
+
     rasterizer = GaussianRasterizer(raster_settings=raster_settings)
-
-    image, _, _, _, depth, _, alpha, normal = rasterizer( # not used: radii, coord, mcoord, mdepth
+    # Rasterize visible Gaussians to image, obtain their radii (on screen).
+    image, radii = rasterizer( # not used: radii, coord, mcoord, mdepth
         means3D=pc.xyz,
         means2D=torch.zeros_like(pc.xyz, dtype=torch.float32, device=pc.xyz.device),
         shs=None,
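The substance of this hunk is an ABI swap: the RaDe-GS fork's rasterizer took `require_depth`/`require_coord` and returned an 8-tuple including depth, alpha, and normal, while the TRELLIS wheel's Mip-Splatting-style rasterizer takes a per-pixel `subpixel_offset` (here all zeros, i.e. no anti-aliasing jitter) plus `kernel_size` in its settings and returns just `(image, radii)`; that is why `alpha = normal = depth = None` above and why gs_renderer.py stops storing those buffers. (The carried-over `# not used: radii, ...` comment is now stale, since radii is captured.) One small portability note: the offset tensor is created on a hard-coded `device="cuda"`; a sketch that derives the device instead (the helper name is mine):

    import torch

    def make_subpixel_offset(h: int, w: int, device: torch.device) -> torch.Tensor:
        # (H, W, 2) zero offsets -> no sub-pixel jitter, matching the diff's usage.
        # Deriving the device from an existing tensor (e.g. C2W.device) avoids
        # breaking CPU-only debugging runs with a hard-coded "cuda" string.
        return torch.zeros((int(h), int(w), 2), dtype=torch.float32, device=device)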