vaibhavpandeyvpz committed on
Commit e6dc4b9 · 1 Parent(s): c044b0a

Set up Gradio app for Trellis

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +4 -0
  2. .gitignore +49 -0
  3. README.md +60 -2
  4. app.py +422 -4
  5. assets/images/gun1.webp +3 -0
  6. assets/images/gun2.webp +3 -0
  7. requirements.txt +28 -0
  8. trellis/__init__.py +6 -0
  9. trellis/models/__init__.py +70 -0
  10. trellis/models/sparse_structure_flow.py +200 -0
  11. trellis/models/sparse_structure_vae.py +306 -0
  12. trellis/models/structured_latent_flow.py +262 -0
  13. trellis/models/structured_latent_vae/__init__.py +4 -0
  14. trellis/models/structured_latent_vae/base.py +117 -0
  15. trellis/models/structured_latent_vae/decoder_gs.py +122 -0
  16. trellis/models/structured_latent_vae/decoder_mesh.py +167 -0
  17. trellis/models/structured_latent_vae/decoder_rf.py +104 -0
  18. trellis/models/structured_latent_vae/encoder.py +72 -0
  19. trellis/modules/attention/__init__.py +36 -0
  20. trellis/modules/attention/full_attn.py +140 -0
  21. trellis/modules/attention/modules.py +146 -0
  22. trellis/modules/norm.py +25 -0
  23. trellis/modules/sparse/__init__.py +102 -0
  24. trellis/modules/sparse/attention/__init__.py +4 -0
  25. trellis/modules/sparse/attention/full_attn.py +215 -0
  26. trellis/modules/sparse/attention/modules.py +139 -0
  27. trellis/modules/sparse/attention/serialized_attn.py +193 -0
  28. trellis/modules/sparse/attention/windowed_attn.py +135 -0
  29. trellis/modules/sparse/basic.py +459 -0
  30. trellis/modules/sparse/conv/__init__.py +21 -0
  31. trellis/modules/sparse/conv/conv_spconv.py +80 -0
  32. trellis/modules/sparse/conv/conv_torchsparse.py +38 -0
  33. trellis/modules/sparse/linear.py +15 -0
  34. trellis/modules/sparse/nonlinearity.py +35 -0
  35. trellis/modules/sparse/norm.py +58 -0
  36. trellis/modules/sparse/spatial.py +110 -0
  37. trellis/modules/sparse/transformer/__init__.py +2 -0
  38. trellis/modules/sparse/transformer/blocks.py +151 -0
  39. trellis/modules/sparse/transformer/modulated.py +166 -0
  40. trellis/modules/spatial.py +48 -0
  41. trellis/modules/transformer/__init__.py +2 -0
  42. trellis/modules/transformer/blocks.py +182 -0
  43. trellis/modules/transformer/modulated.py +157 -0
  44. trellis/modules/utils.py +54 -0
  45. trellis/pipelines/__init__.py +24 -0
  46. trellis/pipelines/base.py +66 -0
  47. trellis/pipelines/samplers/__init__.py +2 -0
  48. trellis/pipelines/samplers/base.py +20 -0
  49. trellis/pipelines/samplers/classifier_free_guidance_mixin.py +12 -0
  50. trellis/pipelines/samplers/flow_euler.py +199 -0
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.webp filter=lfs diff=lfs merge=lfs -text
37
+ *.png filter=lfs diff=lfs merge=lfs -text
38
+ *.jpg filter=lfs diff=lfs merge=lfs -text
39
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,49 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+
23
+ # Virtual environments
24
+ venv/
25
+ env/
26
+ ENV/
27
+ .venv
28
+
29
+ # IDE
30
+ .vscode/
31
+ .idea/
32
+ *.swp
33
+ *.swo
34
+ *~
35
+ .DS_Store
36
+
37
+ # Temporary files
38
+ tmp/
39
+ *.tmp
40
+ *.log
41
+
42
+ # Model cache (if any)
43
+ .cache/
44
+ *.pth
45
+ *.pt
46
+ *.ckpt
47
+
48
+ # Jupyter
49
+ .ipynb_checkpoints/
README.md CHANGED
@@ -8,7 +8,65 @@ sdk_version: 6.1.0
8
  app_file: app.py
9
  pinned: false
10
  license: mit
11
- short_description: Space to work with microsoft/TRELLIS image to 3D model.
 
12
  ---
13
 
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
8
  app_file: app.py
9
  pinned: false
10
  license: mit
11
+ short_description: Convert image to high-quality 3D model via microsoft/TRELLIS
12
+ hardware: zero-gpu-t4
13
  ---
14
 
15
+ # TRELLIS Image to 3D
16
+
17
+ Convert 2D images into high-quality 3D models using [TRELLIS](https://trellis3d.github.io/), Microsoft's scalable and versatile 3D generation model.
18
+
19
+ ## 🚀 Features
20
+
21
+ - **Single Image to 3D**: Generate 3D models from a single input image
22
+ - **Multi-Image Support**: Use multiple views of an object for better reconstruction (experimental)
23
+ - **Multiple Output Formats**:
24
+ - GLB files for use in 3D applications and game engines
25
+ - Gaussian Splatting (.ply) files for advanced rendering
26
+ - **Interactive 3D Viewer**: Preview your generated models directly in the browser
27
+ - **Automatic Background Removal**: Uses alpha channel or automatic background removal
28
+ - **Configurable Generation**: Adjust sampling steps and guidance strength for fine-tuned results
29
+
30
+ ## 📖 How to Use
31
+
32
+ 1. **Upload an Image**: Click on the image input area and select an image, or choose from the example images below
33
+ 2. **Configure Settings** (optional): Expand "Generation Settings" to adjust:
34
+ - Seed for reproducibility
35
+ - Sparse Structure Generation parameters (Stage 1)
36
+ - Structured Latent Generation parameters (Stage 2)
37
+ 3. **Generate**: Click "Generate & Extract GLB" to create your 3D model
38
+ 4. **Download**: Once generation is complete, download the GLB file or extract Gaussian splatting data
39
+
40
+ ## 💡 Tips for Best Results
41
+
42
+ - **Image Quality**: Use clear, well-lit images with good contrast
43
+ - **Alpha Channel**: Images with transparent backgrounds (alpha channel) work best
44
+ - **Object Focus**: Ensure the main object is clearly visible and centered
45
+ - **Background**: The tool automatically removes backgrounds if no alpha channel is present
46
+
47
+ ## 🔧 Technical Details
48
+
49
+ - **Model**: [microsoft/TRELLIS](https://huggingface.co/microsoft/TRELLIS)
50
+ - **Hardware**: ZeroGPU (T4) - GPU resources are allocated on-demand
51
+ - **Processing Time**: Typically 2-5 minutes depending on image complexity and GPU availability
52
+
53
+ ## 📚 Resources
54
+
55
+ - [TRELLIS Project Page](https://trellis3d.github.io/)
56
+ - [Paper](https://huggingface.co/papers/2412.01506)
57
+ - [Model Card](https://huggingface.co/microsoft/TRELLIS)
58
+
59
+ ## 📝 Output Formats
60
+
61
+ - **GLB**: Universal 3D format compatible with most 3D software, game engines, and web viewers
62
+ - **Gaussian Splatting (.ply)**: Advanced point-based representation for high-quality rendering
63
+
64
+ ## ⚠️ Notes
65
+
66
+ - Multi-image mode is experimental and may not produce optimal results for all image sets
67
+ - Gaussian splatting files can be large (~50MB) and may take time to download
68
+ - Processing requires GPU resources - you may need to wait if all GPUs are in use
69
+
70
+ ---
71
+
72
+ Built with [Gradio](https://gradio.app/) and powered by [Hugging Face Spaces](https://huggingface.co/spaces)
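
For reference, the same two-stage generation the UI drives can also be run head-less with the code this commit adds. A minimal sketch, assuming the pinned requirements and a CUDA GPU; the input path and sampler values simply mirror the app's defaults:

```python
from PIL import Image
from trellis.pipelines import TrellisImageTo3DPipeline
from trellis.utils import postprocessing_utils

# Same checkpoint app.py loads at startup.
pipeline = TrellisImageTo3DPipeline.from_pretrained("microsoft/TRELLIS")
pipeline.cuda()

image = Image.open("assets/images/gun1.webp")  # example asset shipped with this commit
outputs = pipeline.run(
    image,                                     # pipeline handles its own preprocessing here
    seed=0,
    formats=["gaussian", "mesh"],
    sparse_structure_sampler_params={"steps": 12, "cfg_strength": 7.5},  # Stage 1 defaults
    slat_sampler_params={"steps": 12, "cfg_strength": 3.0},              # Stage 2 defaults
)

# Bake the Gaussian + mesh outputs into a textured GLB, as the app does.
glb = postprocessing_utils.to_glb(
    outputs["gaussian"][0],
    outputs["mesh"][0],
    simplify=0.95,
    texture_size=1024,
    verbose=False,
)
glb.export("sample.glb")
```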
app.py CHANGED
@@ -1,7 +1,425 @@
1
  import gradio as gr
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
5
 
6
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- demo.launch()
1
  import gradio as gr
2
+ import spaces
3
+ from gradio_litmodel3d import LitModel3D
4
 
5
+ import os
6
+ import shutil
7
+ os.environ['SPCONV_ALGO'] = 'native'
8
+ from typing import *
9
+ import torch
10
+ import numpy as np
11
+ import imageio
12
+ from easydict import EasyDict as edict
13
+ from PIL import Image
14
+ from trellis.pipelines import TrellisImageTo3DPipeline
15
+ from trellis.representations import Gaussian, MeshExtractResult
16
+ from trellis.utils import render_utils, postprocessing_utils
17
 
18
+
19
+ MAX_SEED = np.iinfo(np.int32).max
20
+ TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
21
+ os.makedirs(TMP_DIR, exist_ok=True)
22
+
23
+
24
+ def start_session(req: gr.Request):
25
+ user_dir = os.path.join(TMP_DIR, str(req.session_hash))
26
+ os.makedirs(user_dir, exist_ok=True)
27
+
28
+
29
+ def end_session(req: gr.Request):
30
+ user_dir = os.path.join(TMP_DIR, str(req.session_hash))
31
+ if os.path.exists(user_dir):
32
+ shutil.rmtree(user_dir)
33
+
34
+
35
+ def preprocess_image(image: Image.Image) -> Image.Image:
36
+ """
37
+ Preprocess the input image for 3D generation.
38
+
39
+ This function is called when a user uploads an image or selects an example.
40
+ It applies background removal and other preprocessing steps necessary for
41
+ optimal 3D model generation.
42
+
43
+ Args:
44
+ image (Image.Image): The input image from the user
45
+
46
+ Returns:
47
+ Image.Image: The preprocessed image ready for 3D generation
48
+ """
49
+ processed_image = pipeline.preprocess_image(image)
50
+ return processed_image
51
+
52
+
53
+ def preprocess_images(images: List[Tuple[Image.Image, str]]) -> List[Image.Image]:
54
+ """
55
+ Preprocess a list of input images for multi-image 3D generation.
56
+
57
+ This function is called when users upload multiple images in the gallery.
58
+ It processes each image to prepare them for the multi-image 3D generation pipeline.
59
+
60
+ Args:
61
+ images (List[Tuple[Image.Image, str]]): The input images from the gallery
62
+
63
+ Returns:
64
+ List[Image.Image]: The preprocessed images ready for 3D generation
65
+ """
66
+ images = [image[0] for image in images]
67
+ processed_images = [pipeline.preprocess_image(image) for image in images]
68
+ return processed_images
69
+
70
+
71
+ def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
72
+ return {
73
+ 'gaussian': {
74
+ **gs.init_params,
75
+ '_xyz': gs._xyz.cpu().numpy(),
76
+ '_features_dc': gs._features_dc.cpu().numpy(),
77
+ '_scaling': gs._scaling.cpu().numpy(),
78
+ '_rotation': gs._rotation.cpu().numpy(),
79
+ '_opacity': gs._opacity.cpu().numpy(),
80
+ },
81
+ 'mesh': {
82
+ 'vertices': mesh.vertices.cpu().numpy(),
83
+ 'faces': mesh.faces.cpu().numpy(),
84
+ },
85
+ }
86
+
87
+
88
+ def unpack_state(state: dict) -> Tuple[Gaussian, edict]:
89
+ gs = Gaussian(
90
+ aabb=state['gaussian']['aabb'],
91
+ sh_degree=state['gaussian']['sh_degree'],
92
+ mininum_kernel_size=state['gaussian']['mininum_kernel_size'],
93
+ scaling_bias=state['gaussian']['scaling_bias'],
94
+ opacity_bias=state['gaussian']['opacity_bias'],
95
+ scaling_activation=state['gaussian']['scaling_activation'],
96
+ )
97
+ gs._xyz = torch.tensor(state['gaussian']['_xyz'], device='cuda')
98
+ gs._features_dc = torch.tensor(state['gaussian']['_features_dc'], device='cuda')
99
+ gs._scaling = torch.tensor(state['gaussian']['_scaling'], device='cuda')
100
+ gs._rotation = torch.tensor(state['gaussian']['_rotation'], device='cuda')
101
+ gs._opacity = torch.tensor(state['gaussian']['_opacity'], device='cuda')
102
+
103
+ mesh = edict(
104
+ vertices=torch.tensor(state['mesh']['vertices'], device='cuda'),
105
+ faces=torch.tensor(state['mesh']['faces'], device='cuda'),
106
+ )
107
+
108
+ return gs, mesh
109
+
110
+
111
+ def get_seed(randomize_seed: bool, seed: int) -> int:
112
+ """
113
+ Get the random seed for generation.
114
+
115
+ This function is called by the generate button to determine whether to use
116
+ a random seed or the user-specified seed value.
117
+
118
+ Args:
119
+ randomize_seed (bool): Whether to generate a random seed
120
+ seed (int): The user-specified seed value
121
+
122
+ Returns:
123
+ int: The seed to use for generation
124
+ """
125
+ return np.random.randint(0, MAX_SEED) if randomize_seed else seed
126
+
127
+
128
+ @spaces.GPU(duration=120)
129
+ def generate_and_extract_glb(
130
+ image: Image.Image,
131
+ multiimages: List[Tuple[Image.Image, str]],
132
+ is_multiimage: bool,
133
+ seed: int,
134
+ ss_guidance_strength: float,
135
+ ss_sampling_steps: int,
136
+ slat_guidance_strength: float,
137
+ slat_sampling_steps: int,
138
+ multiimage_algo: Literal["multidiffusion", "stochastic"],
139
+ mesh_simplify: float,
140
+ texture_size: int,
141
+ req: gr.Request,
142
+ ) -> Tuple[dict, str, str, str]:
143
+ """
144
+ Convert an image to a 3D model and extract GLB file.
145
+
146
+ Args:
147
+ image (Image.Image): The input image.
148
+ multiimages (List[Tuple[Image.Image, str]]): The input images in multi-image mode.
149
+ is_multiimage (bool): Whether multi-image mode is enabled.
150
+ seed (int): The random seed.
151
+ ss_guidance_strength (float): The guidance strength for sparse structure generation.
152
+ ss_sampling_steps (int): The number of sampling steps for sparse structure generation.
153
+ slat_guidance_strength (float): The guidance strength for structured latent generation.
154
+ slat_sampling_steps (int): The number of sampling steps for structured latent generation.
155
+ multiimage_algo (Literal["multidiffusion", "stochastic"]): The algorithm for multi-image generation.
156
+ mesh_simplify (float): The mesh simplification factor.
157
+ texture_size (int): The texture resolution.
158
+
159
+ Returns:
160
+ dict: The information of the generated 3D model.
161
+ str: The path to the video of the 3D model.
162
+ str: The path to the extracted GLB file.
163
+ str: The path to the extracted GLB file (for download).
164
+ """
165
+ user_dir = os.path.join(TMP_DIR, str(req.session_hash))
166
+
167
+ # Generate 3D model
168
+ if not is_multiimage:
169
+ outputs = pipeline.run(
170
+ image,
171
+ seed=seed,
172
+ formats=["gaussian", "mesh"],
173
+ preprocess_image=False,
174
+ sparse_structure_sampler_params={
175
+ "steps": ss_sampling_steps,
176
+ "cfg_strength": ss_guidance_strength,
177
+ },
178
+ slat_sampler_params={
179
+ "steps": slat_sampling_steps,
180
+ "cfg_strength": slat_guidance_strength,
181
+ },
182
+ )
183
+ else:
184
+ outputs = pipeline.run_multi_image(
185
+ [image[0] for image in multiimages],
186
+ seed=seed,
187
+ formats=["gaussian", "mesh"],
188
+ preprocess_image=False,
189
+ sparse_structure_sampler_params={
190
+ "steps": ss_sampling_steps,
191
+ "cfg_strength": ss_guidance_strength,
192
+ },
193
+ slat_sampler_params={
194
+ "steps": slat_sampling_steps,
195
+ "cfg_strength": slat_guidance_strength,
196
+ },
197
+ mode=multiimage_algo,
198
+ )
199
+
200
+ # Render video
201
+ video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
202
+ video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
203
+ video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
204
+ video_path = os.path.join(user_dir, 'sample.mp4')
205
+ imageio.mimsave(video_path, video, fps=15)
206
+
207
+ # Extract GLB
208
+ gs = outputs['gaussian'][0]
209
+ mesh = outputs['mesh'][0]
210
+ glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
211
+ glb_path = os.path.join(user_dir, 'sample.glb')
212
+ glb.export(glb_path)
213
+
214
+ # Pack state for optional Gaussian extraction
215
+ state = pack_state(gs, mesh)
216
+
217
+ torch.cuda.empty_cache()
218
+ return state, video_path, glb_path, glb_path
219
+
220
+
221
+ @spaces.GPU
222
+ def extract_gaussian(state: dict, req: gr.Request) -> Tuple[str, str]:
223
+ """
224
+ Extract a Gaussian splatting file from the generated 3D model.
225
+
226
+ This function is called when the user clicks "Extract Gaussian" button.
227
+ It converts the 3D model state into a .ply file format containing
228
+ Gaussian splatting data for advanced 3D applications.
229
+
230
+ Args:
231
+ state (dict): The state of the generated 3D model containing Gaussian data
232
+ req (gr.Request): Gradio request object for session management
233
+
234
+ Returns:
235
+ Tuple[str, str]: Paths to the extracted Gaussian file (for display and download)
236
+ """
237
+ user_dir = os.path.join(TMP_DIR, str(req.session_hash))
238
+ gs, _ = unpack_state(state)
239
+ gaussian_path = os.path.join(user_dir, 'sample.ply')
240
+ gs.save_ply(gaussian_path)
241
+ torch.cuda.empty_cache()
242
+ return gaussian_path, gaussian_path
243
+
244
+
245
+ def prepare_multi_example() -> List[Image.Image]:
246
+ # Multi-image examples removed - using only assets/images
247
+ return []
248
+
249
+
250
+ def split_image(image: Image.Image) -> List[Image.Image]:
251
+ """
252
+ Split a multi-view image into separate view images.
253
+
254
+ This function is called when users select multi-image examples that contain
255
+ multiple views in a single concatenated image. It automatically splits them
256
+ based on alpha channel boundaries and preprocesses each view.
257
+
258
+ Args:
259
+ image (Image.Image): A concatenated image containing multiple views
260
+
261
+ Returns:
262
+ List[Image.Image]: List of individual preprocessed view images
263
+ """
264
+ image = np.array(image)
265
+ alpha = image[..., 3]
266
+ alpha = np.any(alpha>0, axis=0)
267
+ start_pos = np.where(~alpha[:-1] & alpha[1:])[0].tolist()
268
+ end_pos = np.where(alpha[:-1] & ~alpha[1:])[0].tolist()
269
+ images = []
270
+ for s, e in zip(start_pos, end_pos):
271
+ images.append(Image.fromarray(image[:, s:e+1]))
272
+ return [preprocess_image(image) for image in images]
273
+
274
+
275
+ with gr.Blocks(delete_cache=(600, 600)) as demo:
276
+ gr.Markdown("""
277
+ ## Image to 3D Asset with [TRELLIS](https://trellis3d.github.io/)
278
+ * Upload an image and click "Generate & Extract GLB" to create a 3D asset and automatically extract the GLB file.
279
+ * If you want the Gaussian file as well, click "Extract Gaussian" after generation.
280
+ * If the image has alpha channel, it will be used as the mask. Otherwise, we use `rembg` to remove the background.
281
+
282
+ ✨New: 1) Experimental multi-image support. 2) Gaussian file extraction.
283
+ """)
284
+
285
+ with gr.Row():
286
+ with gr.Column():
287
+ with gr.Tabs() as input_tabs:
288
+ with gr.Tab(label="Single Image", id=0) as single_image_input_tab:
289
+ image_prompt = gr.Image(label="Image Prompt", format="png", image_mode="RGBA", type="pil", height=300)
290
+ with gr.Tab(label="Multiple Images", id=1) as multiimage_input_tab:
291
+ multiimage_prompt = gr.Gallery(label="Image Prompt", format="png", type="pil", height=300, columns=3)
292
+ gr.Markdown("""
293
+ Input different views of the object in separate images.
294
+
295
+ *NOTE: this is an experimental algorithm without training a specialized model. It may not produce the best results for all images, especially those having different poses or inconsistent details.*
296
+ """)
297
+
298
+ with gr.Accordion(label="Generation Settings", open=False):
299
+ seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
300
+ randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
301
+ gr.Markdown("Stage 1: Sparse Structure Generation")
302
+ with gr.Row():
303
+ ss_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=7.5, step=0.1)
304
+ ss_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
305
+ gr.Markdown("Stage 2: Structured Latent Generation")
306
+ with gr.Row():
307
+ slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1)
308
+ slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
309
+ multiimage_algo = gr.Radio(["stochastic", "multidiffusion"], label="Multi-image Algorithm", value="stochastic")
310
+
311
+ with gr.Accordion(label="GLB Extraction Settings", open=False):
312
+ mesh_simplify = gr.Slider(0.9, 0.98, label="Simplify", value=0.95, step=0.01)
313
+ texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)
314
+
315
+ generate_btn = gr.Button("Generate & Extract GLB", variant="primary")
316
+ extract_gs_btn = gr.Button("Extract Gaussian", interactive=False)
317
+ gr.Markdown("""
318
+ *NOTE: Gaussian file can be very large (~50MB), it will take a while to display and download.*
319
+ """)
320
+
321
+ with gr.Column():
322
+ video_output = gr.Video(label="Generated 3D Asset", autoplay=True, loop=True, height=300)
323
+ model_output = LitModel3D(label="Extracted GLB/Gaussian", exposure=10.0, height=300)
324
+
325
+ with gr.Row():
326
+ download_glb = gr.DownloadButton(label="Download GLB", interactive=False)
327
+ download_gs = gr.DownloadButton(label="Download Gaussian", interactive=False)
328
+
329
+ is_multiimage = gr.State(False)
330
+ output_buf = gr.State()
331
+
332
+ # Example images at the bottom of the page
333
+ with gr.Row() as single_image_example:
334
+ if os.path.exists("assets/images"):
335
+ examples = gr.Examples(
336
+ examples=[
337
+ f'assets/images/{image}'
338
+ for image in os.listdir("assets/images")
339
+ if image.endswith(('.png', '.jpg', '.jpeg', '.webp'))
340
+ ],
341
+ inputs=[image_prompt],
342
+ fn=preprocess_image,
343
+ outputs=[image_prompt],
344
+ run_on_click=True,
345
+ examples_per_page=64,
346
+ )
347
+ else:
348
+ examples = gr.Examples(examples=[], inputs=[image_prompt])
349
+
350
+ with gr.Row(visible=False) as multiimage_example:
351
+ examples_multi = gr.Examples(
352
+ examples=prepare_multi_example(),
353
+ inputs=[image_prompt],
354
+ fn=split_image,
355
+ outputs=[multiimage_prompt],
356
+ run_on_click=True,
357
+ examples_per_page=8,
358
+ )
359
+
360
+ # Handlers
361
+ demo.load(start_session)
362
+ demo.unload(end_session)
363
+
364
+ single_image_input_tab.select(
365
+ lambda: tuple([False, gr.update(visible=True), gr.update(visible=False)]),
366
+ outputs=[is_multiimage, single_image_example, multiimage_example]
367
+ )
368
+ multiimage_input_tab.select(
369
+ lambda: tuple([True, gr.update(visible=False), gr.update(visible=True)]),
370
+ outputs=[is_multiimage, single_image_example, multiimage_example]
371
+ )
372
+
373
+ image_prompt.upload(
374
+ preprocess_image,
375
+ inputs=[image_prompt],
376
+ outputs=[image_prompt],
377
+ )
378
+ multiimage_prompt.upload(
379
+ preprocess_images,
380
+ inputs=[multiimage_prompt],
381
+ outputs=[multiimage_prompt],
382
+ )
383
+
384
+ generate_btn.click(
385
+ get_seed,
386
+ inputs=[randomize_seed, seed],
387
+ outputs=[seed],
388
+ ).then(
389
+ generate_and_extract_glb,
390
+ inputs=[image_prompt, multiimage_prompt, is_multiimage, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps, multiimage_algo, mesh_simplify, texture_size],
391
+ outputs=[output_buf, video_output, model_output, download_glb],
392
+ ).then(
393
+ lambda: tuple([gr.Button(interactive=True), gr.Button(interactive=True)]),
394
+ outputs=[extract_gs_btn, download_glb],
395
+ )
396
+
397
+ video_output.clear(
398
+ lambda: tuple([gr.Button(interactive=False), gr.Button(interactive=False), gr.Button(interactive=False)]),
399
+ outputs=[extract_gs_btn, download_glb, download_gs],
400
+ )
401
+
402
+ extract_gs_btn.click(
403
+ extract_gaussian,
404
+ inputs=[output_buf],
405
+ outputs=[model_output, download_gs],
406
+ ).then(
407
+ lambda: gr.Button(interactive=True),
408
+ outputs=[download_gs],
409
+ )
410
+
411
+ model_output.clear(
412
+ lambda: tuple([gr.Button(interactive=False), gr.Button(interactive=False)]),
413
+ outputs=[download_glb, download_gs],
414
+ )
415
+
416
+
417
+ # Launch the Gradio app
418
+ if __name__ == "__main__":
419
+ pipeline = TrellisImageTo3DPipeline.from_pretrained("microsoft/TRELLIS")
420
+ pipeline.cuda()
421
+ try:
422
+ pipeline.preprocess_image(Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))) # Preload rembg
423
+ except:
424
+ pass
425
+ demo.launch()
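
As an offline sanity check, the GLB written by `generate_and_extract_glb` can be inspected with `trimesh`, which is already pinned in `requirements.txt`. A minimal sketch; the file path is illustrative and should point at whatever you downloaded from the UI:

```python
import trimesh

# Illustrative path: the handler writes "sample.glb" into a per-session tmp dir;
# point this at the file downloaded from the UI.
scene = trimesh.load("sample.glb", force="scene")

for name, mesh in scene.geometry.items():
    print(name, mesh.vertices.shape, mesh.faces.shape)
```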
assets/images/gun1.webp ADDED

Git LFS Details

  • SHA256: f8001a758093493e0ca060ee02ad80b667837e456a057ad31bc1233f880a8200
  • Pointer size: 131 Bytes
  • Size of remote file: 155 kB
assets/images/gun2.webp ADDED

Git LFS Details

  • SHA256: f59ab4ad24066da65f97c77ba6d5ca048f7d1dc1442ff42eadbe1f62fc4264b6
  • Pointer size: 131 Bytes
  • Size of remote file: 140 kB
requirements.txt ADDED
@@ -0,0 +1,28 @@
1
+ --extra-index-url https://download.pytorch.org/whl/cu121
2
+
3
+ torch==2.4.0
4
+ torchvision==0.19.0
5
+ pillow==10.4.0
6
+ imageio==2.36.1
7
+ imageio-ffmpeg==0.5.1
8
+ tqdm==4.67.1
9
+ easydict==1.13
10
+ opencv-python-headless==4.10.0.84
11
+ scipy==1.14.1
12
+ rembg==2.0.60
13
+ onnxruntime==1.20.1
14
+ trimesh==4.5.3
15
+ xatlas==0.0.9
16
+ pyvista==0.44.2
17
+ pymeshfix==0.17.0
18
+ igraph==0.11.8
19
+ git+https://github.com/EasternJournalist/utils3d.git@9a4eb15e4021b67b12c460c7057d642626897ec8
20
+ xformers==0.0.27.post2
21
+ spconv-cu120==2.3.6
22
+ transformers==4.46.3
23
+ gradio_litmodel3d==0.0.1
24
+ pydantic==2.10.6
25
+ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.0.post2/flash_attn-2.7.0.post2+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
26
+ https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl?download=true
27
+ https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/wheels/nvdiffrast-0.3.3-cp310-cp310-linux_x86_64.whl?download=true
28
+
trellis/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ from . import models
2
+ from . import modules
3
+ from . import pipelines
4
+ from . import renderers
5
+ from . import representations
6
+ from . import utils
trellis/models/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ import importlib
2
+
3
+ __attributes = {
4
+ 'SparseStructureEncoder': 'sparse_structure_vae',
5
+ 'SparseStructureDecoder': 'sparse_structure_vae',
6
+ 'SparseStructureFlowModel': 'sparse_structure_flow',
7
+ 'SLatEncoder': 'structured_latent_vae',
8
+ 'SLatGaussianDecoder': 'structured_latent_vae',
9
+ 'SLatRadianceFieldDecoder': 'structured_latent_vae',
10
+ 'SLatMeshDecoder': 'structured_latent_vae',
11
+ 'SLatFlowModel': 'structured_latent_flow',
12
+ }
13
+
14
+ __submodules = []
15
+
16
+ __all__ = list(__attributes.keys()) + __submodules
17
+
18
+ def __getattr__(name):
19
+ if name not in globals():
20
+ if name in __attributes:
21
+ module_name = __attributes[name]
22
+ module = importlib.import_module(f".{module_name}", __name__)
23
+ globals()[name] = getattr(module, name)
24
+ elif name in __submodules:
25
+ module = importlib.import_module(f".{name}", __name__)
26
+ globals()[name] = module
27
+ else:
28
+ raise AttributeError(f"module {__name__} has no attribute {name}")
29
+ return globals()[name]
30
+
31
+
32
+ def from_pretrained(path: str, **kwargs):
33
+ """
34
+ Load a model from a pretrained checkpoint.
35
+
36
+ Args:
37
+ path: The path to the checkpoint. Can be either local path or a Hugging Face model name.
38
+ NOTE: config file and model file should take the name f'{path}.json' and f'{path}.safetensors' respectively.
39
+ **kwargs: Additional arguments for the model constructor.
40
+ """
41
+ import os
42
+ import json
43
+ from safetensors.torch import load_file
44
+ is_local = os.path.exists(f"{path}.json") and os.path.exists(f"{path}.safetensors")
45
+
46
+ if is_local:
47
+ config_file = f"{path}.json"
48
+ model_file = f"{path}.safetensors"
49
+ else:
50
+ from huggingface_hub import hf_hub_download
51
+ path_parts = path.split('/')
52
+ repo_id = f'{path_parts[0]}/{path_parts[1]}'
53
+ model_name = '/'.join(path_parts[2:])
54
+ config_file = hf_hub_download(repo_id, f"{model_name}.json")
55
+ model_file = hf_hub_download(repo_id, f"{model_name}.safetensors")
56
+
57
+ with open(config_file, 'r') as f:
58
+ config = json.load(f)
59
+ model = __getattr__(config['name'])(**config['args'], **kwargs)
60
+ model.load_state_dict(load_file(model_file))
61
+
62
+ return model
63
+
64
+
65
+ # For Pylance
66
+ if __name__ == '__main__':
67
+ from .sparse_structure_vae import SparseStructureEncoder, SparseStructureDecoder
68
+ from .sparse_structure_flow import SparseStructureFlowModel
69
+ from .structured_latent_vae import SLatEncoder, SLatGaussianDecoder, SLatRadianceFieldDecoder, SLatMeshDecoder
70
+ from .structured_latent_flow import SLatFlowModel
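
A usage sketch for the lazy loader above. The checkpoint prefix is hypothetical; it only illustrates the `f'{path}.json'` / `f'{path}.safetensors'` convention the docstring describes (either a local path or `org/repo/...` on the Hugging Face Hub):

```python
from trellis import models

# Hypothetical checkpoint prefix: the loader looks for
#   <prefix>.json         (constructor name + args)
#   <prefix>.safetensors  (weights)
# either on disk or inside the "your-org/your-repo" Hub repository.
model = models.from_pretrained("your-org/your-repo/ckpts/slat_flow_model")
model = model.eval()
print(type(model).__name__)
```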
trellis/models/sparse_structure_flow.py ADDED
@@ -0,0 +1,200 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ..modules.utils import convert_module_to_f16, convert_module_to_f32
7
+ from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock
8
+ from ..modules.spatial import patchify, unpatchify
9
+
10
+
11
+ class TimestepEmbedder(nn.Module):
12
+ """
13
+ Embeds scalar timesteps into vector representations.
14
+ """
15
+ def __init__(self, hidden_size, frequency_embedding_size=256):
16
+ super().__init__()
17
+ self.mlp = nn.Sequential(
18
+ nn.Linear(frequency_embedding_size, hidden_size, bias=True),
19
+ nn.SiLU(),
20
+ nn.Linear(hidden_size, hidden_size, bias=True),
21
+ )
22
+ self.frequency_embedding_size = frequency_embedding_size
23
+
24
+ @staticmethod
25
+ def timestep_embedding(t, dim, max_period=10000):
26
+ """
27
+ Create sinusoidal timestep embeddings.
28
+
29
+ Args:
30
+ t: a 1-D Tensor of N indices, one per batch element.
31
+ These may be fractional.
32
+ dim: the dimension of the output.
33
+ max_period: controls the minimum frequency of the embeddings.
34
+
35
+ Returns:
36
+ an (N, D) Tensor of positional embeddings.
37
+ """
38
+ # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
39
+ half = dim // 2
40
+ freqs = torch.exp(
41
+ -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
42
+ ).to(device=t.device)
43
+ args = t[:, None].float() * freqs[None]
44
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
45
+ if dim % 2:
46
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
47
+ return embedding
48
+
49
+ def forward(self, t):
50
+ t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
51
+ t_emb = self.mlp(t_freq)
52
+ return t_emb
53
+
54
+
55
+ class SparseStructureFlowModel(nn.Module):
56
+ def __init__(
57
+ self,
58
+ resolution: int,
59
+ in_channels: int,
60
+ model_channels: int,
61
+ cond_channels: int,
62
+ out_channels: int,
63
+ num_blocks: int,
64
+ num_heads: Optional[int] = None,
65
+ num_head_channels: Optional[int] = 64,
66
+ mlp_ratio: float = 4,
67
+ patch_size: int = 2,
68
+ pe_mode: Literal["ape", "rope"] = "ape",
69
+ use_fp16: bool = False,
70
+ use_checkpoint: bool = False,
71
+ share_mod: bool = False,
72
+ qk_rms_norm: bool = False,
73
+ qk_rms_norm_cross: bool = False,
74
+ ):
75
+ super().__init__()
76
+ self.resolution = resolution
77
+ self.in_channels = in_channels
78
+ self.model_channels = model_channels
79
+ self.cond_channels = cond_channels
80
+ self.out_channels = out_channels
81
+ self.num_blocks = num_blocks
82
+ self.num_heads = num_heads or model_channels // num_head_channels
83
+ self.mlp_ratio = mlp_ratio
84
+ self.patch_size = patch_size
85
+ self.pe_mode = pe_mode
86
+ self.use_fp16 = use_fp16
87
+ self.use_checkpoint = use_checkpoint
88
+ self.share_mod = share_mod
89
+ self.qk_rms_norm = qk_rms_norm
90
+ self.qk_rms_norm_cross = qk_rms_norm_cross
91
+ self.dtype = torch.float16 if use_fp16 else torch.float32
92
+
93
+ self.t_embedder = TimestepEmbedder(model_channels)
94
+ if share_mod:
95
+ self.adaLN_modulation = nn.Sequential(
96
+ nn.SiLU(),
97
+ nn.Linear(model_channels, 6 * model_channels, bias=True)
98
+ )
99
+
100
+ if pe_mode == "ape":
101
+ pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
102
+ coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
103
+ coords = torch.stack(coords, dim=-1).reshape(-1, 3)
104
+ pos_emb = pos_embedder(coords)
105
+ self.register_buffer("pos_emb", pos_emb)
106
+
107
+ self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)
108
+
109
+ self.blocks = nn.ModuleList([
110
+ ModulatedTransformerCrossBlock(
111
+ model_channels,
112
+ cond_channels,
113
+ num_heads=self.num_heads,
114
+ mlp_ratio=self.mlp_ratio,
115
+ attn_mode='full',
116
+ use_checkpoint=self.use_checkpoint,
117
+ use_rope=(pe_mode == "rope"),
118
+ share_mod=share_mod,
119
+ qk_rms_norm=self.qk_rms_norm,
120
+ qk_rms_norm_cross=self.qk_rms_norm_cross,
121
+ )
122
+ for _ in range(num_blocks)
123
+ ])
124
+
125
+ self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)
126
+
127
+ self.initialize_weights()
128
+ if use_fp16:
129
+ self.convert_to_fp16()
130
+
131
+ @property
132
+ def device(self) -> torch.device:
133
+ """
134
+ Return the device of the model.
135
+ """
136
+ return next(self.parameters()).device
137
+
138
+ def convert_to_fp16(self) -> None:
139
+ """
140
+ Convert the torso of the model to float16.
141
+ """
142
+ self.blocks.apply(convert_module_to_f16)
143
+
144
+ def convert_to_fp32(self) -> None:
145
+ """
146
+ Convert the torso of the model to float32.
147
+ """
148
+ self.blocks.apply(convert_module_to_f32)
149
+
150
+ def initialize_weights(self) -> None:
151
+ # Initialize transformer layers:
152
+ def _basic_init(module):
153
+ if isinstance(module, nn.Linear):
154
+ torch.nn.init.xavier_uniform_(module.weight)
155
+ if module.bias is not None:
156
+ nn.init.constant_(module.bias, 0)
157
+ self.apply(_basic_init)
158
+
159
+ # Initialize timestep embedding MLP:
160
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
161
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
162
+
163
+ # Zero-out adaLN modulation layers in DiT blocks:
164
+ if self.share_mod:
165
+ nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
166
+ nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
167
+ else:
168
+ for block in self.blocks:
169
+ nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
170
+ nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
171
+
172
+ # Zero-out output layers:
173
+ nn.init.constant_(self.out_layer.weight, 0)
174
+ nn.init.constant_(self.out_layer.bias, 0)
175
+
176
+ def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
177
+ assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
178
+ f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"
179
+
180
+ h = patchify(x, self.patch_size)
181
+ h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()
182
+
183
+ h = self.input_layer(h)
184
+ h = h + self.pos_emb[None]
185
+ t_emb = self.t_embedder(t)
186
+ if self.share_mod:
187
+ t_emb = self.adaLN_modulation(t_emb)
188
+ t_emb = t_emb.type(self.dtype)
189
+ h = h.type(self.dtype)
190
+ cond = cond.type(self.dtype)
191
+ for block in self.blocks:
192
+ h = block(h, t_emb, cond)
193
+ h = h.type(x.dtype)
194
+ h = F.layer_norm(h, h.shape[-1:])
195
+ h = self.out_layer(h)
196
+
197
+ h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
198
+ h = unpatchify(h, self.patch_size).contiguous()
199
+
200
+ return h
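
To make the shapes concrete, `TimestepEmbedder` can be exercised on its own; a small sketch with an arbitrary hidden size:

```python
import torch
from trellis.models.sparse_structure_flow import TimestepEmbedder

embedder = TimestepEmbedder(hidden_size=1024)   # illustrative width

# One (possibly fractional) timestep per batch element.
t = torch.tensor([0.0, 250.5, 999.0])
emb = embedder(t)
print(emb.shape)   # torch.Size([3, 1024]) -- sinusoidal features passed through the MLP
```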
trellis/models/sparse_structure_vae.py ADDED
@@ -0,0 +1,306 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ..modules.norm import GroupNorm32, ChannelLayerNorm32
6
+ from ..modules.spatial import pixel_shuffle_3d
7
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
8
+
9
+
10
+ def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module:
11
+ """
12
+ Return a normalization layer.
13
+ """
14
+ if norm_type == "group":
15
+ return GroupNorm32(32, *args, **kwargs)
16
+ elif norm_type == "layer":
17
+ return ChannelLayerNorm32(*args, **kwargs)
18
+ else:
19
+ raise ValueError(f"Invalid norm type {norm_type}")
20
+
21
+
22
+ class ResBlock3d(nn.Module):
23
+ def __init__(
24
+ self,
25
+ channels: int,
26
+ out_channels: Optional[int] = None,
27
+ norm_type: Literal["group", "layer"] = "layer",
28
+ ):
29
+ super().__init__()
30
+ self.channels = channels
31
+ self.out_channels = out_channels or channels
32
+
33
+ self.norm1 = norm_layer(norm_type, channels)
34
+ self.norm2 = norm_layer(norm_type, self.out_channels)
35
+ self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1)
36
+ self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1))
37
+ self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity()
38
+
39
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
40
+ h = self.norm1(x)
41
+ h = F.silu(h)
42
+ h = self.conv1(h)
43
+ h = self.norm2(h)
44
+ h = F.silu(h)
45
+ h = self.conv2(h)
46
+ h = h + self.skip_connection(x)
47
+ return h
48
+
49
+
50
+ class DownsampleBlock3d(nn.Module):
51
+ def __init__(
52
+ self,
53
+ in_channels: int,
54
+ out_channels: int,
55
+ mode: Literal["conv", "avgpool"] = "conv",
56
+ ):
57
+ assert mode in ["conv", "avgpool"], f"Invalid mode {mode}"
58
+
59
+ super().__init__()
60
+ self.in_channels = in_channels
61
+ self.out_channels = out_channels
62
+
63
+ if mode == "conv":
64
+ self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2)
65
+ elif mode == "avgpool":
66
+ assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels"
67
+
68
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
69
+ if hasattr(self, "conv"):
70
+ return self.conv(x)
71
+ else:
72
+ return F.avg_pool3d(x, 2)
73
+
74
+
75
+ class UpsampleBlock3d(nn.Module):
76
+ def __init__(
77
+ self,
78
+ in_channels: int,
79
+ out_channels: int,
80
+ mode: Literal["conv", "nearest"] = "conv",
81
+ ):
82
+ assert mode in ["conv", "nearest"], f"Invalid mode {mode}"
83
+
84
+ super().__init__()
85
+ self.in_channels = in_channels
86
+ self.out_channels = out_channels
87
+
88
+ if mode == "conv":
89
+ self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1)
90
+ elif mode == "nearest":
91
+ assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"
92
+
93
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
94
+ if hasattr(self, "conv"):
95
+ x = self.conv(x)
96
+ return pixel_shuffle_3d(x, 2)
97
+ else:
98
+ return F.interpolate(x, scale_factor=2, mode="nearest")
99
+
100
+
101
+ class SparseStructureEncoder(nn.Module):
102
+ """
103
+ Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3).
104
+
105
+ Args:
106
+ in_channels (int): Channels of the input.
107
+ latent_channels (int): Channels of the latent representation.
108
+ num_res_blocks (int): Number of residual blocks at each resolution.
109
+ channels (List[int]): Channels of the encoder blocks.
110
+ num_res_blocks_middle (int): Number of residual blocks in the middle.
111
+ norm_type (Literal["group", "layer"]): Type of normalization layer.
112
+ use_fp16 (bool): Whether to use FP16.
113
+ """
114
+ def __init__(
115
+ self,
116
+ in_channels: int,
117
+ latent_channels: int,
118
+ num_res_blocks: int,
119
+ channels: List[int],
120
+ num_res_blocks_middle: int = 2,
121
+ norm_type: Literal["group", "layer"] = "layer",
122
+ use_fp16: bool = False,
123
+ ):
124
+ super().__init__()
125
+ self.in_channels = in_channels
126
+ self.latent_channels = latent_channels
127
+ self.num_res_blocks = num_res_blocks
128
+ self.channels = channels
129
+ self.num_res_blocks_middle = num_res_blocks_middle
130
+ self.norm_type = norm_type
131
+ self.use_fp16 = use_fp16
132
+ self.dtype = torch.float16 if use_fp16 else torch.float32
133
+
134
+ self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)
135
+
136
+ self.blocks = nn.ModuleList([])
137
+ for i, ch in enumerate(channels):
138
+ self.blocks.extend([
139
+ ResBlock3d(ch, ch)
140
+ for _ in range(num_res_blocks)
141
+ ])
142
+ if i < len(channels) - 1:
143
+ self.blocks.append(
144
+ DownsampleBlock3d(ch, channels[i+1])
145
+ )
146
+
147
+ self.middle_block = nn.Sequential(*[
148
+ ResBlock3d(channels[-1], channels[-1])
149
+ for _ in range(num_res_blocks_middle)
150
+ ])
151
+
152
+ self.out_layer = nn.Sequential(
153
+ norm_layer(norm_type, channels[-1]),
154
+ nn.SiLU(),
155
+ nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1)
156
+ )
157
+
158
+ if use_fp16:
159
+ self.convert_to_fp16()
160
+
161
+ @property
162
+ def device(self) -> torch.device:
163
+ """
164
+ Return the device of the model.
165
+ """
166
+ return next(self.parameters()).device
167
+
168
+ def convert_to_fp16(self) -> None:
169
+ """
170
+ Convert the torso of the model to float16.
171
+ """
172
+ self.use_fp16 = True
173
+ self.dtype = torch.float16
174
+ self.blocks.apply(convert_module_to_f16)
175
+ self.middle_block.apply(convert_module_to_f16)
176
+
177
+ def convert_to_fp32(self) -> None:
178
+ """
179
+ Convert the torso of the model to float32.
180
+ """
181
+ self.use_fp16 = False
182
+ self.dtype = torch.float32
183
+ self.blocks.apply(convert_module_to_f32)
184
+ self.middle_block.apply(convert_module_to_f32)
185
+
186
+ def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor:
187
+ h = self.input_layer(x)
188
+ h = h.type(self.dtype)
189
+
190
+ for block in self.blocks:
191
+ h = block(h)
192
+ h = self.middle_block(h)
193
+
194
+ h = h.type(x.dtype)
195
+ h = self.out_layer(h)
196
+
197
+ mean, logvar = h.chunk(2, dim=1)
198
+
199
+ if sample_posterior:
200
+ std = torch.exp(0.5 * logvar)
201
+ z = mean + std * torch.randn_like(std)
202
+ else:
203
+ z = mean
204
+
205
+ if return_raw:
206
+ return z, mean, logvar
207
+ return z
208
+
209
+
210
+ class SparseStructureDecoder(nn.Module):
211
+ """
212
+ Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3).
213
+
214
+ Args:
215
+ out_channels (int): Channels of the output.
216
+ latent_channels (int): Channels of the latent representation.
217
+ num_res_blocks (int): Number of residual blocks at each resolution.
218
+ channels (List[int]): Channels of the decoder blocks.
219
+ num_res_blocks_middle (int): Number of residual blocks in the middle.
220
+ norm_type (Literal["group", "layer"]): Type of normalization layer.
221
+ use_fp16 (bool): Whether to use FP16.
222
+ """
223
+ def __init__(
224
+ self,
225
+ out_channels: int,
226
+ latent_channels: int,
227
+ num_res_blocks: int,
228
+ channels: List[int],
229
+ num_res_blocks_middle: int = 2,
230
+ norm_type: Literal["group", "layer"] = "layer",
231
+ use_fp16: bool = False,
232
+ ):
233
+ super().__init__()
234
+ self.out_channels = out_channels
235
+ self.latent_channels = latent_channels
236
+ self.num_res_blocks = num_res_blocks
237
+ self.channels = channels
238
+ self.num_res_blocks_middle = num_res_blocks_middle
239
+ self.norm_type = norm_type
240
+ self.use_fp16 = use_fp16
241
+ self.dtype = torch.float16 if use_fp16 else torch.float32
242
+
243
+ self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1)
244
+
245
+ self.middle_block = nn.Sequential(*[
246
+ ResBlock3d(channels[0], channels[0])
247
+ for _ in range(num_res_blocks_middle)
248
+ ])
249
+
250
+ self.blocks = nn.ModuleList([])
251
+ for i, ch in enumerate(channels):
252
+ self.blocks.extend([
253
+ ResBlock3d(ch, ch)
254
+ for _ in range(num_res_blocks)
255
+ ])
256
+ if i < len(channels) - 1:
257
+ self.blocks.append(
258
+ UpsampleBlock3d(ch, channels[i+1])
259
+ )
260
+
261
+ self.out_layer = nn.Sequential(
262
+ norm_layer(norm_type, channels[-1]),
263
+ nn.SiLU(),
264
+ nn.Conv3d(channels[-1], out_channels, 3, padding=1)
265
+ )
266
+
267
+ if use_fp16:
268
+ self.convert_to_fp16()
269
+
270
+ @property
271
+ def device(self) -> torch.device:
272
+ """
273
+ Return the device of the model.
274
+ """
275
+ return next(self.parameters()).device
276
+
277
+ def convert_to_fp16(self) -> None:
278
+ """
279
+ Convert the torso of the model to float16.
280
+ """
281
+ self.use_fp16 = True
282
+ self.dtype = torch.float16
283
+ self.blocks.apply(convert_module_to_f16)
284
+ self.middle_block.apply(convert_module_to_f16)
285
+
286
+ def convert_to_fp32(self) -> None:
287
+ """
288
+ Convert the torso of the model to float32.
289
+ """
290
+ self.use_fp16 = False
291
+ self.dtype = torch.float32
292
+ self.blocks.apply(convert_module_to_f32)
293
+ self.middle_block.apply(convert_module_to_f32)
294
+
295
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
296
+ h = self.input_layer(x)
297
+
298
+ h = h.type(self.dtype)
299
+
300
+ h = self.middle_block(h)
301
+ for block in self.blocks:
302
+ h = block(h)
303
+
304
+ h = h.type(x.dtype)
305
+ h = self.out_layer(h)
306
+ return h
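
A shape-level sketch of the dense sparse-structure VAE above. The channel configuration is made up for illustration (real checkpoints ship their own config JSON), and a random volume stands in for data:

```python
import torch
from trellis.models.sparse_structure_vae import SparseStructureEncoder, SparseStructureDecoder

# Hypothetical, small configuration: two downsampling stages (64^3 -> 16^3).
enc = SparseStructureEncoder(in_channels=1, latent_channels=8, num_res_blocks=1,
                             channels=[32, 64, 128])
dec = SparseStructureDecoder(out_channels=1, latent_channels=8, num_res_blocks=1,
                             channels=[128, 64, 32])

voxels = torch.rand(1, 1, 64, 64, 64)          # dummy occupancy volume
z = enc(voxels, sample_posterior=True)         # -> (1, 8, 16, 16, 16)
recon = dec(z)                                 # -> (1, 1, 64, 64, 64)
print(z.shape, recon.shape)
```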
trellis/models/structured_latent_flow.py ADDED
@@ -0,0 +1,262 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
+ from ..modules.transformer import AbsolutePositionEmbedder
8
+ from ..modules.norm import LayerNorm32
9
+ from ..modules import sparse as sp
10
+ from ..modules.sparse.transformer import ModulatedSparseTransformerCrossBlock
11
+ from .sparse_structure_flow import TimestepEmbedder
12
+
13
+
14
+ class SparseResBlock3d(nn.Module):
15
+ def __init__(
16
+ self,
17
+ channels: int,
18
+ emb_channels: int,
19
+ out_channels: Optional[int] = None,
20
+ downsample: bool = False,
21
+ upsample: bool = False,
22
+ ):
23
+ super().__init__()
24
+ self.channels = channels
25
+ self.emb_channels = emb_channels
26
+ self.out_channels = out_channels or channels
27
+ self.downsample = downsample
28
+ self.upsample = upsample
29
+
30
+ assert not (downsample and upsample), "Cannot downsample and upsample at the same time"
31
+
32
+ self.norm1 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
33
+ self.norm2 = LayerNorm32(self.out_channels, elementwise_affine=False, eps=1e-6)
34
+ self.conv1 = sp.SparseConv3d(channels, self.out_channels, 3)
35
+ self.conv2 = zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3))
36
+ self.emb_layers = nn.Sequential(
37
+ nn.SiLU(),
38
+ nn.Linear(emb_channels, 2 * self.out_channels, bias=True),
39
+ )
40
+ self.skip_connection = sp.SparseLinear(channels, self.out_channels) if channels != self.out_channels else nn.Identity()
41
+ self.updown = None
42
+ if self.downsample:
43
+ self.updown = sp.SparseDownsample(2)
44
+ elif self.upsample:
45
+ self.updown = sp.SparseUpsample(2)
46
+
47
+ def _updown(self, x: sp.SparseTensor) -> sp.SparseTensor:
48
+ if self.updown is not None:
49
+ x = self.updown(x)
50
+ return x
51
+
52
+ def forward(self, x: sp.SparseTensor, emb: torch.Tensor) -> sp.SparseTensor:
53
+ emb_out = self.emb_layers(emb).type(x.dtype)
54
+ scale, shift = torch.chunk(emb_out, 2, dim=1)
55
+
56
+ x = self._updown(x)
57
+ h = x.replace(self.norm1(x.feats))
58
+ h = h.replace(F.silu(h.feats))
59
+ h = self.conv1(h)
60
+ h = h.replace(self.norm2(h.feats)) * (1 + scale) + shift
61
+ h = h.replace(F.silu(h.feats))
62
+ h = self.conv2(h)
63
+ h = h + self.skip_connection(x)
64
+
65
+ return h
66
+
67
+
68
+ class SLatFlowModel(nn.Module):
69
+ def __init__(
70
+ self,
71
+ resolution: int,
72
+ in_channels: int,
73
+ model_channels: int,
74
+ cond_channels: int,
75
+ out_channels: int,
76
+ num_blocks: int,
77
+ num_heads: Optional[int] = None,
78
+ num_head_channels: Optional[int] = 64,
79
+ mlp_ratio: float = 4,
80
+ patch_size: int = 2,
81
+ num_io_res_blocks: int = 2,
82
+ io_block_channels: List[int] = None,
83
+ pe_mode: Literal["ape", "rope"] = "ape",
84
+ use_fp16: bool = False,
85
+ use_checkpoint: bool = False,
86
+ use_skip_connection: bool = True,
87
+ share_mod: bool = False,
88
+ qk_rms_norm: bool = False,
89
+ qk_rms_norm_cross: bool = False,
90
+ ):
91
+ super().__init__()
92
+ self.resolution = resolution
93
+ self.in_channels = in_channels
94
+ self.model_channels = model_channels
95
+ self.cond_channels = cond_channels
96
+ self.out_channels = out_channels
97
+ self.num_blocks = num_blocks
98
+ self.num_heads = num_heads or model_channels // num_head_channels
99
+ self.mlp_ratio = mlp_ratio
100
+ self.patch_size = patch_size
101
+ self.num_io_res_blocks = num_io_res_blocks
102
+ self.io_block_channels = io_block_channels
103
+ self.pe_mode = pe_mode
104
+ self.use_fp16 = use_fp16
105
+ self.use_checkpoint = use_checkpoint
106
+ self.use_skip_connection = use_skip_connection
107
+ self.share_mod = share_mod
108
+ self.qk_rms_norm = qk_rms_norm
109
+ self.qk_rms_norm_cross = qk_rms_norm_cross
110
+ self.dtype = torch.float16 if use_fp16 else torch.float32
111
+
112
+ assert int(np.log2(patch_size)) == np.log2(patch_size), "Patch size must be a power of 2"
113
+ assert np.log2(patch_size) == len(io_block_channels), "Number of IO ResBlocks must match the number of stages"
114
+
115
+ self.t_embedder = TimestepEmbedder(model_channels)
116
+ if share_mod:
117
+ self.adaLN_modulation = nn.Sequential(
118
+ nn.SiLU(),
119
+ nn.Linear(model_channels, 6 * model_channels, bias=True)
120
+ )
121
+
122
+ if pe_mode == "ape":
123
+ self.pos_embedder = AbsolutePositionEmbedder(model_channels)
124
+
125
+ self.input_layer = sp.SparseLinear(in_channels, io_block_channels[0])
126
+ self.input_blocks = nn.ModuleList([])
127
+ for chs, next_chs in zip(io_block_channels, io_block_channels[1:] + [model_channels]):
128
+ self.input_blocks.extend([
129
+ SparseResBlock3d(
130
+ chs,
131
+ model_channels,
132
+ out_channels=chs,
133
+ )
134
+ for _ in range(num_io_res_blocks-1)
135
+ ])
136
+ self.input_blocks.append(
137
+ SparseResBlock3d(
138
+ chs,
139
+ model_channels,
140
+ out_channels=next_chs,
141
+ downsample=True,
142
+ )
143
+ )
144
+
145
+ self.blocks = nn.ModuleList([
146
+ ModulatedSparseTransformerCrossBlock(
147
+ model_channels,
148
+ cond_channels,
149
+ num_heads=self.num_heads,
150
+ mlp_ratio=self.mlp_ratio,
151
+ attn_mode='full',
152
+ use_checkpoint=self.use_checkpoint,
153
+ use_rope=(pe_mode == "rope"),
154
+ share_mod=self.share_mod,
155
+ qk_rms_norm=self.qk_rms_norm,
156
+ qk_rms_norm_cross=self.qk_rms_norm_cross,
157
+ )
158
+ for _ in range(num_blocks)
159
+ ])
160
+
161
+ self.out_blocks = nn.ModuleList([])
162
+ for chs, prev_chs in zip(reversed(io_block_channels), [model_channels] + list(reversed(io_block_channels[1:]))):
163
+ self.out_blocks.append(
164
+ SparseResBlock3d(
165
+ prev_chs * 2 if self.use_skip_connection else prev_chs,
166
+ model_channels,
167
+ out_channels=chs,
168
+ upsample=True,
169
+ )
170
+ )
171
+ self.out_blocks.extend([
172
+ SparseResBlock3d(
173
+ chs * 2 if self.use_skip_connection else chs,
174
+ model_channels,
175
+ out_channels=chs,
176
+ )
177
+ for _ in range(num_io_res_blocks-1)
178
+ ])
179
+ self.out_layer = sp.SparseLinear(io_block_channels[0], out_channels)
180
+
181
+ self.initialize_weights()
182
+ if use_fp16:
183
+ self.convert_to_fp16()
184
+
185
+ @property
186
+ def device(self) -> torch.device:
187
+ """
188
+ Return the device of the model.
189
+ """
190
+ return next(self.parameters()).device
191
+
192
+ def convert_to_fp16(self) -> None:
193
+ """
194
+ Convert the torso of the model to float16.
195
+ """
196
+ self.input_blocks.apply(convert_module_to_f16)
197
+ self.blocks.apply(convert_module_to_f16)
198
+ self.out_blocks.apply(convert_module_to_f16)
199
+
200
+ def convert_to_fp32(self) -> None:
201
+ """
202
+ Convert the torso of the model to float32.
203
+ """
204
+ self.input_blocks.apply(convert_module_to_f32)
205
+ self.blocks.apply(convert_module_to_f32)
206
+ self.out_blocks.apply(convert_module_to_f32)
207
+
208
+ def initialize_weights(self) -> None:
209
+ # Initialize transformer layers:
210
+ def _basic_init(module):
211
+ if isinstance(module, nn.Linear):
212
+ torch.nn.init.xavier_uniform_(module.weight)
213
+ if module.bias is not None:
214
+ nn.init.constant_(module.bias, 0)
215
+ self.apply(_basic_init)
216
+
217
+ # Initialize timestep embedding MLP:
218
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
219
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
220
+
221
+ # Zero-out adaLN modulation layers in DiT blocks:
222
+ if self.share_mod:
223
+ nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
224
+ nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
225
+ else:
226
+ for block in self.blocks:
227
+ nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
228
+ nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
229
+
230
+ # Zero-out output layers:
231
+ nn.init.constant_(self.out_layer.weight, 0)
232
+ nn.init.constant_(self.out_layer.bias, 0)
233
+
234
+ def forward(self, x: sp.SparseTensor, t: torch.Tensor, cond: torch.Tensor) -> sp.SparseTensor:
235
+ h = self.input_layer(x).type(self.dtype)
236
+ t_emb = self.t_embedder(t)
237
+ if self.share_mod:
238
+ t_emb = self.adaLN_modulation(t_emb)
239
+ t_emb = t_emb.type(self.dtype)
240
+ cond = cond.type(self.dtype)
241
+
242
+ skips = []
243
+ # pack with input blocks
244
+ for block in self.input_blocks:
245
+ h = block(h, t_emb)
246
+ skips.append(h.feats)
247
+
248
+ if self.pe_mode == "ape":
249
+ h = h + self.pos_embedder(h.coords[:, 1:]).type(self.dtype)
250
+ for block in self.blocks:
251
+ h = block(h, t_emb, cond)
252
+
253
+ # unpack with output blocks
254
+ for block, skip in zip(self.out_blocks, reversed(skips)):
255
+ if self.use_skip_connection:
256
+ h = block(h.replace(torch.cat([h.feats, skip], dim=1)), t_emb)
257
+ else:
258
+ h = block(h, t_emb)
259
+
260
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
261
+ h = self.out_layer(h.type(x.dtype))
262
+ return h
trellis/models/structured_latent_vae/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ from .encoder import SLatEncoder
2
+ from .decoder_gs import SLatGaussianDecoder
3
+ from .decoder_rf import SLatRadianceFieldDecoder
4
+ from .decoder_mesh import SLatMeshDecoder
trellis/models/structured_latent_vae/base.py ADDED
@@ -0,0 +1,117 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ...modules.utils import convert_module_to_f16, convert_module_to_f32
5
+ from ...modules import sparse as sp
6
+ from ...modules.transformer import AbsolutePositionEmbedder
7
+ from ...modules.sparse.transformer import SparseTransformerBlock
8
+
9
+
10
+ def block_attn_config(self):
11
+ """
12
+ Return the attention configuration of the model.
13
+ """
14
+ for i in range(self.num_blocks):
15
+ if self.attn_mode == "shift_window":
16
+ yield "serialized", self.window_size, 0, (16 * (i % 2),) * 3, sp.SerializeMode.Z_ORDER
17
+ elif self.attn_mode == "shift_sequence":
18
+ yield "serialized", self.window_size, self.window_size // 2 * (i % 2), (0, 0, 0), sp.SerializeMode.Z_ORDER
19
+ elif self.attn_mode == "shift_order":
20
+ yield "serialized", self.window_size, 0, (0, 0, 0), sp.SerializeModes[i % 4]
21
+ elif self.attn_mode == "full":
22
+ yield "full", None, None, None, None
23
+ elif self.attn_mode == "swin":
24
+ yield "windowed", self.window_size, None, self.window_size // 2 * (i % 2), None
25
+
26
+
27
+ class SparseTransformerBase(nn.Module):
28
+ """
29
+ Sparse Transformer without output layers.
30
+ Serve as the base class for encoder and decoder.
31
+ """
32
+ def __init__(
33
+ self,
34
+ in_channels: int,
35
+ model_channels: int,
36
+ num_blocks: int,
37
+ num_heads: Optional[int] = None,
38
+ num_head_channels: Optional[int] = 64,
39
+ mlp_ratio: float = 4.0,
40
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
41
+ window_size: Optional[int] = None,
42
+ pe_mode: Literal["ape", "rope"] = "ape",
43
+ use_fp16: bool = False,
44
+ use_checkpoint: bool = False,
45
+ qk_rms_norm: bool = False,
46
+ ):
47
+ super().__init__()
48
+ self.in_channels = in_channels
49
+ self.model_channels = model_channels
50
+ self.num_blocks = num_blocks
51
+ self.window_size = window_size
52
+ self.num_heads = num_heads or model_channels // num_head_channels
53
+ self.mlp_ratio = mlp_ratio
54
+ self.attn_mode = attn_mode
55
+ self.pe_mode = pe_mode
56
+ self.use_fp16 = use_fp16
57
+ self.use_checkpoint = use_checkpoint
58
+ self.qk_rms_norm = qk_rms_norm
59
+ self.dtype = torch.float16 if use_fp16 else torch.float32
60
+
61
+ if pe_mode == "ape":
62
+ self.pos_embedder = AbsolutePositionEmbedder(model_channels)
63
+
64
+ self.input_layer = sp.SparseLinear(in_channels, model_channels)
65
+ self.blocks = nn.ModuleList([
66
+ SparseTransformerBlock(
67
+ model_channels,
68
+ num_heads=self.num_heads,
69
+ mlp_ratio=self.mlp_ratio,
70
+ attn_mode=attn_mode,
71
+ window_size=window_size,
72
+ shift_sequence=shift_sequence,
73
+ shift_window=shift_window,
74
+ serialize_mode=serialize_mode,
75
+ use_checkpoint=self.use_checkpoint,
76
+ use_rope=(pe_mode == "rope"),
77
+ qk_rms_norm=self.qk_rms_norm,
78
+ )
79
+ for attn_mode, window_size, shift_sequence, shift_window, serialize_mode in block_attn_config(self)
80
+ ])
81
+
82
+ @property
83
+ def device(self) -> torch.device:
84
+ """
85
+ Return the device of the model.
86
+ """
87
+ return next(self.parameters()).device
88
+
89
+ def convert_to_fp16(self) -> None:
90
+ """
91
+ Convert the torso of the model to float16.
92
+ """
93
+ self.blocks.apply(convert_module_to_f16)
94
+
95
+ def convert_to_fp32(self) -> None:
96
+ """
97
+ Convert the torso of the model to float32.
98
+ """
99
+ self.blocks.apply(convert_module_to_f32)
100
+
101
+ def initialize_weights(self) -> None:
102
+ # Initialize transformer layers:
103
+ def _basic_init(module):
104
+ if isinstance(module, nn.Linear):
105
+ torch.nn.init.xavier_uniform_(module.weight)
106
+ if module.bias is not None:
107
+ nn.init.constant_(module.bias, 0)
108
+ self.apply(_basic_init)
109
+
110
+ def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
111
+ h = self.input_layer(x)
112
+ if self.pe_mode == "ape":
113
+ h = h + self.pos_embedder(x.coords[:, 1:])
114
+ h = h.type(self.dtype)
115
+ for block in self.blocks:
116
+ h = block(h)
117
+ return h
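For the decoders below, attn_mode defaults to "swin", so block_attn_config alternates the window shift between 0 and window_size // 2 across consecutive blocks. A small sketch of just that branch (the SimpleNamespace stand-in and its values are illustrative):

```python
from types import SimpleNamespace

# Stand-in carrying only the attributes the "swin" branch of block_attn_config reads.
cfg = SimpleNamespace(num_blocks=4, attn_mode="swin", window_size=8)

def swin_settings(self):
    # yields (attn_mode, window_size, shift_sequence, shift_window, serialize_mode) per block
    for i in range(self.num_blocks):
        yield "windowed", self.window_size, None, self.window_size // 2 * (i % 2), None

print(list(swin_settings(cfg)))
# [('windowed', 8, None, 0, None), ('windowed', 8, None, 4, None),
#  ('windowed', 8, None, 0, None), ('windowed', 8, None, 4, None)]
```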
trellis/models/structured_latent_vae/decoder_gs.py ADDED
@@ -0,0 +1,122 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ...modules import sparse as sp
6
+ from ...utils.random_utils import hammersley_sequence
7
+ from .base import SparseTransformerBase
8
+ from ...representations import Gaussian
9
+
10
+
11
+ class SLatGaussianDecoder(SparseTransformerBase):
12
+ def __init__(
13
+ self,
14
+ resolution: int,
15
+ model_channels: int,
16
+ latent_channels: int,
17
+ num_blocks: int,
18
+ num_heads: Optional[int] = None,
19
+ num_head_channels: Optional[int] = 64,
20
+ mlp_ratio: float = 4,
21
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
22
+ window_size: int = 8,
23
+ pe_mode: Literal["ape", "rope"] = "ape",
24
+ use_fp16: bool = False,
25
+ use_checkpoint: bool = False,
26
+ qk_rms_norm: bool = False,
27
+ representation_config: dict = None,
28
+ ):
29
+ super().__init__(
30
+ in_channels=latent_channels,
31
+ model_channels=model_channels,
32
+ num_blocks=num_blocks,
33
+ num_heads=num_heads,
34
+ num_head_channels=num_head_channels,
35
+ mlp_ratio=mlp_ratio,
36
+ attn_mode=attn_mode,
37
+ window_size=window_size,
38
+ pe_mode=pe_mode,
39
+ use_fp16=use_fp16,
40
+ use_checkpoint=use_checkpoint,
41
+ qk_rms_norm=qk_rms_norm,
42
+ )
43
+ self.resolution = resolution
44
+ self.rep_config = representation_config
45
+ self._calc_layout()
46
+ self.out_layer = sp.SparseLinear(model_channels, self.out_channels)
47
+ self._build_perturbation()
48
+
49
+ self.initialize_weights()
50
+ if use_fp16:
51
+ self.convert_to_fp16()
52
+
53
+ def initialize_weights(self) -> None:
54
+ super().initialize_weights()
55
+ # Zero-out output layers:
56
+ nn.init.constant_(self.out_layer.weight, 0)
57
+ nn.init.constant_(self.out_layer.bias, 0)
58
+
59
+ def _build_perturbation(self) -> None:
60
+ perturbation = [hammersley_sequence(3, i, self.rep_config['num_gaussians']) for i in range(self.rep_config['num_gaussians'])]
61
+ perturbation = torch.tensor(perturbation).float() * 2 - 1
62
+ perturbation = perturbation / self.rep_config['voxel_size']
63
+ perturbation = torch.atanh(perturbation).to(self.device)
64
+ self.register_buffer('offset_perturbation', perturbation)
65
+
66
+ def _calc_layout(self) -> None:
67
+ self.layout = {
68
+ '_xyz' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
69
+ '_features_dc' : {'shape': (self.rep_config['num_gaussians'], 1, 3), 'size': self.rep_config['num_gaussians'] * 3},
70
+ '_scaling' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
71
+ '_rotation' : {'shape': (self.rep_config['num_gaussians'], 4), 'size': self.rep_config['num_gaussians'] * 4},
72
+ '_opacity' : {'shape': (self.rep_config['num_gaussians'], 1), 'size': self.rep_config['num_gaussians']},
73
+ }
74
+ start = 0
75
+ for k, v in self.layout.items():
76
+ v['range'] = (start, start + v['size'])
77
+ start += v['size']
78
+ self.out_channels = start
79
+
80
+ def to_representation(self, x: sp.SparseTensor) -> List[Gaussian]:
81
+ """
82
+ Convert a batch of network outputs to 3D representations.
83
+
84
+ Args:
85
+ x: The [N x * x C] sparse tensor output by the network.
86
+
87
+ Returns:
88
+ list of representations
89
+ """
90
+ ret = []
91
+ for i in range(x.shape[0]):
92
+ representation = Gaussian(
93
+ sh_degree=0,
94
+ aabb=[-0.5, -0.5, -0.5, 1.0, 1.0, 1.0],
95
+ mininum_kernel_size = self.rep_config['3d_filter_kernel_size'],
96
+ scaling_bias = self.rep_config['scaling_bias'],
97
+ opacity_bias = self.rep_config['opacity_bias'],
98
+ scaling_activation = self.rep_config['scaling_activation']
99
+ )
100
+ xyz = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
101
+ for k, v in self.layout.items():
102
+ if k == '_xyz':
103
+ offset = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape'])
104
+ offset = offset * self.rep_config['lr'][k]
105
+ if self.rep_config['perturb_offset']:
106
+ offset = offset + self.offset_perturbation
107
+ offset = torch.tanh(offset) / self.resolution * 0.5 * self.rep_config['voxel_size']
108
+ _xyz = xyz.unsqueeze(1) + offset
109
+ setattr(representation, k, _xyz.flatten(0, 1))
110
+ else:
111
+ feats = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']).flatten(0, 1)
112
+ feats = feats * self.rep_config['lr'][k]
113
+ setattr(representation, k, feats)
114
+ ret.append(representation)
115
+ return ret
116
+
117
+ def forward(self, x: sp.SparseTensor) -> List[Gaussian]:
118
+ h = super().forward(x)
119
+ h = h.type(x.dtype)
120
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
121
+ h = self.out_layer(h)
122
+ return self.to_representation(h)
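_calc_layout packs all per-voxel Gaussian attributes into one contiguous feature vector and records a channel range for each, which to_representation then slices back out. A sketch of the resulting ranges for a hypothetical representation_config with 32 Gaussians per voxel (the count is illustrative):

```python
# Channel layout computed by _calc_layout for num_gaussians = 32 (illustrative).
num_gaussians = 32
layout = {
    '_xyz':         {'size': num_gaussians * 3},
    '_features_dc': {'size': num_gaussians * 3},
    '_scaling':     {'size': num_gaussians * 3},
    '_rotation':    {'size': num_gaussians * 4},
    '_opacity':     {'size': num_gaussians * 1},
}
start = 0
for k, v in layout.items():
    v['range'] = (start, start + v['size'])
    start += v['size']

print(start)                          # 448 output channels for the final SparseLinear
print(layout['_rotation']['range'])   # (288, 416)
```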
trellis/models/structured_latent_vae/decoder_mesh.py ADDED
@@ -0,0 +1,167 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ...modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
+ from ...modules import sparse as sp
8
+ from .base import SparseTransformerBase
9
+ from ...representations import MeshExtractResult
10
+ from ...representations.mesh import SparseFeatures2Mesh
11
+
12
+
13
+ class SparseSubdivideBlock3d(nn.Module):
14
+ """
15
+ A 3D subdivide block that can subdivide the sparse tensor.
16
+
17
+ Args:
18
+ channels: channels in the inputs and outputs.
19
+ out_channels: if specified, the number of output channels.
20
+ num_groups: the number of groups for the group norm.
21
+ """
22
+ def __init__(
23
+ self,
24
+ channels: int,
25
+ resolution: int,
26
+ out_channels: Optional[int] = None,
27
+ num_groups: int = 32
28
+ ):
29
+ super().__init__()
30
+ self.channels = channels
31
+ self.resolution = resolution
32
+ self.out_resolution = resolution * 2
33
+ self.out_channels = out_channels or channels
34
+
35
+ self.act_layers = nn.Sequential(
36
+ sp.SparseGroupNorm32(num_groups, channels),
37
+ sp.SparseSiLU()
38
+ )
39
+
40
+ self.sub = sp.SparseSubdivide()
41
+
42
+ self.out_layers = nn.Sequential(
43
+ sp.SparseConv3d(channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}"),
44
+ sp.SparseGroupNorm32(num_groups, self.out_channels),
45
+ sp.SparseSiLU(),
46
+ zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}")),
47
+ )
48
+
49
+ if self.out_channels == channels:
50
+ self.skip_connection = nn.Identity()
51
+ else:
52
+ self.skip_connection = sp.SparseConv3d(channels, self.out_channels, 1, indice_key=f"res_{self.out_resolution}")
53
+
54
+ def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
55
+ """
56
+ Apply the block to a Tensor, conditioned on a timestep embedding.
57
+
58
+ Args:
59
+ x: an [N x C x ...] Tensor of features.
60
+ Returns:
61
+ an [N x C x ...] Tensor of outputs.
62
+ """
63
+ h = self.act_layers(x)
64
+ h = self.sub(h)
65
+ x = self.sub(x)
66
+ h = self.out_layers(h)
67
+ h = h + self.skip_connection(x)
68
+ return h
69
+
70
+
71
+ class SLatMeshDecoder(SparseTransformerBase):
72
+ def __init__(
73
+ self,
74
+ resolution: int,
75
+ model_channels: int,
76
+ latent_channels: int,
77
+ num_blocks: int,
78
+ num_heads: Optional[int] = None,
79
+ num_head_channels: Optional[int] = 64,
80
+ mlp_ratio: float = 4,
81
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
82
+ window_size: int = 8,
83
+ pe_mode: Literal["ape", "rope"] = "ape",
84
+ use_fp16: bool = False,
85
+ use_checkpoint: bool = False,
86
+ qk_rms_norm: bool = False,
87
+ representation_config: dict = None,
88
+ ):
89
+ super().__init__(
90
+ in_channels=latent_channels,
91
+ model_channels=model_channels,
92
+ num_blocks=num_blocks,
93
+ num_heads=num_heads,
94
+ num_head_channels=num_head_channels,
95
+ mlp_ratio=mlp_ratio,
96
+ attn_mode=attn_mode,
97
+ window_size=window_size,
98
+ pe_mode=pe_mode,
99
+ use_fp16=use_fp16,
100
+ use_checkpoint=use_checkpoint,
101
+ qk_rms_norm=qk_rms_norm,
102
+ )
103
+ self.resolution = resolution
104
+ self.rep_config = representation_config
105
+ self.mesh_extractor = SparseFeatures2Mesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
106
+ self.out_channels = self.mesh_extractor.feats_channels
107
+ self.upsample = nn.ModuleList([
108
+ SparseSubdivideBlock3d(
109
+ channels=model_channels,
110
+ resolution=resolution,
111
+ out_channels=model_channels // 4
112
+ ),
113
+ SparseSubdivideBlock3d(
114
+ channels=model_channels // 4,
115
+ resolution=resolution * 2,
116
+ out_channels=model_channels // 8
117
+ )
118
+ ])
119
+ self.out_layer = sp.SparseLinear(model_channels // 8, self.out_channels)
120
+
121
+ self.initialize_weights()
122
+ if use_fp16:
123
+ self.convert_to_fp16()
124
+
125
+ def initialize_weights(self) -> None:
126
+ super().initialize_weights()
127
+ # Zero-out output layers:
128
+ nn.init.constant_(self.out_layer.weight, 0)
129
+ nn.init.constant_(self.out_layer.bias, 0)
130
+
131
+ def convert_to_fp16(self) -> None:
132
+ """
133
+ Convert the torso of the model to float16.
134
+ """
135
+ super().convert_to_fp16()
136
+ self.upsample.apply(convert_module_to_f16)
137
+
138
+ def convert_to_fp32(self) -> None:
139
+ """
140
+ Convert the torso of the model to float32.
141
+ """
142
+ super().convert_to_fp32()
143
+ self.upsample.apply(convert_module_to_f32)
144
+
145
+ def to_representation(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
146
+ """
147
+ Convert a batch of network outputs to 3D representations.
148
+
149
+ Args:
150
+ x: The [N x * x C] sparse tensor output by the network.
151
+
152
+ Returns:
153
+ list of representations
154
+ """
155
+ ret = []
156
+ for i in range(x.shape[0]):
157
+ mesh = self.mesh_extractor(x[i], training=self.training)
158
+ ret.append(mesh)
159
+ return ret
160
+
161
+ def forward(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
162
+ h = super().forward(x)
163
+ for block in self.upsample:
164
+ h = block(h)
165
+ h = h.type(x.dtype)
166
+ h = self.out_layer(h)
167
+ return self.to_representation(h)
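The two SparseSubdivideBlock3d stages each double the grid resolution while shrinking the channel width, which is why the SparseFeatures2Mesh extractor is constructed with res = resolution * 4. A bookkeeping sketch with illustrative sizes:

```python
# Resolution/channel bookkeeping of the mesh decoder head (values are illustrative).
resolution, model_channels = 64, 768
stages = [
    (resolution,     model_channels,      model_channels // 4),  # subdivide #1
    (resolution * 2, model_channels // 4, model_channels // 8),  # subdivide #2
]
for res, c_in, c_out in stages:
    print(f"{res:>3} -> {res * 2:>3} voxels/side, {c_in} -> {c_out} channels")
# 64 -> 128 voxels/side, 768 -> 192 channels
# 128 -> 256 voxels/side, 192 -> 96 channels
# The extractor is built with res = resolution * 4 = 256 to match the final grid.
```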
trellis/models/structured_latent_vae/decoder_rf.py ADDED
@@ -0,0 +1,104 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ...modules import sparse as sp
7
+ from .base import SparseTransformerBase
8
+ from ...representations import Strivec
9
+
10
+
11
+ class SLatRadianceFieldDecoder(SparseTransformerBase):
12
+ def __init__(
13
+ self,
14
+ resolution: int,
15
+ model_channels: int,
16
+ latent_channels: int,
17
+ num_blocks: int,
18
+ num_heads: Optional[int] = None,
19
+ num_head_channels: Optional[int] = 64,
20
+ mlp_ratio: float = 4,
21
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
22
+ window_size: int = 8,
23
+ pe_mode: Literal["ape", "rope"] = "ape",
24
+ use_fp16: bool = False,
25
+ use_checkpoint: bool = False,
26
+ qk_rms_norm: bool = False,
27
+ representation_config: dict = None,
28
+ ):
29
+ super().__init__(
30
+ in_channels=latent_channels,
31
+ model_channels=model_channels,
32
+ num_blocks=num_blocks,
33
+ num_heads=num_heads,
34
+ num_head_channels=num_head_channels,
35
+ mlp_ratio=mlp_ratio,
36
+ attn_mode=attn_mode,
37
+ window_size=window_size,
38
+ pe_mode=pe_mode,
39
+ use_fp16=use_fp16,
40
+ use_checkpoint=use_checkpoint,
41
+ qk_rms_norm=qk_rms_norm,
42
+ )
43
+ self.resolution = resolution
44
+ self.rep_config = representation_config
45
+ self._calc_layout()
46
+ self.out_layer = sp.SparseLinear(model_channels, self.out_channels)
47
+
48
+ self.initialize_weights()
49
+ if use_fp16:
50
+ self.convert_to_fp16()
51
+
52
+ def initialize_weights(self) -> None:
53
+ super().initialize_weights()
54
+ # Zero-out output layers:
55
+ nn.init.constant_(self.out_layer.weight, 0)
56
+ nn.init.constant_(self.out_layer.bias, 0)
57
+
58
+ def _calc_layout(self) -> None:
59
+ self.layout = {
60
+ 'trivec': {'shape': (self.rep_config['rank'], 3, self.rep_config['dim']), 'size': self.rep_config['rank'] * 3 * self.rep_config['dim']},
61
+ 'density': {'shape': (self.rep_config['rank'],), 'size': self.rep_config['rank']},
62
+ 'features_dc': {'shape': (self.rep_config['rank'], 1, 3), 'size': self.rep_config['rank'] * 3},
63
+ }
64
+ start = 0
65
+ for k, v in self.layout.items():
66
+ v['range'] = (start, start + v['size'])
67
+ start += v['size']
68
+ self.out_channels = start
69
+
70
+ def to_representation(self, x: sp.SparseTensor) -> List[Strivec]:
71
+ """
72
+ Convert a batch of network outputs to 3D representations.
73
+
74
+ Args:
75
+ x: The [N x * x C] sparse tensor output by the network.
76
+
77
+ Returns:
78
+ list of representations
79
+ """
80
+ ret = []
81
+ for i in range(x.shape[0]):
82
+ representation = Strivec(
83
+ sh_degree=0,
84
+ resolution=self.resolution,
85
+ aabb=[-0.5, -0.5, -0.5, 1, 1, 1],
86
+ rank=self.rep_config['rank'],
87
+ dim=self.rep_config['dim'],
88
+ device='cuda',
89
+ )
90
+ representation.density_shift = 0.0
91
+ representation.position = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
92
+ representation.depth = torch.full((representation.position.shape[0], 1), int(np.log2(self.resolution)), dtype=torch.uint8, device='cuda')
93
+ for k, v in self.layout.items():
94
+ setattr(representation, k, x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']))
95
+ representation.trivec = representation.trivec + 1
96
+ ret.append(representation)
97
+ return ret
98
+
99
+ def forward(self, x: sp.SparseTensor) -> List[Strivec]:
100
+ h = super().forward(x)
101
+ h = h.type(x.dtype)
102
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
103
+ h = self.out_layer(h)
104
+ return self.to_representation(h)
trellis/models/structured_latent_vae/encoder.py ADDED
@@ -0,0 +1,72 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ...modules import sparse as sp
6
+ from .base import SparseTransformerBase
7
+
8
+
9
+ class SLatEncoder(SparseTransformerBase):
10
+ def __init__(
11
+ self,
12
+ resolution: int,
13
+ in_channels: int,
14
+ model_channels: int,
15
+ latent_channels: int,
16
+ num_blocks: int,
17
+ num_heads: Optional[int] = None,
18
+ num_head_channels: Optional[int] = 64,
19
+ mlp_ratio: float = 4,
20
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
21
+ window_size: int = 8,
22
+ pe_mode: Literal["ape", "rope"] = "ape",
23
+ use_fp16: bool = False,
24
+ use_checkpoint: bool = False,
25
+ qk_rms_norm: bool = False,
26
+ ):
27
+ super().__init__(
28
+ in_channels=in_channels,
29
+ model_channels=model_channels,
30
+ num_blocks=num_blocks,
31
+ num_heads=num_heads,
32
+ num_head_channels=num_head_channels,
33
+ mlp_ratio=mlp_ratio,
34
+ attn_mode=attn_mode,
35
+ window_size=window_size,
36
+ pe_mode=pe_mode,
37
+ use_fp16=use_fp16,
38
+ use_checkpoint=use_checkpoint,
39
+ qk_rms_norm=qk_rms_norm,
40
+ )
41
+ self.resolution = resolution
42
+ self.out_layer = sp.SparseLinear(model_channels, 2 * latent_channels)
43
+
44
+ self.initialize_weights()
45
+ if use_fp16:
46
+ self.convert_to_fp16()
47
+
48
+ def initialize_weights(self) -> None:
49
+ super().initialize_weights()
50
+ # Zero-out output layers:
51
+ nn.init.constant_(self.out_layer.weight, 0)
52
+ nn.init.constant_(self.out_layer.bias, 0)
53
+
54
+ def forward(self, x: sp.SparseTensor, sample_posterior=True, return_raw=False):
55
+ h = super().forward(x)
56
+ h = h.type(x.dtype)
57
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
58
+ h = self.out_layer(h)
59
+
60
+ # Sample from the posterior distribution
61
+ mean, logvar = h.feats.chunk(2, dim=-1)
62
+ if sample_posterior:
63
+ std = torch.exp(0.5 * logvar)
64
+ z = mean + std * torch.randn_like(std)
65
+ else:
66
+ z = mean
67
+ z = h.replace(z)
68
+
69
+ if return_raw:
70
+ return z, mean, logvar
71
+ else:
72
+ return z
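The encoder's head emits 2 * latent_channels features per voxel and samples the latent with the standard reparameterisation trick. The same computation on a plain dense tensor (sizes are illustrative):

```python
import torch

# Posterior sampling as in SLatEncoder.forward, shown on a dense tensor for clarity.
feats = torch.randn(5, 16)             # hypothetical [num_voxels, 2 * latent_channels]
mean, logvar = feats.chunk(2, dim=-1)  # first half: mean, second half: log-variance
std = torch.exp(0.5 * logvar)
z = mean + std * torch.randn_like(std) # reparameterised sample, differentiable w.r.t. mean/logvar
print(z.shape)                         # torch.Size([5, 8])
```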
trellis/modules/attention/__init__.py ADDED
@@ -0,0 +1,36 @@
1
+ from typing import *
2
+
3
+ BACKEND = 'flash_attn'
4
+ DEBUG = False
5
+
6
+ def __from_env():
7
+ import os
8
+
9
+ global BACKEND
10
+ global DEBUG
11
+
12
+ env_attn_backend = os.environ.get('ATTN_BACKEND')
13
+ env_sttn_debug = os.environ.get('ATTN_DEBUG')
14
+
15
+ if env_attn_backend is not None and env_attn_backend in ['xformers', 'flash_attn', 'sdpa', 'naive']:
16
+ BACKEND = env_attn_backend
17
+ if env_sttn_debug is not None:
18
+ DEBUG = env_sttn_debug == '1'
19
+
20
+ print(f"[ATTENTION] Using backend: {BACKEND}")
21
+
22
+
23
+ __from_env()
24
+
25
+
26
+ def set_backend(backend: Literal['xformers', 'flash_attn', 'sdpa', 'naive']):
27
+ global BACKEND
28
+ BACKEND = backend
29
+
30
+ def set_debug(debug: bool):
31
+ global DEBUG
32
+ DEBUG = debug
33
+
34
+
35
+ from .full_attn import *
36
+ from .modules import *
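Because __from_env runs at import time, the backend has to be chosen via environment variables before the first trellis import (or switched later with set_backend). A usage sketch, assuming the trellis package from this Space is on the import path:

```python
import os

# Must be set before `trellis.modules.attention` is first imported.
os.environ["ATTN_BACKEND"] = "sdpa"   # one of: xformers, flash_attn, sdpa, naive
os.environ["ATTN_DEBUG"] = "0"

from trellis.modules import attention
print(attention.BACKEND)  # "sdpa"; the import also logs "[ATTENTION] Using backend: sdpa"
```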
trellis/modules/attention/full_attn.py ADDED
@@ -0,0 +1,140 @@
1
+ from typing import *
2
+ import torch
3
+ import math
4
+ from . import DEBUG, BACKEND
5
+
6
+ if BACKEND == 'xformers':
7
+ import xformers.ops as xops
8
+ elif BACKEND == 'flash_attn':
9
+ import flash_attn
10
+ elif BACKEND == 'sdpa':
11
+ from torch.nn.functional import scaled_dot_product_attention as sdpa
12
+ elif BACKEND == 'naive':
13
+ pass
14
+ else:
15
+ raise ValueError(f"Unknown attention backend: {BACKEND}")
16
+
17
+
18
+ __all__ = [
19
+ 'scaled_dot_product_attention',
20
+ ]
21
+
22
+
23
+ def _naive_sdpa(q, k, v):
24
+ """
25
+ Naive implementation of scaled dot product attention.
26
+ """
27
+ q = q.permute(0, 2, 1, 3) # [N, H, L, C]
28
+ k = k.permute(0, 2, 1, 3) # [N, H, L, C]
29
+ v = v.permute(0, 2, 1, 3) # [N, H, L, C]
30
+ scale_factor = 1 / math.sqrt(q.size(-1))
31
+ attn_weight = q @ k.transpose(-2, -1) * scale_factor
32
+ attn_weight = torch.softmax(attn_weight, dim=-1)
33
+ out = attn_weight @ v
34
+ out = out.permute(0, 2, 1, 3) # [N, L, H, C]
35
+ return out
36
+
37
+
38
+ @overload
39
+ def scaled_dot_product_attention(qkv: torch.Tensor) -> torch.Tensor:
40
+ """
41
+ Apply scaled dot product attention.
42
+
43
+ Args:
44
+ qkv (torch.Tensor): A [N, L, 3, H, C] tensor containing Qs, Ks, and Vs.
45
+ """
46
+ ...
47
+
48
+ @overload
49
+ def scaled_dot_product_attention(q: torch.Tensor, kv: torch.Tensor) -> torch.Tensor:
50
+ """
51
+ Apply scaled dot product attention.
52
+
53
+ Args:
54
+ q (torch.Tensor): A [N, L, H, C] tensor containing Qs.
55
+ kv (torch.Tensor): A [N, L, 2, H, C] tensor containing Ks and Vs.
56
+ """
57
+ ...
58
+
59
+ @overload
60
+ def scaled_dot_product_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
61
+ """
62
+ Apply scaled dot product attention.
63
+
64
+ Args:
65
+ q (torch.Tensor): A [N, L, H, Ci] tensor containing Qs.
66
+ k (torch.Tensor): A [N, L, H, Ci] tensor containing Ks.
67
+ v (torch.Tensor): A [N, L, H, Co] tensor containing Vs.
68
+
69
+ Note:
70
+ k and v are assumed to have the same coordinate map.
71
+ """
72
+ ...
73
+
74
+ def scaled_dot_product_attention(*args, **kwargs):
75
+ arg_names_dict = {
76
+ 1: ['qkv'],
77
+ 2: ['q', 'kv'],
78
+ 3: ['q', 'k', 'v']
79
+ }
80
+ num_all_args = len(args) + len(kwargs)
81
+ assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
82
+ for key in arg_names_dict[num_all_args][len(args):]:
83
+ assert key in kwargs, f"Missing argument {key}"
84
+
85
+ if num_all_args == 1:
86
+ qkv = args[0] if len(args) > 0 else kwargs['qkv']
87
+ assert len(qkv.shape) == 5 and qkv.shape[2] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, L, 3, H, C]"
88
+ device = qkv.device
89
+
90
+ elif num_all_args == 2:
91
+ q = args[0] if len(args) > 0 else kwargs['q']
92
+ kv = args[1] if len(args) > 1 else kwargs['kv']
93
+ assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
94
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
95
+ assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
96
+ device = q.device
97
+
98
+ elif num_all_args == 3:
99
+ q = args[0] if len(args) > 0 else kwargs['q']
100
+ k = args[1] if len(args) > 1 else kwargs['k']
101
+ v = args[2] if len(args) > 2 else kwargs['v']
102
+ assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
103
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
104
+ assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
105
+ assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
106
+ device = q.device
107
+
108
+ if BACKEND == 'xformers':
109
+ if num_all_args == 1:
110
+ q, k, v = qkv.unbind(dim=2)
111
+ elif num_all_args == 2:
112
+ k, v = kv.unbind(dim=2)
113
+ out = xops.memory_efficient_attention(q, k, v)
114
+ elif BACKEND == 'flash_attn':
115
+ if num_all_args == 1:
116
+ out = flash_attn.flash_attn_qkvpacked_func(qkv)
117
+ elif num_all_args == 2:
118
+ out = flash_attn.flash_attn_kvpacked_func(q, kv)
119
+ elif num_all_args == 3:
120
+ out = flash_attn.flash_attn_func(q, k, v)
121
+ elif BACKEND == 'sdpa':
122
+ if num_all_args == 1:
123
+ q, k, v = qkv.unbind(dim=2)
124
+ elif num_all_args == 2:
125
+ k, v = kv.unbind(dim=2)
126
+ q = q.permute(0, 2, 1, 3) # [N, H, L, C]
127
+ k = k.permute(0, 2, 1, 3) # [N, H, L, C]
128
+ v = v.permute(0, 2, 1, 3) # [N, H, L, C]
129
+ out = sdpa(q, k, v) # [N, H, L, C]
130
+ out = out.permute(0, 2, 1, 3) # [N, L, H, C]
131
+ elif BACKEND == 'naive':
132
+ if num_all_args == 1:
133
+ q, k, v = qkv.unbind(dim=2)
134
+ elif num_all_args == 2:
135
+ k, v = kv.unbind(dim=2)
136
+ out = _naive_sdpa(q, k, v)
137
+ else:
138
+ raise ValueError(f"Unknown attention module: {BACKEND}")
139
+
140
+ return out
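scaled_dot_product_attention accepts packed QKV, a query plus packed KV, or separate Q/K/V, and always returns output shaped [N, L, H, C]. A sketch using the dependency-free 'sdpa' backend (shapes are illustrative; as noted above, ATTN_BACKEND must be set before the import):

```python
import os
os.environ["ATTN_BACKEND"] = "sdpa"   # keep this sketch free of flash-attn/xformers

import torch
from trellis.modules.attention import scaled_dot_product_attention

N, L, H, C = 2, 16, 4, 32
qkv = torch.randn(N, L, 3, H, C)      # packed Q, K, V (self-attention)
q = torch.randn(N, L, H, C)
kv = torch.randn(N, L, 2, H, C)       # packed K, V (e.g. cross-attention context)

print(scaled_dot_product_attention(qkv).shape)    # torch.Size([2, 16, 4, 32])
print(scaled_dot_product_attention(q, kv).shape)  # torch.Size([2, 16, 4, 32])
```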
trellis/modules/attention/modules.py ADDED
@@ -0,0 +1,146 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from .full_attn import scaled_dot_product_attention
6
+
7
+
8
+ class MultiHeadRMSNorm(nn.Module):
9
+ def __init__(self, dim: int, heads: int):
10
+ super().__init__()
11
+ self.scale = dim ** 0.5
12
+ self.gamma = nn.Parameter(torch.ones(heads, dim))
13
+
14
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
15
+ return (F.normalize(x.float(), dim = -1) * self.gamma * self.scale).to(x.dtype)
16
+
17
+
18
+ class RotaryPositionEmbedder(nn.Module):
19
+ def __init__(self, hidden_size: int, in_channels: int = 3):
20
+ super().__init__()
21
+ assert hidden_size % 2 == 0, "Hidden size must be divisible by 2"
22
+ self.hidden_size = hidden_size
23
+ self.in_channels = in_channels
24
+ self.freq_dim = hidden_size // in_channels // 2
25
+ self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
26
+ self.freqs = 1.0 / (10000 ** self.freqs)
27
+
28
+ def _get_phases(self, indices: torch.Tensor) -> torch.Tensor:
29
+ self.freqs = self.freqs.to(indices.device)
30
+ phases = torch.outer(indices, self.freqs)
31
+ phases = torch.polar(torch.ones_like(phases), phases)
32
+ return phases
33
+
34
+ def _rotary_embedding(self, x: torch.Tensor, phases: torch.Tensor) -> torch.Tensor:
35
+ x_complex = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
36
+ x_rotated = x_complex * phases
37
+ x_embed = torch.view_as_real(x_rotated).reshape(*x_rotated.shape[:-1], -1).to(x.dtype)
38
+ return x_embed
39
+
40
+ def forward(self, q: torch.Tensor, k: torch.Tensor, indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
41
+ """
42
+ Args:
43
+ q (torch.Tensor): [..., N, D] tensor of queries
44
+ k (torch.Tensor): [..., N, D] tensor of keys
45
+ indices (torch.Tensor): [..., N, C] tensor of spatial positions
46
+ """
47
+ if indices is None:
48
+ indices = torch.arange(q.shape[-2], device=q.device)
49
+ if len(q.shape) > 2:
50
+ indices = indices.unsqueeze(0).expand(q.shape[:-2] + (-1,))
51
+
52
+ phases = self._get_phases(indices.reshape(-1)).reshape(*indices.shape[:-1], -1)
53
+ if phases.shape[1] < self.hidden_size // 2:
54
+ phases = torch.cat([phases, torch.polar(
55
+ torch.ones(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device),
56
+ torch.zeros(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device)
57
+ )], dim=-1)
58
+ q_embed = self._rotary_embedding(q, phases)
59
+ k_embed = self._rotary_embedding(k, phases)
60
+ return q_embed, k_embed
61
+
62
+
63
+ class MultiHeadAttention(nn.Module):
64
+ def __init__(
65
+ self,
66
+ channels: int,
67
+ num_heads: int,
68
+ ctx_channels: Optional[int]=None,
69
+ type: Literal["self", "cross"] = "self",
70
+ attn_mode: Literal["full", "windowed"] = "full",
71
+ window_size: Optional[int] = None,
72
+ shift_window: Optional[Tuple[int, int, int]] = None,
73
+ qkv_bias: bool = True,
74
+ use_rope: bool = False,
75
+ qk_rms_norm: bool = False,
76
+ ):
77
+ super().__init__()
78
+ assert channels % num_heads == 0
79
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
80
+ assert attn_mode in ["full", "windowed"], f"Invalid attention mode: {attn_mode}"
81
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
82
+
83
+ if attn_mode == "windowed":
84
+ raise NotImplementedError("Windowed attention is not yet implemented")
85
+
86
+ self.channels = channels
87
+ self.head_dim = channels // num_heads
88
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
89
+ self.num_heads = num_heads
90
+ self._type = type
91
+ self.attn_mode = attn_mode
92
+ self.window_size = window_size
93
+ self.shift_window = shift_window
94
+ self.use_rope = use_rope
95
+ self.qk_rms_norm = qk_rms_norm
96
+
97
+ if self._type == "self":
98
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
99
+ else:
100
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
101
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
102
+
103
+ if self.qk_rms_norm:
104
+ self.q_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
105
+ self.k_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
106
+
107
+ self.to_out = nn.Linear(channels, channels)
108
+
109
+ if use_rope:
110
+ self.rope = RotaryPositionEmbedder(channels)
111
+
112
+ def forward(self, x: torch.Tensor, context: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None) -> torch.Tensor:
113
+ B, L, C = x.shape
114
+ if self._type == "self":
115
+ qkv = self.to_qkv(x)
116
+ qkv = qkv.reshape(B, L, 3, self.num_heads, -1)
117
+ if self.use_rope:
118
+ q, k, v = qkv.unbind(dim=2)
119
+ q, k = self.rope(q, k, indices)
120
+ qkv = torch.stack([q, k, v], dim=2)
121
+ if self.attn_mode == "full":
122
+ if self.qk_rms_norm:
123
+ q, k, v = qkv.unbind(dim=2)
124
+ q = self.q_rms_norm(q)
125
+ k = self.k_rms_norm(k)
126
+ h = scaled_dot_product_attention(q, k, v)
127
+ else:
128
+ h = scaled_dot_product_attention(qkv)
129
+ elif self.attn_mode == "windowed":
130
+ raise NotImplementedError("Windowed attention is not yet implemented")
131
+ else:
132
+ Lkv = context.shape[1]
133
+ q = self.to_q(x)
134
+ kv = self.to_kv(context)
135
+ q = q.reshape(B, L, self.num_heads, -1)
136
+ kv = kv.reshape(B, Lkv, 2, self.num_heads, -1)
137
+ if self.qk_rms_norm:
138
+ q = self.q_rms_norm(q)
139
+ k, v = kv.unbind(dim=2)
140
+ k = self.k_rms_norm(k)
141
+ h = scaled_dot_product_attention(q, k, v)
142
+ else:
143
+ h = scaled_dot_product_attention(q, kv)
144
+ h = h.reshape(B, L, -1)
145
+ h = self.to_out(h)
146
+ return h
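A short usage sketch of MultiHeadAttention in both self- and cross-attention configurations (channel counts are illustrative; the 'sdpa' backend is used so the sketch runs without flash-attn or xformers):

```python
import os
os.environ.setdefault("ATTN_BACKEND", "sdpa")

import torch
from trellis.modules.attention import MultiHeadAttention

# Self-attention with QK RMS norm, as used by the dense transformer blocks.
attn = MultiHeadAttention(channels=64, num_heads=8, type="self", qk_rms_norm=True)
x = torch.randn(2, 10, 64)            # [batch, tokens, channels]
print(attn(x).shape)                  # torch.Size([2, 10, 64])

# Cross-attention: context tokens come from a separate sequence with its own width.
xattn = MultiHeadAttention(channels=64, num_heads=8, ctx_channels=128, type="cross")
ctx = torch.randn(2, 20, 128)
print(xattn(x, context=ctx).shape)    # torch.Size([2, 10, 64])
```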
trellis/modules/norm.py ADDED
@@ -0,0 +1,25 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+
5
+ class LayerNorm32(nn.LayerNorm):
6
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
7
+ return super().forward(x.float()).type(x.dtype)
8
+
9
+
10
+ class GroupNorm32(nn.GroupNorm):
11
+ """
12
+ A GroupNorm layer that converts to float32 before the forward pass.
13
+ """
14
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
15
+ return super().forward(x.float()).type(x.dtype)
16
+
17
+
18
+ class ChannelLayerNorm32(LayerNorm32):
19
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
20
+ DIM = x.dim()
21
+ x = x.permute(0, *range(2, DIM), 1).contiguous()
22
+ x = super().forward(x)
23
+ x = x.permute(0, DIM-1, *range(1, DIM-1)).contiguous()
24
+ return x
25
+
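These *32 variants up-cast to float32 for the normalisation statistics and cast the result back, which is what keeps the fp16 paths elsewhere in the model numerically stable. A quick check (shapes are illustrative):

```python
import torch
from trellis.modules.norm import LayerNorm32

norm = LayerNorm32(64)
x = torch.randn(2, 10, 64, dtype=torch.float16)
y = norm(x)          # statistics computed in float32
print(y.dtype)       # torch.float16
```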
trellis/modules/sparse/__init__.py ADDED
@@ -0,0 +1,102 @@
1
+ from typing import *
2
+
3
+ BACKEND = 'spconv'
4
+ DEBUG = False
5
+ ATTN = 'flash_attn'
6
+
7
+ def __from_env():
8
+ import os
9
+
10
+ global BACKEND
11
+ global DEBUG
12
+ global ATTN
13
+
14
+ env_sparse_backend = os.environ.get('SPARSE_BACKEND')
15
+ env_sparse_debug = os.environ.get('SPARSE_DEBUG')
16
+ env_sparse_attn = os.environ.get('SPARSE_ATTN_BACKEND')
17
+ if env_sparse_attn is None:
18
+ env_sparse_attn = os.environ.get('ATTN_BACKEND')
19
+
20
+ if env_sparse_backend is not None and env_sparse_backend in ['spconv', 'torchsparse']:
21
+ BACKEND = env_sparse_backend
22
+ if env_sparse_debug is not None:
23
+ DEBUG = env_sparse_debug == '1'
24
+ if env_sparse_attn is not None and env_sparse_attn in ['xformers', 'flash_attn']:
25
+ ATTN = env_sparse_attn
26
+
27
+ print(f"[SPARSE] Backend: {BACKEND}, Attention: {ATTN}")
28
+
29
+
30
+ __from_env()
31
+
32
+
33
+ def set_backend(backend: Literal['spconv', 'torchsparse']):
34
+ global BACKEND
35
+ BACKEND = backend
36
+
37
+ def set_debug(debug: bool):
38
+ global DEBUG
39
+ DEBUG = debug
40
+
41
+ def set_attn(attn: Literal['xformers', 'flash_attn']):
42
+ global ATTN
43
+ ATTN = attn
44
+
45
+
46
+ import importlib
47
+
48
+ __attributes = {
49
+ 'SparseTensor': 'basic',
50
+ 'sparse_batch_broadcast': 'basic',
51
+ 'sparse_batch_op': 'basic',
52
+ 'sparse_cat': 'basic',
53
+ 'sparse_unbind': 'basic',
54
+ 'SparseGroupNorm': 'norm',
55
+ 'SparseLayerNorm': 'norm',
56
+ 'SparseGroupNorm32': 'norm',
57
+ 'SparseLayerNorm32': 'norm',
58
+ 'SparseReLU': 'nonlinearity',
59
+ 'SparseSiLU': 'nonlinearity',
60
+ 'SparseGELU': 'nonlinearity',
61
+ 'SparseActivation': 'nonlinearity',
62
+ 'SparseLinear': 'linear',
63
+ 'sparse_scaled_dot_product_attention': 'attention',
64
+ 'SerializeMode': 'attention',
65
+ 'sparse_serialized_scaled_dot_product_self_attention': 'attention',
66
+ 'sparse_windowed_scaled_dot_product_self_attention': 'attention',
67
+ 'SparseMultiHeadAttention': 'attention',
68
+ 'SparseConv3d': 'conv',
69
+ 'SparseInverseConv3d': 'conv',
70
+ 'SparseDownsample': 'spatial',
71
+ 'SparseUpsample': 'spatial',
72
+ 'SparseSubdivide' : 'spatial'
73
+ }
74
+
75
+ __submodules = ['transformer']
76
+
77
+ __all__ = list(__attributes.keys()) + __submodules
78
+
79
+ def __getattr__(name):
80
+ if name not in globals():
81
+ if name in __attributes:
82
+ module_name = __attributes[name]
83
+ module = importlib.import_module(f".{module_name}", __name__)
84
+ globals()[name] = getattr(module, name)
85
+ elif name in __submodules:
86
+ module = importlib.import_module(f".{name}", __name__)
87
+ globals()[name] = module
88
+ else:
89
+ raise AttributeError(f"module {__name__} has no attribute {name}")
90
+ return globals()[name]
91
+
92
+
93
+ # For Pylance
94
+ if __name__ == '__main__':
95
+ from .basic import *
96
+ from .norm import *
97
+ from .nonlinearity import *
98
+ from .linear import *
99
+ from .attention import *
100
+ from .conv import *
101
+ from .spatial import *
102
+ import transformer
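As with the attention package, the sparse backends are frozen at first import, and the heavy submodules behind __attributes are only imported when the corresponding name is first accessed through __getattr__. A sketch, assuming the trellis package from this Space is importable:

```python
import os

# Choose backends before the first import of trellis.modules.sparse.
os.environ["SPARSE_BACKEND"] = "spconv"          # or "torchsparse"
os.environ["SPARSE_ATTN_BACKEND"] = "xformers"   # or "flash_attn"

from trellis.modules import sparse as sp
print(sp.BACKEND, sp.ATTN)  # the import logs "[SPARSE] Backend: spconv, Attention: xformers"

# Nothing heavy has been loaded yet; accessing e.g. sp.SparseConv3d would lazily
# import the `.conv` submodule (and with it spconv) on first use.
```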
trellis/modules/sparse/attention/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ from .full_attn import *
2
+ from .serialized_attn import *
3
+ from .windowed_attn import *
4
+ from .modules import *
trellis/modules/sparse/attention/full_attn.py ADDED
@@ -0,0 +1,215 @@
1
+ from typing import *
2
+ import torch
3
+ from .. import SparseTensor
4
+ from .. import DEBUG, ATTN
5
+
6
+ if ATTN == 'xformers':
7
+ import xformers.ops as xops
8
+ elif ATTN == 'flash_attn':
9
+ import flash_attn
10
+ else:
11
+ raise ValueError(f"Unknown attention module: {ATTN}")
12
+
13
+
14
+ __all__ = [
15
+ 'sparse_scaled_dot_product_attention',
16
+ ]
17
+
18
+
19
+ @overload
20
+ def sparse_scaled_dot_product_attention(qkv: SparseTensor) -> SparseTensor:
21
+ """
22
+ Apply scaled dot product attention to a sparse tensor.
23
+
24
+ Args:
25
+ qkv (SparseTensor): A [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
26
+ """
27
+ ...
28
+
29
+ @overload
30
+ def sparse_scaled_dot_product_attention(q: SparseTensor, kv: Union[SparseTensor, torch.Tensor]) -> SparseTensor:
31
+ """
32
+ Apply scaled dot product attention to a sparse tensor.
33
+
34
+ Args:
35
+ q (SparseTensor): A [N, *, H, C] sparse tensor containing Qs.
36
+ kv (SparseTensor or torch.Tensor): A [N, *, 2, H, C] sparse tensor or a [N, L, 2, H, C] dense tensor containing Ks and Vs.
37
+ """
38
+ ...
39
+
40
+ @overload
41
+ def sparse_scaled_dot_product_attention(q: torch.Tensor, kv: SparseTensor) -> torch.Tensor:
42
+ """
43
+ Apply scaled dot product attention to a sparse tensor.
44
+
45
+ Args:
46
+ q (torch.Tensor): A [N, L, H, C] dense tensor containing Qs.
47
+ kv (SparseTensor): A [N, *, 2, H, C] sparse tensor containing Ks and Vs.
48
+ """
49
+ ...
50
+
51
+ @overload
52
+ def sparse_scaled_dot_product_attention(q: SparseTensor, k: SparseTensor, v: SparseTensor) -> SparseTensor:
53
+ """
54
+ Apply scaled dot product attention to a sparse tensor.
55
+
56
+ Args:
57
+ q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
58
+ k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
59
+ v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
60
+
61
+ Note:
62
+ k and v are assumed to have the same coordinate map.
63
+ """
64
+ ...
65
+
66
+ @overload
67
+ def sparse_scaled_dot_product_attention(q: SparseTensor, k: torch.Tensor, v: torch.Tensor) -> SparseTensor:
68
+ """
69
+ Apply scaled dot product attention to a sparse tensor.
70
+
71
+ Args:
72
+ q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
73
+ k (torch.Tensor): A [N, L, H, Ci] dense tensor containing Ks.
74
+ v (torch.Tensor): A [N, L, H, Co] dense tensor containing Vs.
75
+ """
76
+ ...
77
+
78
+ @overload
79
+ def sparse_scaled_dot_product_attention(q: torch.Tensor, k: SparseTensor, v: SparseTensor) -> torch.Tensor:
80
+ """
81
+ Apply scaled dot product attention to a sparse tensor.
82
+
83
+ Args:
84
+ q (torch.Tensor): A [N, L, H, Ci] dense tensor containing Qs.
85
+ k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
86
+ v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
87
+ """
88
+ ...
89
+
90
+ def sparse_scaled_dot_product_attention(*args, **kwargs):
91
+ arg_names_dict = {
92
+ 1: ['qkv'],
93
+ 2: ['q', 'kv'],
94
+ 3: ['q', 'k', 'v']
95
+ }
96
+ num_all_args = len(args) + len(kwargs)
97
+ assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
98
+ for key in arg_names_dict[num_all_args][len(args):]:
99
+ assert key in kwargs, f"Missing argument {key}"
100
+
101
+ if num_all_args == 1:
102
+ qkv = args[0] if len(args) > 0 else kwargs['qkv']
103
+ assert isinstance(qkv, SparseTensor), f"qkv must be a SparseTensor, got {type(qkv)}"
104
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
105
+ device = qkv.device
106
+
107
+ s = qkv
108
+ q_seqlen = [qkv.layout[i].stop - qkv.layout[i].start for i in range(qkv.shape[0])]
109
+ kv_seqlen = q_seqlen
110
+ qkv = qkv.feats # [T, 3, H, C]
111
+
112
+ elif num_all_args == 2:
113
+ q = args[0] if len(args) > 0 else kwargs['q']
114
+ kv = args[1] if len(args) > 1 else kwargs['kv']
115
+ assert isinstance(q, SparseTensor) and isinstance(kv, (SparseTensor, torch.Tensor)) or \
116
+ isinstance(q, torch.Tensor) and isinstance(kv, SparseTensor), \
117
+ f"Invalid types, got {type(q)} and {type(kv)}"
118
+ assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
119
+ device = q.device
120
+
121
+ if isinstance(q, SparseTensor):
122
+ assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, C]"
123
+ s = q
124
+ q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
125
+ q = q.feats # [T_Q, H, C]
126
+ else:
127
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
128
+ s = None
129
+ N, L, H, C = q.shape
130
+ q_seqlen = [L] * N
131
+ q = q.reshape(N * L, H, C) # [T_Q, H, C]
132
+
133
+ if isinstance(kv, SparseTensor):
134
+ assert len(kv.shape) == 4 and kv.shape[1] == 2, f"Invalid shape for kv, got {kv.shape}, expected [N, *, 2, H, C]"
135
+ kv_seqlen = [kv.layout[i].stop - kv.layout[i].start for i in range(kv.shape[0])]
136
+ kv = kv.feats # [T_KV, 2, H, C]
137
+ else:
138
+ assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
139
+ N, L, _, H, C = kv.shape
140
+ kv_seqlen = [L] * N
141
+ kv = kv.reshape(N * L, 2, H, C) # [T_KV, 2, H, C]
142
+
143
+ elif num_all_args == 3:
144
+ q = args[0] if len(args) > 0 else kwargs['q']
145
+ k = args[1] if len(args) > 1 else kwargs['k']
146
+ v = args[2] if len(args) > 2 else kwargs['v']
147
+ assert isinstance(q, SparseTensor) and isinstance(k, (SparseTensor, torch.Tensor)) and type(k) == type(v) or \
148
+ isinstance(q, torch.Tensor) and isinstance(k, SparseTensor) and isinstance(v, SparseTensor), \
149
+ f"Invalid types, got {type(q)}, {type(k)}, and {type(v)}"
150
+ assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
151
+ device = q.device
152
+
153
+ if isinstance(q, SparseTensor):
154
+ assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, Ci]"
155
+ s = q
156
+ q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
157
+ q = q.feats # [T_Q, H, Ci]
158
+ else:
159
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
160
+ s = None
161
+ N, L, H, CI = q.shape
162
+ q_seqlen = [L] * N
163
+ q = q.reshape(N * L, H, CI) # [T_Q, H, Ci]
164
+
165
+ if isinstance(k, SparseTensor):
166
+ assert len(k.shape) == 3, f"Invalid shape for k, got {k.shape}, expected [N, *, H, Ci]"
167
+ assert len(v.shape) == 3, f"Invalid shape for v, got {v.shape}, expected [N, *, H, Co]"
168
+ kv_seqlen = [k.layout[i].stop - k.layout[i].start for i in range(k.shape[0])]
169
+ k = k.feats # [T_KV, H, Ci]
170
+ v = v.feats # [T_KV, H, Co]
171
+ else:
172
+ assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
173
+ assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
174
+ N, L, H, CI, CO = *k.shape, v.shape[-1]
175
+ kv_seqlen = [L] * N
176
+ k = k.reshape(N * L, H, CI) # [T_KV, H, Ci]
177
+ v = v.reshape(N * L, H, CO) # [T_KV, H, Co]
178
+
179
+ if DEBUG:
180
+ if s is not None:
181
+ for i in range(s.shape[0]):
182
+ assert (s.coords[s.layout[i]] == i).all(), f"SparseScaledDotProductSelfAttention: batch index mismatch"
183
+ if num_all_args in [2, 3]:
184
+ assert q.shape[:2] == [1, sum(q_seqlen)], f"SparseScaledDotProductSelfAttention: q shape mismatch"
185
+ if num_all_args == 3:
186
+ assert k.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: k shape mismatch"
187
+ assert v.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: v shape mismatch"
188
+
189
+ if ATTN == 'xformers':
190
+ if num_all_args == 1:
191
+ q, k, v = qkv.unbind(dim=1)
192
+ elif num_all_args == 2:
193
+ k, v = kv.unbind(dim=1)
194
+ q = q.unsqueeze(0)
195
+ k = k.unsqueeze(0)
196
+ v = v.unsqueeze(0)
197
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(q_seqlen, kv_seqlen)
198
+ out = xops.memory_efficient_attention(q, k, v, mask)[0]
199
+ elif ATTN == 'flash_attn':
200
+ cu_seqlens_q = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int().to(device)
201
+ if num_all_args in [2, 3]:
202
+ cu_seqlens_kv = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(kv_seqlen), dim=0)]).int().to(device)
203
+ if num_all_args == 1:
204
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens_q, max(q_seqlen))
205
+ elif num_all_args == 2:
206
+ out = flash_attn.flash_attn_varlen_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
207
+ elif num_all_args == 3:
208
+ out = flash_attn.flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
209
+ else:
210
+ raise ValueError(f"Unknown attention module: {ATTN}")
211
+
212
+ if s is not None:
213
+ return s.replace(out)
214
+ else:
215
+ return out.reshape(N, L, H, -1)
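The flash-attn branch above flattens all samples into one token dimension and describes the batch with cumulative sequence lengths. A sketch of that bookkeeping with illustrative voxel counts:

```python
import torch

# Per-sample active-voxel counts, as derived from tensor.layout in the code above.
q_seqlen = [37, 12, 53]
cu_seqlens_q = torch.cat([torch.tensor([0]),
                          torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int()
print(cu_seqlens_q)   # tensor([  0,  37,  49, 102], dtype=torch.int32)
print(max(q_seqlen))  # 53 -> passed as the max sequence length to flash_attn_varlen_*
```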
trellis/modules/sparse/attention/modules.py ADDED
@@ -0,0 +1,139 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from .. import SparseTensor
6
+ from .full_attn import sparse_scaled_dot_product_attention
7
+ from .serialized_attn import SerializeMode, sparse_serialized_scaled_dot_product_self_attention
8
+ from .windowed_attn import sparse_windowed_scaled_dot_product_self_attention
9
+ from ...attention import RotaryPositionEmbedder
10
+
11
+
12
+ class SparseMultiHeadRMSNorm(nn.Module):
13
+ def __init__(self, dim: int, heads: int):
14
+ super().__init__()
15
+ self.scale = dim ** 0.5
16
+ self.gamma = nn.Parameter(torch.ones(heads, dim))
17
+
18
+ def forward(self, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
19
+ x_type = x.dtype
20
+ x = x.float()
21
+ if isinstance(x, SparseTensor):
22
+ x = x.replace(F.normalize(x.feats, dim=-1))
23
+ else:
24
+ x = F.normalize(x, dim=-1)
25
+ return (x * self.gamma * self.scale).to(x_type)
26
+
27
+
28
+ class SparseMultiHeadAttention(nn.Module):
29
+ def __init__(
30
+ self,
31
+ channels: int,
32
+ num_heads: int,
33
+ ctx_channels: Optional[int] = None,
34
+ type: Literal["self", "cross"] = "self",
35
+ attn_mode: Literal["full", "serialized", "windowed"] = "full",
36
+ window_size: Optional[int] = None,
37
+ shift_sequence: Optional[int] = None,
38
+ shift_window: Optional[Tuple[int, int, int]] = None,
39
+ serialize_mode: Optional[SerializeMode] = None,
40
+ qkv_bias: bool = True,
41
+ use_rope: bool = False,
42
+ qk_rms_norm: bool = False,
43
+ ):
44
+ super().__init__()
45
+ assert channels % num_heads == 0
46
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
47
+ assert attn_mode in ["full", "serialized", "windowed"], f"Invalid attention mode: {attn_mode}"
48
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
49
+ assert type == "self" or use_rope is False, "Rotary position embeddings only supported for self-attention"
50
+ self.channels = channels
51
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
52
+ self.num_heads = num_heads
53
+ self._type = type
54
+ self.attn_mode = attn_mode
55
+ self.window_size = window_size
56
+ self.shift_sequence = shift_sequence
57
+ self.shift_window = shift_window
58
+ self.serialize_mode = serialize_mode
59
+ self.use_rope = use_rope
60
+ self.qk_rms_norm = qk_rms_norm
61
+
62
+ if self._type == "self":
63
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
64
+ else:
65
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
66
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
67
+
68
+ if self.qk_rms_norm:
69
+ self.q_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
70
+ self.k_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
71
+
72
+ self.to_out = nn.Linear(channels, channels)
73
+
74
+ if use_rope:
75
+ self.rope = RotaryPositionEmbedder(channels)
76
+
77
+ @staticmethod
78
+ def _linear(module: nn.Linear, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
79
+ if isinstance(x, SparseTensor):
80
+ return x.replace(module(x.feats))
81
+ else:
82
+ return module(x)
83
+
84
+ @staticmethod
85
+ def _reshape_chs(x: Union[SparseTensor, torch.Tensor], shape: Tuple[int, ...]) -> Union[SparseTensor, torch.Tensor]:
86
+ if isinstance(x, SparseTensor):
87
+ return x.reshape(*shape)
88
+ else:
89
+ return x.reshape(*x.shape[:2], *shape)
90
+
91
+ def _fused_pre(self, x: Union[SparseTensor, torch.Tensor], num_fused: int) -> Union[SparseTensor, torch.Tensor]:
92
+ if isinstance(x, SparseTensor):
93
+ x_feats = x.feats.unsqueeze(0)
94
+ else:
95
+ x_feats = x
96
+ x_feats = x_feats.reshape(*x_feats.shape[:2], num_fused, self.num_heads, -1)
97
+ return x.replace(x_feats.squeeze(0)) if isinstance(x, SparseTensor) else x_feats
98
+
99
+ def _rope(self, qkv: SparseTensor) -> SparseTensor:
100
+ q, k, v = qkv.feats.unbind(dim=1) # [T, H, C]
101
+ q, k = self.rope(q, k, qkv.coords[:, 1:])
102
+ qkv = qkv.replace(torch.stack([q, k, v], dim=1))
103
+ return qkv
104
+
105
+ def forward(self, x: Union[SparseTensor, torch.Tensor], context: Optional[Union[SparseTensor, torch.Tensor]] = None) -> Union[SparseTensor, torch.Tensor]:
106
+ if self._type == "self":
107
+ qkv = self._linear(self.to_qkv, x)
108
+ qkv = self._fused_pre(qkv, num_fused=3)
109
+ if self.use_rope:
110
+ qkv = self._rope(qkv)
111
+ if self.qk_rms_norm:
112
+ q, k, v = qkv.unbind(dim=1)
113
+ q = self.q_rms_norm(q)
114
+ k = self.k_rms_norm(k)
115
+ qkv = qkv.replace(torch.stack([q.feats, k.feats, v.feats], dim=1))
116
+ if self.attn_mode == "full":
117
+ h = sparse_scaled_dot_product_attention(qkv)
118
+ elif self.attn_mode == "serialized":
119
+ h = sparse_serialized_scaled_dot_product_self_attention(
120
+ qkv, self.window_size, serialize_mode=self.serialize_mode, shift_sequence=self.shift_sequence, shift_window=self.shift_window
121
+ )
122
+ elif self.attn_mode == "windowed":
123
+ h = sparse_windowed_scaled_dot_product_self_attention(
124
+ qkv, self.window_size, shift_window=self.shift_window
125
+ )
126
+ else:
127
+ q = self._linear(self.to_q, x)
128
+ q = self._reshape_chs(q, (self.num_heads, -1))
129
+ kv = self._linear(self.to_kv, context)
130
+ kv = self._fused_pre(kv, num_fused=2)
131
+ if self.qk_rms_norm:
132
+ q = self.q_rms_norm(q)
133
+ k, v = kv.unbind(dim=1)
134
+ k = self.k_rms_norm(k)
135
+ kv = kv.replace(torch.stack([k.feats, v.feats], dim=1))
136
+ h = sparse_scaled_dot_product_attention(q, kv)
137
+ h = self._reshape_chs(h, (-1,))
138
+ h = self._linear(self.to_out, h)
139
+ return h
trellis/modules/sparse/attention/serialized_attn.py ADDED
@@ -0,0 +1,193 @@
1
+ from typing import *
2
+ from enum import Enum
3
+ import torch
4
+ import math
5
+ from .. import SparseTensor
6
+ from .. import DEBUG, ATTN
7
+
8
+ if ATTN == 'xformers':
9
+ import xformers.ops as xops
10
+ elif ATTN == 'flash_attn':
11
+ import flash_attn
12
+ else:
13
+ raise ValueError(f"Unknown attention module: {ATTN}")
14
+
15
+
16
+ __all__ = [
17
+ 'sparse_serialized_scaled_dot_product_self_attention',
18
+ ]
19
+
20
+
21
+ class SerializeMode(Enum):
22
+ Z_ORDER = 0
23
+ Z_ORDER_TRANSPOSED = 1
24
+ HILBERT = 2
25
+ HILBERT_TRANSPOSED = 3
26
+
27
+
28
+ SerializeModes = [
29
+ SerializeMode.Z_ORDER,
30
+ SerializeMode.Z_ORDER_TRANSPOSED,
31
+ SerializeMode.HILBERT,
32
+ SerializeMode.HILBERT_TRANSPOSED
33
+ ]
34
+
35
+
36
+ def calc_serialization(
37
+ tensor: SparseTensor,
38
+ window_size: int,
39
+ serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
40
+ shift_sequence: int = 0,
41
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
42
+ ) -> Tuple[torch.Tensor, torch.Tensor, List[int], List[int]]:
43
+ """
44
+ Calculate serialization and partitioning for a set of coordinates.
45
+
46
+ Args:
47
+ tensor (SparseTensor): The input tensor.
48
+ window_size (int): The window size to use.
49
+ serialize_mode (SerializeMode): The serialization mode to use.
50
+ shift_sequence (int): The shift of serialized sequence.
51
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
52
+
53
+ Returns:
54
+ (torch.Tensor, torch.Tensor, List[int], List[int]): Forward and backward indices, per-window sequence lengths, and their batch indices.
55
+ """
56
+ fwd_indices = []
57
+ bwd_indices = []
58
+ seq_lens = []
59
+ seq_batch_indices = []
60
+ offsets = [0]
61
+
62
+ if 'vox2seq' not in globals():
63
+ import vox2seq
64
+
65
+ # Serialize the input
66
+ serialize_coords = tensor.coords[:, 1:].clone()
67
+ serialize_coords += torch.tensor(shift_window, dtype=torch.int32, device=tensor.device).reshape(1, 3)
68
+ if serialize_mode == SerializeMode.Z_ORDER:
69
+ code = vox2seq.encode(serialize_coords, mode='z_order', permute=[0, 1, 2])
70
+ elif serialize_mode == SerializeMode.Z_ORDER_TRANSPOSED:
71
+ code = vox2seq.encode(serialize_coords, mode='z_order', permute=[1, 0, 2])
72
+ elif serialize_mode == SerializeMode.HILBERT:
73
+ code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[0, 1, 2])
74
+ elif serialize_mode == SerializeMode.HILBERT_TRANSPOSED:
75
+ code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[1, 0, 2])
76
+ else:
77
+ raise ValueError(f"Unknown serialize mode: {serialize_mode}")
78
+
79
+ for bi, s in enumerate(tensor.layout):
80
+ num_points = s.stop - s.start
81
+ num_windows = (num_points + window_size - 1) // window_size
82
+ valid_window_size = num_points / num_windows
83
+ to_ordered = torch.argsort(code[s.start:s.stop])
84
+ if num_windows == 1:
85
+ fwd_indices.append(to_ordered)
86
+ bwd_indices.append(torch.zeros_like(to_ordered).scatter_(0, to_ordered, torch.arange(num_points, device=tensor.device)))
87
+ fwd_indices[-1] += s.start
88
+ bwd_indices[-1] += offsets[-1]
89
+ seq_lens.append(num_points)
90
+ seq_batch_indices.append(bi)
91
+ offsets.append(offsets[-1] + seq_lens[-1])
92
+ else:
93
+ # Partition the input
94
+ offset = 0
95
+ mids = [(i + 0.5) * valid_window_size + shift_sequence for i in range(num_windows)]
96
+ split = [math.floor(i * valid_window_size + shift_sequence) for i in range(num_windows + 1)]
97
+ bwd_index = torch.zeros((num_points,), dtype=torch.int64, device=tensor.device)
98
+ for i in range(num_windows):
99
+ mid = mids[i]
100
+ valid_start = split[i]
101
+ valid_end = split[i + 1]
102
+ padded_start = math.floor(mid - 0.5 * window_size)
103
+ padded_end = padded_start + window_size
104
+ fwd_indices.append(to_ordered[torch.arange(padded_start, padded_end, device=tensor.device) % num_points])
105
+ offset += valid_start - padded_start
106
+ bwd_index.scatter_(0, fwd_indices[-1][valid_start-padded_start:valid_end-padded_start], torch.arange(offset, offset + valid_end - valid_start, device=tensor.device))
107
+ offset += padded_end - valid_start
108
+ fwd_indices[-1] += s.start
109
+ seq_lens.extend([window_size] * num_windows)
110
+ seq_batch_indices.extend([bi] * num_windows)
111
+ bwd_indices.append(bwd_index + offsets[-1])
112
+ offsets.append(offsets[-1] + num_windows * window_size)
113
+
114
+ fwd_indices = torch.cat(fwd_indices)
115
+ bwd_indices = torch.cat(bwd_indices)
116
+
117
+ return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
118
+
119
+
120
+ def sparse_serialized_scaled_dot_product_self_attention(
121
+ qkv: SparseTensor,
122
+ window_size: int,
123
+ serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
124
+ shift_sequence: int = 0,
125
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
126
+ ) -> SparseTensor:
127
+ """
128
+ Apply serialized scaled dot product self attention to a sparse tensor.
129
+
130
+ Args:
131
+ qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
132
+ window_size (int): The window size to use.
133
+ serialize_mode (SerializeMode): The serialization mode to use.
134
+ shift_sequence (int): The shift of serialized sequence.
135
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
137
+ """
138
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
139
+
140
+ serialization_spatial_cache_name = f'serialization_{serialize_mode}_{window_size}_{shift_sequence}_{shift_window}'
141
+ serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
142
+ if serialization_spatial_cache is None:
143
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_serialization(qkv, window_size, serialize_mode, shift_sequence, shift_window)
144
+ qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
145
+ else:
146
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
147
+
148
+ M = fwd_indices.shape[0]
149
+ T = qkv.feats.shape[0]
150
+ H = qkv.feats.shape[2]
151
+ C = qkv.feats.shape[3]
152
+
153
+ qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C]
154
+
155
+ if DEBUG:
156
+ start = 0
157
+ qkv_coords = qkv.coords[fwd_indices]
158
+ for i in range(len(seq_lens)):
159
+ assert (qkv_coords[start:start+seq_lens[i], 0] == seq_batch_indices[i]).all(), f"SparseSerializedScaledDotProductSelfAttention: batch index mismatch"
160
+ start += seq_lens[i]
161
+
162
+ if all([seq_len == window_size for seq_len in seq_lens]):
163
+ B = len(seq_lens)
164
+ N = window_size
165
+ qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
166
+ if ATTN == 'xformers':
167
+ q, k, v = qkv_feats.unbind(dim=2) # [B, N, H, C]
168
+ out = xops.memory_efficient_attention(q, k, v) # [B, N, H, C]
169
+ elif ATTN == 'flash_attn':
170
+ out = flash_attn.flash_attn_qkvpacked_func(qkv_feats) # [B, N, H, C]
171
+ else:
172
+ raise ValueError(f"Unknown attention module: {ATTN}")
173
+ out = out.reshape(B * N, H, C) # [M, H, C]
174
+ else:
175
+ if ATTN == 'xformers':
176
+ q, k, v = qkv_feats.unbind(dim=1) # [M, H, C]
177
+ q = q.unsqueeze(0) # [1, M, H, C]
178
+ k = k.unsqueeze(0) # [1, M, H, C]
179
+ v = v.unsqueeze(0) # [1, M, H, C]
180
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
181
+ out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C]
182
+ elif ATTN == 'flash_attn':
183
+ cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
184
+ .to(qkv.device).int()
185
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens)) # [M, H, C]
186
+
187
+ out = out[bwd_indices] # [T, H, C]
188
+
189
+ if DEBUG:
190
+ qkv_coords = qkv_coords[bwd_indices]
191
+ assert torch.equal(qkv_coords, qkv.coords), "SparseSerializedScaledDotProductSelfAttention: coordinate mismatch"
192
+
193
+ return qkv.replace(out)
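A quick note on the index bookkeeping above: `calc_serialization` sorts points by a space-filling-curve code (from the external `vox2seq` package) and records both the gather order and its inverse, so attention outputs can be scattered back to the original point order. The round trip is easiest to see on a toy 1-D example; the sketch below uses plain PyTorch and mirrors only the single-window branch, not the exact overlapping-window math.

import torch

torch.manual_seed(0)
num_points = 10
code = torch.randperm(100)[:num_points]        # stand-in for a Z-order / Hilbert code
to_ordered = torch.argsort(code)               # forward indices: gather into serialized order
bwd = torch.zeros_like(to_ordered).scatter_(   # backward indices: inverse permutation
    0, to_ordered, torch.arange(num_points)
)

feats = torch.randn(num_points, 4)
assert torch.equal(feats[to_ordered][bwd], feats)   # gather then scatter restores the input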
trellis/modules/sparse/attention/windowed_attn.py ADDED
@@ -0,0 +1,135 @@
1
+ from typing import *
2
+ import torch
3
+ import math
4
+ from .. import SparseTensor
5
+ from .. import DEBUG, ATTN
6
+
7
+ if ATTN == 'xformers':
8
+ import xformers.ops as xops
9
+ elif ATTN == 'flash_attn':
10
+ import flash_attn
11
+ else:
12
+ raise ValueError(f"Unknown attention module: {ATTN}")
13
+
14
+
15
+ __all__ = [
16
+ 'sparse_windowed_scaled_dot_product_self_attention',
17
+ ]
18
+
19
+
20
+ def calc_window_partition(
21
+ tensor: SparseTensor,
22
+ window_size: Union[int, Tuple[int, ...]],
23
+ shift_window: Union[int, Tuple[int, ...]] = 0
24
+ ) -> Tuple[torch.Tensor, torch.Tensor, List[int], List[int]]:
25
+ """
26
+ Calculate window partitioning for a set of coordinates.
27
+
28
+ Args:
29
+ tensor (SparseTensor): The input tensor.
30
+ window_size (int): The window size to use.
31
+ shift_window (Tuple[int, ...]): The shift of serialized coordinates.
32
+
33
+ Returns:
34
+ (torch.Tensor): Forwards indices.
35
+ (torch.Tensor): Backwards indices.
36
+ (List[int]): Sequence lengths.
37
+ (List[int]): Sequence batch indices.
38
+ """
39
+ DIM = tensor.coords.shape[1] - 1
40
+ shift_window = (shift_window,) * DIM if isinstance(shift_window, int) else shift_window
41
+ window_size = (window_size,) * DIM if isinstance(window_size, int) else window_size
42
+ shifted_coords = tensor.coords.clone().detach()
43
+ shifted_coords[:, 1:] += torch.tensor(shift_window, device=tensor.device, dtype=torch.int32).unsqueeze(0)
44
+
45
+ MAX_COORDS = shifted_coords[:, 1:].max(dim=0).values.tolist()
46
+ NUM_WINDOWS = [math.ceil((mc + 1) / ws) for mc, ws in zip(MAX_COORDS, window_size)]
47
+ OFFSET = torch.cumprod(torch.tensor([1] + NUM_WINDOWS[::-1]), dim=0).tolist()[::-1]
48
+
49
+ shifted_coords[:, 1:] //= torch.tensor(window_size, device=tensor.device, dtype=torch.int32).unsqueeze(0)
50
+ shifted_indices = (shifted_coords * torch.tensor(OFFSET, device=tensor.device, dtype=torch.int32).unsqueeze(0)).sum(dim=1)
51
+ fwd_indices = torch.argsort(shifted_indices)
52
+ bwd_indices = torch.empty_like(fwd_indices)
53
+ bwd_indices[fwd_indices] = torch.arange(fwd_indices.shape[0], device=tensor.device)
54
+ seq_lens = torch.bincount(shifted_indices)
55
+ seq_batch_indices = torch.arange(seq_lens.shape[0], device=tensor.device, dtype=torch.int32) // OFFSET[0]
56
+ mask = seq_lens != 0
57
+ seq_lens = seq_lens[mask].tolist()
58
+ seq_batch_indices = seq_batch_indices[mask].tolist()
59
+
60
+ return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
61
+
62
+
63
+ def sparse_windowed_scaled_dot_product_self_attention(
64
+ qkv: SparseTensor,
65
+ window_size: int,
66
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
67
+ ) -> SparseTensor:
68
+ """
69
+ Apply windowed scaled dot product self attention to a sparse tensor.
70
+
71
+ Args:
72
+ qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
73
+ window_size (int): The window size to use.
74
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
76
+ """
77
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
78
+
79
+ serialization_spatial_cache_name = f'window_partition_{window_size}_{shift_window}'
80
+ serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
81
+ if serialization_spatial_cache is None:
82
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_window_partition(qkv, window_size, shift_window)
83
+ qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
84
+ else:
85
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
86
+
87
+ M = fwd_indices.shape[0]
88
+ T = qkv.feats.shape[0]
89
+ H = qkv.feats.shape[2]
90
+ C = qkv.feats.shape[3]
91
+
92
+ qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C]
93
+
94
+ if DEBUG:
95
+ start = 0
96
+ qkv_coords = qkv.coords[fwd_indices]
97
+ for i in range(len(seq_lens)):
98
+ seq_coords = qkv_coords[start:start+seq_lens[i]]
99
+ assert (seq_coords[:, 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch"
100
+ assert (seq_coords[:, 1:].max(dim=0).values - seq_coords[:, 1:].min(dim=0).values < window_size).all(), \
101
+ f"SparseWindowedScaledDotProductSelfAttention: window size exceeded"
102
+ start += seq_lens[i]
103
+
104
+ if all([seq_len == window_size for seq_len in seq_lens]):
105
+ B = len(seq_lens)
106
+ N = window_size
107
+ qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
108
+ if ATTN == 'xformers':
109
+ q, k, v = qkv_feats.unbind(dim=2) # [B, N, H, C]
110
+ out = xops.memory_efficient_attention(q, k, v) # [B, N, H, C]
111
+ elif ATTN == 'flash_attn':
112
+ out = flash_attn.flash_attn_qkvpacked_func(qkv_feats) # [B, N, H, C]
113
+ else:
114
+ raise ValueError(f"Unknown attention module: {ATTN}")
115
+ out = out.reshape(B * N, H, C) # [M, H, C]
116
+ else:
117
+ if ATTN == 'xformers':
118
+ q, k, v = qkv_feats.unbind(dim=1) # [M, H, C]
119
+ q = q.unsqueeze(0) # [1, M, H, C]
120
+ k = k.unsqueeze(0) # [1, M, H, C]
121
+ v = v.unsqueeze(0) # [1, M, H, C]
122
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
123
+ out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C]
124
+ elif ATTN == 'flash_attn':
125
+ cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
126
+ .to(qkv.device).int()
127
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens)) # [M, H, C]
128
+
129
+ out = out[bwd_indices] # [T, H, C]
130
+
131
+ if DEBUG:
132
+ qkv_coords = qkv_coords[bwd_indices]
133
+ assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch"
134
+
135
+ return qkv.replace(out)
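The window bucketing in `calc_window_partition` boils down to a mixed-radix hash: each point's (batch, x//w, y//w, z//w) tuple is flattened into a single integer, points are sorted by that integer, and `bincount` gives the per-window sequence lengths. A small self-contained sketch of that hashing step on hand-made coordinates (no SparseTensor involved):

import math
import torch

coords = torch.tensor([[0, 0, 0, 0],
                       [0, 1, 3, 2],
                       [0, 5, 1, 0],
                       [1, 2, 2, 2]])          # (batch, x, y, z)
window_size = (4, 4, 4)

grid = coords.clone()
max_coords = grid[:, 1:].max(dim=0).values.tolist()
num_windows = [math.ceil((mc + 1) / ws) for mc, ws in zip(max_coords, window_size)]
# Mixed-radix offsets so each (batch, wx, wy, wz) tuple maps to a unique integer
offset = torch.cumprod(torch.tensor([1] + num_windows[::-1]), dim=0).tolist()[::-1]

grid[:, 1:] //= torch.tensor(window_size)       # which window each point falls into
window_id = (grid * torch.tensor(offset)).sum(dim=1)

fwd_indices = torch.argsort(window_id)          # gather points window by window
seq_lens = torch.bincount(window_id)
print(window_id.tolist())                       # [0, 0, 1, 2]
print(seq_lens[seq_lens != 0].tolist())         # [2, 1, 1]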
trellis/modules/sparse/basic.py ADDED
@@ -0,0 +1,459 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from . import BACKEND, DEBUG
5
+ SparseTensorData = None # Lazy import
6
+
7
+
8
+ __all__ = [
9
+ 'SparseTensor',
10
+ 'sparse_batch_broadcast',
11
+ 'sparse_batch_op',
12
+ 'sparse_cat',
13
+ 'sparse_unbind',
14
+ ]
15
+
16
+
17
+ class SparseTensor:
18
+ """
19
+ Sparse tensor with support for both torchsparse and spconv backends.
20
+
21
+ Parameters:
22
+ - feats (torch.Tensor): Features of the sparse tensor.
23
+ - coords (torch.Tensor): Coordinates of the sparse tensor.
24
+ - shape (torch.Size): Shape of the sparse tensor.
25
+ - layout (List[slice]): Layout of the sparse tensor for each batch
26
+ - data (SparseTensorData): Sparse tensor data used for convolution
27
+
28
+ NOTE:
29
+ - Data corresponding to the same batch should be contiguous.
30
+ - Coords should be in [0, 1023]
31
+ """
32
+ @overload
33
+ def __init__(self, feats: torch.Tensor, coords: torch.Tensor, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ...
34
+
35
+ @overload
36
+ def __init__(self, data, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ...
37
+
38
+ def __init__(self, *args, **kwargs):
39
+ # Lazy import of sparse tensor backend
40
+ global SparseTensorData
41
+ if SparseTensorData is None:
42
+ import importlib
43
+ if BACKEND == 'torchsparse':
44
+ SparseTensorData = importlib.import_module('torchsparse').SparseTensor
45
+ elif BACKEND == 'spconv':
46
+ SparseTensorData = importlib.import_module('spconv.pytorch').SparseConvTensor
47
+
48
+ method_id = 0
49
+ if len(args) != 0:
50
+ method_id = 0 if isinstance(args[0], torch.Tensor) else 1
51
+ else:
52
+ method_id = 1 if 'data' in kwargs else 0
53
+
54
+ if method_id == 0:
55
+ feats, coords, shape, layout = args + (None,) * (4 - len(args))
56
+ if 'feats' in kwargs:
57
+ feats = kwargs['feats']
58
+ del kwargs['feats']
59
+ if 'coords' in kwargs:
60
+ coords = kwargs['coords']
61
+ del kwargs['coords']
62
+ if 'shape' in kwargs:
63
+ shape = kwargs['shape']
64
+ del kwargs['shape']
65
+ if 'layout' in kwargs:
66
+ layout = kwargs['layout']
67
+ del kwargs['layout']
68
+
69
+ if shape is None:
70
+ shape = self.__cal_shape(feats, coords)
71
+ if layout is None:
72
+ layout = self.__cal_layout(coords, shape[0])
73
+ if BACKEND == 'torchsparse':
74
+ self.data = SparseTensorData(feats, coords, **kwargs)
75
+ elif BACKEND == 'spconv':
76
+ spatial_shape = list(coords.max(0)[0] + 1)[1:]
77
+ self.data = SparseTensorData(feats.reshape(feats.shape[0], -1), coords, spatial_shape, shape[0], **kwargs)
78
+ self.data._features = feats
79
+ elif method_id == 1:
80
+ data, shape, layout = args + (None,) * (3 - len(args))
81
+ if 'data' in kwargs:
82
+ data = kwargs['data']
83
+ del kwargs['data']
84
+ if 'shape' in kwargs:
85
+ shape = kwargs['shape']
86
+ del kwargs['shape']
87
+ if 'layout' in kwargs:
88
+ layout = kwargs['layout']
89
+ del kwargs['layout']
90
+
91
+ self.data = data
92
+ if shape is None:
93
+ shape = self.__cal_shape(self.feats, self.coords)
94
+ if layout is None:
95
+ layout = self.__cal_layout(self.coords, shape[0])
96
+
97
+ self._shape = shape
98
+ self._layout = layout
99
+ self._scale = kwargs.get('scale', (1, 1, 1))
100
+ self._spatial_cache = kwargs.get('spatial_cache', {})
101
+
102
+ if DEBUG:
103
+ try:
104
+ assert self.feats.shape[0] == self.coords.shape[0], f"Invalid feats shape: {self.feats.shape}, coords shape: {self.coords.shape}"
105
+ assert self.shape == self.__cal_shape(self.feats, self.coords), f"Invalid shape: {self.shape}"
106
+ assert self.layout == self.__cal_layout(self.coords, self.shape[0]), f"Invalid layout: {self.layout}"
107
+ for i in range(self.shape[0]):
108
+ assert torch.all(self.coords[self.layout[i], 0] == i), f"The data of batch {i} is not contiguous"
109
+ except Exception as e:
110
+ print('Debugging information:')
111
+ print(f"- Shape: {self.shape}")
112
+ print(f"- Layout: {self.layout}")
113
+ print(f"- Scale: {self._scale}")
114
+ print(f"- Coords: {self.coords}")
115
+ raise e
116
+
117
+ def __cal_shape(self, feats, coords):
118
+ shape = []
119
+ shape.append(coords[:, 0].max().item() + 1)
120
+ shape.extend([*feats.shape[1:]])
121
+ return torch.Size(shape)
122
+
123
+ def __cal_layout(self, coords, batch_size):
124
+ seq_len = torch.bincount(coords[:, 0], minlength=batch_size)
125
+ offset = torch.cumsum(seq_len, dim=0)
126
+ layout = [slice((offset[i] - seq_len[i]).item(), offset[i].item()) for i in range(batch_size)]
127
+ return layout
128
+
129
+ @property
130
+ def shape(self) -> torch.Size:
131
+ return self._shape
132
+
133
+ def dim(self) -> int:
134
+ return len(self.shape)
135
+
136
+ @property
137
+ def layout(self) -> List[slice]:
138
+ return self._layout
139
+
140
+ @property
141
+ def feats(self) -> torch.Tensor:
142
+ if BACKEND == 'torchsparse':
143
+ return self.data.F
144
+ elif BACKEND == 'spconv':
145
+ return self.data.features
146
+
147
+ @feats.setter
148
+ def feats(self, value: torch.Tensor):
149
+ if BACKEND == 'torchsparse':
150
+ self.data.F = value
151
+ elif BACKEND == 'spconv':
152
+ self.data.features = value
153
+
154
+ @property
155
+ def coords(self) -> torch.Tensor:
156
+ if BACKEND == 'torchsparse':
157
+ return self.data.C
158
+ elif BACKEND == 'spconv':
159
+ return self.data.indices
160
+
161
+ @coords.setter
162
+ def coords(self, value: torch.Tensor):
163
+ if BACKEND == 'torchsparse':
164
+ self.data.C = value
165
+ elif BACKEND == 'spconv':
166
+ self.data.indices = value
167
+
168
+ @property
169
+ def dtype(self):
170
+ return self.feats.dtype
171
+
172
+ @property
173
+ def device(self):
174
+ return self.feats.device
175
+
176
+ @overload
177
+ def to(self, dtype: torch.dtype) -> 'SparseTensor': ...
178
+
179
+ @overload
180
+ def to(self, device: Optional[Union[str, torch.device]] = None, dtype: Optional[torch.dtype] = None) -> 'SparseTensor': ...
181
+
182
+ def to(self, *args, **kwargs) -> 'SparseTensor':
183
+ device = None
184
+ dtype = None
185
+ if len(args) == 2:
186
+ device, dtype = args
187
+ elif len(args) == 1:
188
+ if isinstance(args[0], torch.dtype):
189
+ dtype = args[0]
190
+ else:
191
+ device = args[0]
192
+ if 'dtype' in kwargs:
193
+ assert dtype is None, "to() received multiple values for argument 'dtype'"
194
+ dtype = kwargs['dtype']
195
+ if 'device' in kwargs:
196
+ assert device is None, "to() received multiple values for argument 'device'"
197
+ device = kwargs['device']
198
+
199
+ new_feats = self.feats.to(device=device, dtype=dtype)
200
+ new_coords = self.coords.to(device=device)
201
+ return self.replace(new_feats, new_coords)
202
+
203
+ def type(self, dtype):
204
+ new_feats = self.feats.type(dtype)
205
+ return self.replace(new_feats)
206
+
207
+ def cpu(self) -> 'SparseTensor':
208
+ new_feats = self.feats.cpu()
209
+ new_coords = self.coords.cpu()
210
+ return self.replace(new_feats, new_coords)
211
+
212
+ def cuda(self) -> 'SparseTensor':
213
+ new_feats = self.feats.cuda()
214
+ new_coords = self.coords.cuda()
215
+ return self.replace(new_feats, new_coords)
216
+
217
+ def half(self) -> 'SparseTensor':
218
+ new_feats = self.feats.half()
219
+ return self.replace(new_feats)
220
+
221
+ def float(self) -> 'SparseTensor':
222
+ new_feats = self.feats.float()
223
+ return self.replace(new_feats)
224
+
225
+ def detach(self) -> 'SparseTensor':
226
+ new_coords = self.coords.detach()
227
+ new_feats = self.feats.detach()
228
+ return self.replace(new_feats, new_coords)
229
+
230
+ def dense(self) -> torch.Tensor:
231
+ if BACKEND == 'torchsparse':
232
+ return self.data.dense()
233
+ elif BACKEND == 'spconv':
234
+ return self.data.dense()
235
+
236
+ def reshape(self, *shape) -> 'SparseTensor':
237
+ new_feats = self.feats.reshape(self.feats.shape[0], *shape)
238
+ return self.replace(new_feats)
239
+
240
+ def unbind(self, dim: int) -> List['SparseTensor']:
241
+ return sparse_unbind(self, dim)
242
+
243
+ def replace(self, feats: torch.Tensor, coords: Optional[torch.Tensor] = None) -> 'SparseTensor':
244
+ new_shape = [self.shape[0]]
245
+ new_shape.extend(feats.shape[1:])
246
+ if BACKEND == 'torchsparse':
247
+ new_data = SparseTensorData(
248
+ feats=feats,
249
+ coords=self.data.coords if coords is None else coords,
250
+ stride=self.data.stride,
251
+ spatial_range=self.data.spatial_range,
252
+ )
253
+ new_data._caches = self.data._caches
254
+ elif BACKEND == 'spconv':
255
+ new_data = SparseTensorData(
256
+ self.data.features.reshape(self.data.features.shape[0], -1),
257
+ self.data.indices,
258
+ self.data.spatial_shape,
259
+ self.data.batch_size,
260
+ self.data.grid,
261
+ self.data.voxel_num,
262
+ self.data.indice_dict
263
+ )
264
+ new_data._features = feats
265
+ new_data.benchmark = self.data.benchmark
266
+ new_data.benchmark_record = self.data.benchmark_record
267
+ new_data.thrust_allocator = self.data.thrust_allocator
268
+ new_data._timer = self.data._timer
269
+ new_data.force_algo = self.data.force_algo
270
+ new_data.int8_scale = self.data.int8_scale
271
+ if coords is not None:
272
+ new_data.indices = coords
273
+ new_tensor = SparseTensor(new_data, shape=torch.Size(new_shape), layout=self.layout, scale=self._scale, spatial_cache=self._spatial_cache)
274
+ return new_tensor
275
+
276
+ @staticmethod
277
+ def full(aabb, dim, value, dtype=torch.float32, device=None) -> 'SparseTensor':
278
+ N, C = dim
279
+ x = torch.arange(aabb[0], aabb[3] + 1)
280
+ y = torch.arange(aabb[1], aabb[4] + 1)
281
+ z = torch.arange(aabb[2], aabb[5] + 1)
282
+ coords = torch.stack(torch.meshgrid(x, y, z, indexing='ij'), dim=-1).reshape(-1, 3)
283
+ coords = torch.cat([
284
+ torch.arange(N).view(-1, 1).repeat(1, coords.shape[0]).view(-1, 1),
285
+ coords.repeat(N, 1),
286
+ ], dim=1).to(dtype=torch.int32, device=device)
287
+ feats = torch.full((coords.shape[0], C), value, dtype=dtype, device=device)
288
+ return SparseTensor(feats=feats, coords=coords)
289
+
290
+ def __merge_sparse_cache(self, other: 'SparseTensor') -> dict:
291
+ new_cache = {}
292
+ for k in set(list(self._spatial_cache.keys()) + list(other._spatial_cache.keys())):
293
+ if k in self._spatial_cache:
294
+ new_cache[k] = self._spatial_cache[k]
295
+ if k in other._spatial_cache:
296
+ if k not in new_cache:
297
+ new_cache[k] = other._spatial_cache[k]
298
+ else:
299
+ new_cache[k].update(other._spatial_cache[k])
300
+ return new_cache
301
+
302
+ def __neg__(self) -> 'SparseTensor':
303
+ return self.replace(-self.feats)
304
+
305
+ def __elemwise__(self, other: Union[torch.Tensor, 'SparseTensor'], op: callable) -> 'SparseTensor':
306
+ if isinstance(other, torch.Tensor):
307
+ try:
308
+ other = torch.broadcast_to(other, self.shape)
309
+ other = sparse_batch_broadcast(self, other)
310
+ except:
311
+ pass
312
+ if isinstance(other, SparseTensor):
313
+ other = other.feats
314
+ new_feats = op(self.feats, other)
315
+ new_tensor = self.replace(new_feats)
316
+ if isinstance(other, SparseTensor):
317
+ new_tensor._spatial_cache = self.__merge_sparse_cache(other)
318
+ return new_tensor
319
+
320
+ def __add__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
321
+ return self.__elemwise__(other, torch.add)
322
+
323
+ def __radd__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
324
+ return self.__elemwise__(other, torch.add)
325
+
326
+ def __sub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
327
+ return self.__elemwise__(other, torch.sub)
328
+
329
+ def __rsub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
330
+ return self.__elemwise__(other, lambda x, y: torch.sub(y, x))
331
+
332
+ def __mul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
333
+ return self.__elemwise__(other, torch.mul)
334
+
335
+ def __rmul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
336
+ return self.__elemwise__(other, torch.mul)
337
+
338
+ def __truediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
339
+ return self.__elemwise__(other, torch.div)
340
+
341
+ def __rtruediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
342
+ return self.__elemwise__(other, lambda x, y: torch.div(y, x))
343
+
344
+ def __getitem__(self, idx):
345
+ if isinstance(idx, int):
346
+ idx = [idx]
347
+ elif isinstance(idx, slice):
348
+ idx = range(*idx.indices(self.shape[0]))
349
+ elif isinstance(idx, torch.Tensor):
350
+ if idx.dtype == torch.bool:
351
+ assert idx.shape == (self.shape[0],), f"Invalid index shape: {idx.shape}"
352
+ idx = idx.nonzero().squeeze(1)
353
+ elif idx.dtype in [torch.int32, torch.int64]:
354
+ assert len(idx.shape) == 1, f"Invalid index shape: {idx.shape}"
355
+ else:
356
+ raise ValueError(f"Unknown index type: {idx.dtype}")
357
+ else:
358
+ raise ValueError(f"Unknown index type: {type(idx)}")
359
+
360
+ coords = []
361
+ feats = []
362
+ for new_idx, old_idx in enumerate(idx):
363
+ coords.append(self.coords[self.layout[old_idx]].clone())
364
+ coords[-1][:, 0] = new_idx
365
+ feats.append(self.feats[self.layout[old_idx]])
366
+ coords = torch.cat(coords, dim=0).contiguous()
367
+ feats = torch.cat(feats, dim=0).contiguous()
368
+ return SparseTensor(feats=feats, coords=coords)
369
+
370
+ def register_spatial_cache(self, key, value) -> None:
371
+ """
372
+ Register a spatial cache.
373
+ The spatial cache can be any thing you want to cache.
374
+ The registery and retrieval of the cache is based on current scale.
375
+ """
376
+ scale_key = str(self._scale)
377
+ if scale_key not in self._spatial_cache:
378
+ self._spatial_cache[scale_key] = {}
379
+ self._spatial_cache[scale_key][key] = value
380
+
381
+ def get_spatial_cache(self, key=None):
382
+ """
383
+ Get a spatial cache.
384
+ """
385
+ scale_key = str(self._scale)
386
+ cur_scale_cache = self._spatial_cache.get(scale_key, {})
387
+ if key is None:
388
+ return cur_scale_cache
389
+ return cur_scale_cache.get(key, None)
390
+
391
+
392
+ def sparse_batch_broadcast(input: SparseTensor, other: torch.Tensor) -> torch.Tensor:
393
+ """
394
+ Broadcast a per-batch tensor across the features of a sparse tensor along the batch dimension.
395
+
396
+ Args:
397
+ input (SparseTensor): Sparse tensor whose batch layout determines the broadcast.
398
+ other (torch.Tensor): Tensor with one row per batch element.
399
+ Returns: A tensor shaped like input.feats, with each row filled from its batch's entry in other.
400
+ """
401
+ coords, feats = input.coords, input.feats
402
+ broadcasted = torch.zeros_like(feats)
403
+ for k in range(input.shape[0]):
404
+ broadcasted[input.layout[k]] = other[k]
405
+ return broadcasted
406
+
407
+
408
+ def sparse_batch_op(input: SparseTensor, other: torch.Tensor, op: callable = torch.add) -> SparseTensor:
409
+ """
410
+ Broadcast a per-batch tensor across a sparse tensor along the batch dimension, then apply an elementwise operation.
411
+
412
+ Args:
413
+ input (SparseTensor): Sparse tensor to operate on.
414
+ other (torch.Tensor): Tensor with one row per batch element to broadcast.
415
+ op (callable): Operation to perform after broadcasting. Defaults to torch.add.
416
+ """
417
+ return input.replace(op(input.feats, sparse_batch_broadcast(input, other)))
418
+
419
+
420
+ def sparse_cat(inputs: List[SparseTensor], dim: int = 0) -> SparseTensor:
421
+ """
422
+ Concatenate a list of sparse tensors.
423
+
424
+ Args:
425
+ inputs (List[SparseTensor]): List of sparse tensors to concatenate.
426
+ """
427
+ if dim == 0:
428
+ start = 0
429
+ coords = []
430
+ for input in inputs:
431
+ coords.append(input.coords.clone())
432
+ coords[-1][:, 0] += start
433
+ start += input.shape[0]
434
+ coords = torch.cat(coords, dim=0)
435
+ feats = torch.cat([input.feats for input in inputs], dim=0)
436
+ output = SparseTensor(
437
+ coords=coords,
438
+ feats=feats,
439
+ )
440
+ else:
441
+ feats = torch.cat([input.feats for input in inputs], dim=dim)
442
+ output = inputs[0].replace(feats)
443
+
444
+ return output
445
+
446
+
447
+ def sparse_unbind(input: SparseTensor, dim: int) -> List[SparseTensor]:
448
+ """
449
+ Unbind a sparse tensor along a dimension.
450
+
451
+ Args:
452
+ input (SparseTensor): Sparse tensor to unbind.
453
+ dim (int): Dimension to unbind.
454
+ """
455
+ if dim == 0:
456
+ return [input[i] for i in range(input.shape[0])]
457
+ else:
458
+ feats = input.feats.unbind(dim)
459
+ return [input.replace(f) for f in feats]
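For reference, the `layout` property above is just a list of per-batch slices derived from the batch column of `coords` (see `__cal_layout`); this is what `sparse_batch_broadcast` and the attention modules iterate over. A minimal stand-alone illustration of that bookkeeping, assuming batch-contiguous coordinates as the class requires:

import torch

coords = torch.tensor([[0, 1, 2, 3],
                       [0, 4, 5, 6],
                       [1, 0, 0, 0]])            # batch ids: 0, 0, 1 (contiguous per batch)
batch_size = 2

seq_len = torch.bincount(coords[:, 0], minlength=batch_size)   # points per batch: [2, 1]
offset = torch.cumsum(seq_len, dim=0)                          # running end offsets: [2, 3]
layout = [slice((offset[i] - seq_len[i]).item(), offset[i].item()) for i in range(batch_size)]
print(layout)   # [slice(0, 2, None), slice(2, 3, None)]

# Broadcasting one row per batch element across the per-point features:
feats = torch.zeros(coords.shape[0], 4)
per_batch = torch.tensor([[1.0, 1, 1, 1], [2.0, 2, 2, 2]])
for b in range(batch_size):
    feats[layout[b]] = per_batch[b]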
trellis/modules/sparse/conv/__init__.py ADDED
@@ -0,0 +1,21 @@
1
+ from .. import BACKEND
2
+
3
+
4
+ SPCONV_ALGO = 'auto' # 'auto', 'implicit_gemm', 'native'
5
+
6
+ def __from_env():
7
+ import os
8
+
9
+ global SPCONV_ALGO
10
+ env_spconv_algo = os.environ.get('SPCONV_ALGO')
11
+ if env_spconv_algo is not None and env_spconv_algo in ['auto', 'implicit_gemm', 'native']:
12
+ SPCONV_ALGO = env_spconv_algo
13
+ print(f"[SPARSE][CONV] spconv algo: {SPCONV_ALGO}")
14
+
15
+
16
+ __from_env()
17
+
18
+ if BACKEND == 'torchsparse':
19
+ from .conv_torchsparse import *
20
+ elif BACKEND == 'spconv':
21
+ from .conv_spconv import *
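Since `__from_env()` reads `SPCONV_ALGO` once at import time, the algorithm has to be selected in the environment before this package is imported. A usage sketch (the import path assumes the repo layout added in this commit, and spconv or torchsparse being installed):

import os

# must be set before `trellis.modules.sparse` (and hence this sub-package) is imported
os.environ['SPCONV_ALGO'] = 'native'     # one of 'auto', 'implicit_gemm', 'native'

# import trellis.modules.sparse.conv    # picks the value up and prints the chosen algo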
trellis/modules/sparse/conv/conv_spconv.py ADDED
@@ -0,0 +1,80 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from .. import SparseTensor
4
+ from .. import DEBUG
5
+ from . import SPCONV_ALGO
6
+
7
+ class SparseConv3d(nn.Module):
8
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding=None, bias=True, indice_key=None):
9
+ super(SparseConv3d, self).__init__()
10
+ if 'spconv' not in globals():
11
+ import spconv.pytorch as spconv
12
+ algo = None
13
+ if SPCONV_ALGO == 'native':
14
+ algo = spconv.ConvAlgo.Native
15
+ elif SPCONV_ALGO == 'implicit_gemm':
16
+ algo = spconv.ConvAlgo.MaskImplicitGemm
17
+ if stride == 1 and (padding is None):
18
+ self.conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, dilation=dilation, bias=bias, indice_key=indice_key, algo=algo)
19
+ else:
20
+ self.conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias, indice_key=indice_key, algo=algo)
21
+ self.stride = tuple(stride) if isinstance(stride, (list, tuple)) else (stride, stride, stride)
22
+ self.padding = padding
23
+
24
+ def forward(self, x: SparseTensor) -> SparseTensor:
25
+ spatial_changed = any(s != 1 for s in self.stride) or (self.padding is not None)
26
+ new_data = self.conv(x.data)
27
+ new_shape = [x.shape[0], self.conv.out_channels]
28
+ new_layout = None if spatial_changed else x.layout
29
+
30
+ if spatial_changed and (x.shape[0] != 1):
31
+ # spconv with a non-1 stride breaks batch-contiguity of the output tensor, so re-sort by the batch coordinate
32
+ fwd = new_data.indices[:, 0].argsort()
33
+ bwd = torch.zeros_like(fwd).scatter_(0, fwd, torch.arange(fwd.shape[0], device=fwd.device))
34
+ sorted_feats = new_data.features[fwd]
35
+ sorted_coords = new_data.indices[fwd]
36
+ unsorted_data = new_data
37
+ new_data = spconv.SparseConvTensor(sorted_feats, sorted_coords, unsorted_data.spatial_shape, unsorted_data.batch_size) # type: ignore
38
+
39
+ out = SparseTensor(
40
+ new_data, shape=torch.Size(new_shape), layout=new_layout,
41
+ scale=tuple([s * stride for s, stride in zip(x._scale, self.stride)]),
42
+ spatial_cache=x._spatial_cache,
43
+ )
44
+
45
+ if spatial_changed and (x.shape[0] != 1):
46
+ out.register_spatial_cache(f'conv_{self.stride}_unsorted_data', unsorted_data)
47
+ out.register_spatial_cache(f'conv_{self.stride}_sort_bwd', bwd)
48
+
49
+ return out
50
+
51
+
52
+ class SparseInverseConv3d(nn.Module):
53
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
54
+ super(SparseInverseConv3d, self).__init__()
55
+ if 'spconv' not in globals():
56
+ import spconv.pytorch as spconv
57
+ self.conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, bias=bias, indice_key=indice_key)
58
+ self.stride = tuple(stride) if isinstance(stride, (list, tuple)) else (stride, stride, stride)
59
+
60
+ def forward(self, x: SparseTensor) -> SparseTensor:
61
+ spatial_changed = any(s != 1 for s in self.stride)
62
+ if spatial_changed:
63
+ # recover the original spconv order
64
+ data = x.get_spatial_cache(f'conv_{self.stride}_unsorted_data')
65
+ bwd = x.get_spatial_cache(f'conv_{self.stride}_sort_bwd')
66
+ data = data.replace_feature(x.feats[bwd])
67
+ if DEBUG:
68
+ assert torch.equal(data.indices, x.coords[bwd]), 'Recover the original order failed'
69
+ else:
70
+ data = x.data
71
+
72
+ new_data = self.conv(data)
73
+ new_shape = [x.shape[0], self.conv.out_channels]
74
+ new_layout = None if spatial_changed else x.layout
75
+ out = SparseTensor(
76
+ new_data, shape=torch.Size(new_shape), layout=new_layout,
77
+ scale=tuple([s // stride for s, stride in zip(x._scale, self.stride)]),
78
+ spatial_cache=x._spatial_cache,
79
+ )
80
+ return out
trellis/modules/sparse/conv/conv_torchsparse.py ADDED
@@ -0,0 +1,38 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from .. import SparseTensor
4
+
5
+
6
+ class SparseConv3d(nn.Module):
7
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
8
+ super(SparseConv3d, self).__init__()
9
+ if 'torchsparse' not in globals():
10
+ import torchsparse
11
+ self.conv = torchsparse.nn.Conv3d(in_channels, out_channels, kernel_size, stride, 0, dilation, bias)
12
+
13
+ def forward(self, x: SparseTensor) -> SparseTensor:
14
+ out = self.conv(x.data)
15
+ new_shape = [x.shape[0], self.conv.out_channels]
16
+ out = SparseTensor(out, shape=torch.Size(new_shape), layout=x.layout if all(s == 1 for s in self.conv.stride) else None)
17
+ out._spatial_cache = x._spatial_cache
18
+ out._scale = tuple([s * stride for s, stride in zip(x._scale, self.conv.stride)])
19
+ return out
20
+
21
+
22
+ class SparseInverseConv3d(nn.Module):
23
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
24
+ super(SparseInverseConv3d, self).__init__()
25
+ if 'torchsparse' not in globals():
26
+ import torchsparse
27
+ self.conv = torchsparse.nn.Conv3d(in_channels, out_channels, kernel_size, stride, 0, dilation, bias, transposed=True)
28
+
29
+ def forward(self, x: SparseTensor) -> SparseTensor:
30
+ out = self.conv(x.data)
31
+ new_shape = [x.shape[0], self.conv.out_channels]
32
+ out = SparseTensor(out, shape=torch.Size(new_shape), layout=x.layout if all(s == 1 for s in self.conv.stride) else None)
33
+ out._spatial_cache = x._spatial_cache
34
+ out._scale = tuple([s // stride for s, stride in zip(x._scale, self.conv.stride)])
35
+ return out
36
+
37
+
38
+
trellis/modules/sparse/linear.py ADDED
@@ -0,0 +1,15 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from . import SparseTensor
4
+
5
+ __all__ = [
6
+ 'SparseLinear'
7
+ ]
8
+
9
+
10
+ class SparseLinear(nn.Linear):
11
+ def __init__(self, in_features, out_features, bias=True):
12
+ super(SparseLinear, self).__init__(in_features, out_features, bias)
13
+
14
+ def forward(self, input: SparseTensor) -> SparseTensor:
15
+ return input.replace(super().forward(input.feats))
trellis/modules/sparse/nonlinearity.py ADDED
@@ -0,0 +1,35 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from . import SparseTensor
4
+
5
+ __all__ = [
6
+ 'SparseReLU',
7
+ 'SparseSiLU',
8
+ 'SparseGELU',
9
+ 'SparseActivation'
10
+ ]
11
+
12
+
13
+ class SparseReLU(nn.ReLU):
14
+ def forward(self, input: SparseTensor) -> SparseTensor:
15
+ return input.replace(super().forward(input.feats))
16
+
17
+
18
+ class SparseSiLU(nn.SiLU):
19
+ def forward(self, input: SparseTensor) -> SparseTensor:
20
+ return input.replace(super().forward(input.feats))
21
+
22
+
23
+ class SparseGELU(nn.GELU):
24
+ def forward(self, input: SparseTensor) -> SparseTensor:
25
+ return input.replace(super().forward(input.feats))
26
+
27
+
28
+ class SparseActivation(nn.Module):
29
+ def __init__(self, activation: nn.Module):
30
+ super().__init__()
31
+ self.activation = activation
32
+
33
+ def forward(self, input: SparseTensor) -> SparseTensor:
34
+ return input.replace(self.activation(input.feats))
35
+
trellis/modules/sparse/norm.py ADDED
@@ -0,0 +1,58 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from . import SparseTensor
4
+ from . import DEBUG
5
+
6
+ __all__ = [
7
+ 'SparseGroupNorm',
8
+ 'SparseLayerNorm',
9
+ 'SparseGroupNorm32',
10
+ 'SparseLayerNorm32',
11
+ ]
12
+
13
+
14
+ class SparseGroupNorm(nn.GroupNorm):
15
+ def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
16
+ super(SparseGroupNorm, self).__init__(num_groups, num_channels, eps, affine)
17
+
18
+ def forward(self, input: SparseTensor) -> SparseTensor:
19
+ nfeats = torch.zeros_like(input.feats)
20
+ for k in range(input.shape[0]):
21
+ if DEBUG:
22
+ assert (input.coords[input.layout[k], 0] == k).all(), f"SparseGroupNorm: batch index mismatch"
23
+ bfeats = input.feats[input.layout[k]]
24
+ bfeats = bfeats.permute(1, 0).reshape(1, input.shape[1], -1)
25
+ bfeats = super().forward(bfeats)
26
+ bfeats = bfeats.reshape(input.shape[1], -1).permute(1, 0)
27
+ nfeats[input.layout[k]] = bfeats
28
+ return input.replace(nfeats)
29
+
30
+
31
+ class SparseLayerNorm(nn.LayerNorm):
32
+ def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
33
+ super(SparseLayerNorm, self).__init__(normalized_shape, eps, elementwise_affine)
34
+
35
+ def forward(self, input: SparseTensor) -> SparseTensor:
36
+ nfeats = torch.zeros_like(input.feats)
37
+ for k in range(input.shape[0]):
38
+ bfeats = input.feats[input.layout[k]]
39
+ bfeats = bfeats.permute(1, 0).reshape(1, input.shape[1], -1)
40
+ bfeats = super().forward(bfeats)
41
+ bfeats = bfeats.reshape(input.shape[1], -1).permute(1, 0)
42
+ nfeats[input.layout[k]] = bfeats
43
+ return input.replace(nfeats)
44
+
45
+
46
+ class SparseGroupNorm32(SparseGroupNorm):
47
+ """
48
+ A GroupNorm layer that converts to float32 before the forward pass.
49
+ """
50
+ def forward(self, x: SparseTensor) -> SparseTensor:
51
+ return super().forward(x.float()).type(x.dtype)
52
+
53
+ class SparseLayerNorm32(SparseLayerNorm):
54
+ """
55
+ A LayerNorm layer that converts to float32 before the forward pass.
56
+ """
57
+ def forward(self, x: SparseTensor) -> SparseTensor:
58
+ return super().forward(x.float()).type(x.dtype)
trellis/modules/sparse/spatial.py ADDED
@@ -0,0 +1,110 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from . import SparseTensor
5
+
6
+ __all__ = [
7
+ 'SparseDownsample',
8
+ 'SparseUpsample',
9
+ 'SparseSubdivide'
10
+ ]
11
+
12
+
13
+ class SparseDownsample(nn.Module):
14
+ """
15
+ Downsample a sparse tensor by a factor of `factor`.
16
+ Implemented as average pooling.
17
+ """
18
+ def __init__(self, factor: Union[int, Tuple[int, ...], List[int]]):
19
+ super(SparseDownsample, self).__init__()
20
+ self.factor = tuple(factor) if isinstance(factor, (list, tuple)) else factor
21
+
22
+ def forward(self, input: SparseTensor) -> SparseTensor:
23
+ DIM = input.coords.shape[-1] - 1
24
+ factor = self.factor if isinstance(self.factor, tuple) else (self.factor,) * DIM
25
+ assert DIM == len(factor), 'Input coordinates must have the same dimension as the downsample factor.'
26
+
27
+ coord = list(input.coords.unbind(dim=-1))
28
+ for i, f in enumerate(factor):
29
+ coord[i+1] = coord[i+1] // f
30
+
31
+ MAX = [coord[i+1].max().item() + 1 for i in range(DIM)]
32
+ OFFSET = torch.cumprod(torch.tensor(MAX[::-1]), 0).tolist()[::-1] + [1]
33
+ code = sum([c * o for c, o in zip(coord, OFFSET)])
34
+ code, idx = code.unique(return_inverse=True)
35
+
36
+ new_feats = torch.scatter_reduce(
37
+ torch.zeros(code.shape[0], input.feats.shape[1], device=input.feats.device, dtype=input.feats.dtype),
38
+ dim=0,
39
+ index=idx.unsqueeze(1).expand(-1, input.feats.shape[1]),
40
+ src=input.feats,
41
+ reduce='mean'
42
+ )
43
+ new_coords = torch.stack(
44
+ [code // OFFSET[0]] +
45
+ [(code // OFFSET[i+1]) % MAX[i] for i in range(DIM)],
46
+ dim=-1
47
+ )
48
+ out = SparseTensor(new_feats, new_coords, input.shape,)
49
+ out._scale = tuple([s // f for s, f in zip(input._scale, factor)])
50
+ out._spatial_cache = input._spatial_cache
51
+
52
+ out.register_spatial_cache(f'upsample_{factor}_coords', input.coords)
53
+ out.register_spatial_cache(f'upsample_{factor}_layout', input.layout)
54
+ out.register_spatial_cache(f'upsample_{factor}_idx', idx)
55
+
56
+ return out
57
+
58
+
59
+ class SparseUpsample(nn.Module):
60
+ """
61
+ Upsample a sparse tensor by a factor of `factor`.
62
+ Implemented as nearest neighbor interpolation.
63
+ """
64
+ def __init__(self, factor: Union[int, Tuple[int, int, int], List[int]]):
65
+ super(SparseUpsample, self).__init__()
66
+ self.factor = tuple(factor) if isinstance(factor, (list, tuple)) else factor
67
+
68
+ def forward(self, input: SparseTensor) -> SparseTensor:
69
+ DIM = input.coords.shape[-1] - 1
70
+ factor = self.factor if isinstance(self.factor, tuple) else (self.factor,) * DIM
71
+ assert DIM == len(factor), 'Input coordinates must have the same dimension as the upsample factor.'
72
+
73
+ new_coords = input.get_spatial_cache(f'upsample_{factor}_coords')
74
+ new_layout = input.get_spatial_cache(f'upsample_{factor}_layout')
75
+ idx = input.get_spatial_cache(f'upsample_{factor}_idx')
76
+ if any([x is None for x in [new_coords, new_layout, idx]]):
77
+ raise ValueError('Upsample cache not found. SparseUpsample must be paired with SparseDownsample.')
78
+ new_feats = input.feats[idx]
79
+ out = SparseTensor(new_feats, new_coords, input.shape, new_layout)
80
+ out._scale = tuple([s * f for s, f in zip(input._scale, factor)])
81
+ out._spatial_cache = input._spatial_cache
82
+ return out
83
+
84
+ class SparseSubdivide(nn.Module):
85
+ """
86
+ Upsample a sparse tensor by a factor of `factor`.
87
+ Implemented as nearest neighbor interpolation.
88
+ """
89
+ def __init__(self):
90
+ super(SparseSubdivide, self).__init__()
91
+
92
+ def forward(self, input: SparseTensor) -> SparseTensor:
93
+ DIM = input.coords.shape[-1] - 1
94
+ # upsample scale=2^DIM
95
+ n_cube = torch.ones([2] * DIM, device=input.device, dtype=torch.int)
96
+ n_coords = torch.nonzero(n_cube)
97
+ n_coords = torch.cat([torch.zeros_like(n_coords[:, :1]), n_coords], dim=-1)
98
+ factor = n_coords.shape[0]
99
+ assert factor == 2 ** DIM
100
+ # print(n_coords.shape)
101
+ new_coords = input.coords.clone()
102
+ new_coords[:, 1:] *= 2
103
+ new_coords = new_coords.unsqueeze(1) + n_coords.unsqueeze(0).to(new_coords.dtype)
104
+
105
+ new_feats = input.feats.unsqueeze(1).expand(input.feats.shape[0], factor, *input.feats.shape[1:])
106
+ out = SparseTensor(new_feats.flatten(0, 1), new_coords.flatten(0, 1), input.shape)
107
+ out._scale = input._scale * 2
108
+ out._spatial_cache = input._spatial_cache
109
+ return out
110
+
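`SparseDownsample` pools by hashing each point's coarsened coordinates into a code, deduplicating the codes with `unique(return_inverse=True)`, and reducing features into the surviving voxels with `scatter_reduce`. The toy sketch below shows just that reduction step on plain tensors; note it passes `include_self=False` so the zero-initialized output rows do not enter the mean, whereas the call above relies on the default.

import torch

codes = torch.tensor([7, 3, 7, 3, 9])                        # coarse voxel id per point
feats = torch.arange(5, dtype=torch.float32).unsqueeze(1)    # one feature per point

uniq, idx = codes.unique(return_inverse=True)                # uniq = [3, 7, 9]
pooled = torch.zeros(uniq.shape[0], feats.shape[1]).scatter_reduce(
    0, idx.unsqueeze(1).expand(-1, feats.shape[1]), feats,
    reduce='mean', include_self=False,
)
print(pooled.squeeze(1))   # tensor([2., 1., 4.])  == means of {1, 3}, {0, 2}, {4}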
trellis/modules/sparse/transformer/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .blocks import *
2
+ from .modulated import *
trellis/modules/sparse/transformer/blocks.py ADDED
@@ -0,0 +1,151 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..basic import SparseTensor
5
+ from ..linear import SparseLinear
6
+ from ..nonlinearity import SparseGELU
7
+ from ..attention import SparseMultiHeadAttention, SerializeMode
8
+ from ...norm import LayerNorm32
9
+
10
+
11
+ class SparseFeedForwardNet(nn.Module):
12
+ def __init__(self, channels: int, mlp_ratio: float = 4.0):
13
+ super().__init__()
14
+ self.mlp = nn.Sequential(
15
+ SparseLinear(channels, int(channels * mlp_ratio)),
16
+ SparseGELU(approximate="tanh"),
17
+ SparseLinear(int(channels * mlp_ratio), channels),
18
+ )
19
+
20
+ def forward(self, x: SparseTensor) -> SparseTensor:
21
+ return self.mlp(x)
22
+
23
+
24
+ class SparseTransformerBlock(nn.Module):
25
+ """
26
+ Sparse Transformer block (MSA + FFN).
27
+ """
28
+ def __init__(
29
+ self,
30
+ channels: int,
31
+ num_heads: int,
32
+ mlp_ratio: float = 4.0,
33
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
34
+ window_size: Optional[int] = None,
35
+ shift_sequence: Optional[int] = None,
36
+ shift_window: Optional[Tuple[int, int, int]] = None,
37
+ serialize_mode: Optional[SerializeMode] = None,
38
+ use_checkpoint: bool = False,
39
+ use_rope: bool = False,
40
+ qk_rms_norm: bool = False,
41
+ qkv_bias: bool = True,
42
+ ln_affine: bool = False,
43
+ ):
44
+ super().__init__()
45
+ self.use_checkpoint = use_checkpoint
46
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
47
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
48
+ self.attn = SparseMultiHeadAttention(
49
+ channels,
50
+ num_heads=num_heads,
51
+ attn_mode=attn_mode,
52
+ window_size=window_size,
53
+ shift_sequence=shift_sequence,
54
+ shift_window=shift_window,
55
+ serialize_mode=serialize_mode,
56
+ qkv_bias=qkv_bias,
57
+ use_rope=use_rope,
58
+ qk_rms_norm=qk_rms_norm,
59
+ )
60
+ self.mlp = SparseFeedForwardNet(
61
+ channels,
62
+ mlp_ratio=mlp_ratio,
63
+ )
64
+
65
+ def _forward(self, x: SparseTensor) -> SparseTensor:
66
+ h = x.replace(self.norm1(x.feats))
67
+ h = self.attn(h)
68
+ x = x + h
69
+ h = x.replace(self.norm2(x.feats))
70
+ h = self.mlp(h)
71
+ x = x + h
72
+ return x
73
+
74
+ def forward(self, x: SparseTensor) -> SparseTensor:
75
+ if self.use_checkpoint:
76
+ return torch.utils.checkpoint.checkpoint(self._forward, x, use_reentrant=False)
77
+ else:
78
+ return self._forward(x)
79
+
80
+
81
+ class SparseTransformerCrossBlock(nn.Module):
82
+ """
83
+ Sparse Transformer cross-attention block (MSA + MCA + FFN).
84
+ """
85
+ def __init__(
86
+ self,
87
+ channels: int,
88
+ ctx_channels: int,
89
+ num_heads: int,
90
+ mlp_ratio: float = 4.0,
91
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
92
+ window_size: Optional[int] = None,
93
+ shift_sequence: Optional[int] = None,
94
+ shift_window: Optional[Tuple[int, int, int]] = None,
95
+ serialize_mode: Optional[SerializeMode] = None,
96
+ use_checkpoint: bool = False,
97
+ use_rope: bool = False,
98
+ qk_rms_norm: bool = False,
99
+ qk_rms_norm_cross: bool = False,
100
+ qkv_bias: bool = True,
101
+ ln_affine: bool = False,
102
+ ):
103
+ super().__init__()
104
+ self.use_checkpoint = use_checkpoint
105
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
106
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
107
+ self.norm3 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
108
+ self.self_attn = SparseMultiHeadAttention(
109
+ channels,
110
+ num_heads=num_heads,
111
+ type="self",
112
+ attn_mode=attn_mode,
113
+ window_size=window_size,
114
+ shift_sequence=shift_sequence,
115
+ shift_window=shift_window,
116
+ serialize_mode=serialize_mode,
117
+ qkv_bias=qkv_bias,
118
+ use_rope=use_rope,
119
+ qk_rms_norm=qk_rms_norm,
120
+ )
121
+ self.cross_attn = SparseMultiHeadAttention(
122
+ channels,
123
+ ctx_channels=ctx_channels,
124
+ num_heads=num_heads,
125
+ type="cross",
126
+ attn_mode="full",
127
+ qkv_bias=qkv_bias,
128
+ qk_rms_norm=qk_rms_norm_cross,
129
+ )
130
+ self.mlp = SparseFeedForwardNet(
131
+ channels,
132
+ mlp_ratio=mlp_ratio,
133
+ )
134
+
135
+ def _forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor):
136
+ h = x.replace(self.norm1(x.feats))
137
+ h = self.self_attn(h)
138
+ x = x + h
139
+ h = x.replace(self.norm2(x.feats))
140
+ h = self.cross_attn(h, context)
141
+ x = x + h
142
+ h = x.replace(self.norm3(x.feats))
143
+ h = self.mlp(h)
144
+ x = x + h
145
+ return x
146
+
147
+ def forward(self, x: SparseTensor, context: torch.Tensor):
148
+ if self.use_checkpoint:
149
+ return torch.utils.checkpoint.checkpoint(self._forward, x, context, use_reentrant=False)
150
+ else:
151
+ return self._forward(x, context)
trellis/modules/sparse/transformer/modulated.py ADDED
@@ -0,0 +1,166 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..basic import SparseTensor
5
+ from ..attention import SparseMultiHeadAttention, SerializeMode
6
+ from ...norm import LayerNorm32
7
+ from .blocks import SparseFeedForwardNet
8
+
9
+
10
+ class ModulatedSparseTransformerBlock(nn.Module):
11
+ """
12
+ Sparse Transformer block (MSA + FFN) with adaptive layer norm conditioning.
13
+ """
14
+ def __init__(
15
+ self,
16
+ channels: int,
17
+ num_heads: int,
18
+ mlp_ratio: float = 4.0,
19
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
20
+ window_size: Optional[int] = None,
21
+ shift_sequence: Optional[int] = None,
22
+ shift_window: Optional[Tuple[int, int, int]] = None,
23
+ serialize_mode: Optional[SerializeMode] = None,
24
+ use_checkpoint: bool = False,
25
+ use_rope: bool = False,
26
+ qk_rms_norm: bool = False,
27
+ qkv_bias: bool = True,
28
+ share_mod: bool = False,
29
+ ):
30
+ super().__init__()
31
+ self.use_checkpoint = use_checkpoint
32
+ self.share_mod = share_mod
33
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
34
+ self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
35
+ self.attn = SparseMultiHeadAttention(
36
+ channels,
37
+ num_heads=num_heads,
38
+ attn_mode=attn_mode,
39
+ window_size=window_size,
40
+ shift_sequence=shift_sequence,
41
+ shift_window=shift_window,
42
+ serialize_mode=serialize_mode,
43
+ qkv_bias=qkv_bias,
44
+ use_rope=use_rope,
45
+ qk_rms_norm=qk_rms_norm,
46
+ )
47
+ self.mlp = SparseFeedForwardNet(
48
+ channels,
49
+ mlp_ratio=mlp_ratio,
50
+ )
51
+ if not share_mod:
52
+ self.adaLN_modulation = nn.Sequential(
53
+ nn.SiLU(),
54
+ nn.Linear(channels, 6 * channels, bias=True)
55
+ )
56
+
57
+ def _forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor:
58
+ if self.share_mod:
59
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
60
+ else:
61
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
62
+ h = x.replace(self.norm1(x.feats))
63
+ h = h * (1 + scale_msa) + shift_msa
64
+ h = self.attn(h)
65
+ h = h * gate_msa
66
+ x = x + h
67
+ h = x.replace(self.norm2(x.feats))
68
+ h = h * (1 + scale_mlp) + shift_mlp
69
+ h = self.mlp(h)
70
+ h = h * gate_mlp
71
+ x = x + h
72
+ return x
73
+
74
+ def forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor:
75
+ if self.use_checkpoint:
76
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False)
77
+ else:
78
+ return self._forward(x, mod)
79
+
80
+
81
+ class ModulatedSparseTransformerCrossBlock(nn.Module):
82
+ """
83
+ Sparse Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning.
84
+ """
85
+ def __init__(
86
+ self,
87
+ channels: int,
88
+ ctx_channels: int,
89
+ num_heads: int,
90
+ mlp_ratio: float = 4.0,
91
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
92
+ window_size: Optional[int] = None,
93
+ shift_sequence: Optional[int] = None,
94
+ shift_window: Optional[Tuple[int, int, int]] = None,
95
+ serialize_mode: Optional[SerializeMode] = None,
96
+ use_checkpoint: bool = False,
97
+ use_rope: bool = False,
98
+ qk_rms_norm: bool = False,
99
+ qk_rms_norm_cross: bool = False,
100
+ qkv_bias: bool = True,
101
+ share_mod: bool = False,
102
+
103
+ ):
104
+ super().__init__()
105
+ self.use_checkpoint = use_checkpoint
106
+ self.share_mod = share_mod
107
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
108
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
109
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
110
+ self.self_attn = SparseMultiHeadAttention(
111
+ channels,
112
+ num_heads=num_heads,
113
+ type="self",
114
+ attn_mode=attn_mode,
115
+ window_size=window_size,
116
+ shift_sequence=shift_sequence,
117
+ shift_window=shift_window,
118
+ serialize_mode=serialize_mode,
119
+ qkv_bias=qkv_bias,
120
+ use_rope=use_rope,
121
+ qk_rms_norm=qk_rms_norm,
122
+ )
123
+ self.cross_attn = SparseMultiHeadAttention(
124
+ channels,
125
+ ctx_channels=ctx_channels,
126
+ num_heads=num_heads,
127
+ type="cross",
128
+ attn_mode="full",
129
+ qkv_bias=qkv_bias,
130
+ qk_rms_norm=qk_rms_norm_cross,
131
+ )
132
+ self.mlp = SparseFeedForwardNet(
133
+ channels,
134
+ mlp_ratio=mlp_ratio,
135
+ )
136
+ if not share_mod:
137
+ self.adaLN_modulation = nn.Sequential(
138
+ nn.SiLU(),
139
+ nn.Linear(channels, 6 * channels, bias=True)
140
+ )
141
+
142
+ def _forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor) -> SparseTensor:
143
+ if self.share_mod:
144
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
145
+ else:
146
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
147
+ h = x.replace(self.norm1(x.feats))
148
+ h = h * (1 + scale_msa) + shift_msa
149
+ h = self.self_attn(h)
150
+ h = h * gate_msa
151
+ x = x + h
152
+ h = x.replace(self.norm2(x.feats))
153
+ h = self.cross_attn(h, context)
154
+ x = x + h
155
+ h = x.replace(self.norm3(x.feats))
156
+ h = h * (1 + scale_mlp) + shift_mlp
157
+ h = self.mlp(h)
158
+ h = h * gate_mlp
159
+ x = x + h
160
+ return x
161
+
162
+ def forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor) -> SparseTensor:
163
+ if self.use_checkpoint:
164
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
165
+ else:
166
+ return self._forward(x, mod, context)
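The `_forward` methods above follow the DiT-style adaLN pattern: a conditioning vector is projected (SiLU + Linear) to 6*channels values, chunked into shift/scale/gate triples, and applied around each pre-norm branch. A dense-tensor sketch of one such branch, with one token per batch row for brevity (the sparse version broadcasts `mod` per batch through the SparseTensor arithmetic defined earlier):

import torch
import torch.nn as nn

B, C = 2, 8
adaLN = nn.Sequential(nn.SiLU(), nn.Linear(C, 6 * C))
norm = nn.LayerNorm(C, elementwise_affine=False, eps=1e-6)

x = torch.randn(B, C)       # token features
mod = torch.randn(B, C)     # conditioning embedding (e.g. timestep + class)

shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = adaLN(mod).chunk(6, dim=1)
h = norm(x) * (1 + scale_msa) + shift_msa   # modulated pre-norm input to attention
# ... attention would run on h here ...
x = x + gate_msa * h                        # gated residual update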
trellis/modules/spatial.py ADDED
@@ -0,0 +1,48 @@
1
+ import torch
2
+
3
+
4
+ def pixel_shuffle_3d(x: torch.Tensor, scale_factor: int) -> torch.Tensor:
5
+ """
6
+ 3D pixel shuffle.
7
+ """
8
+ B, C, H, W, D = x.shape
9
+ C_ = C // scale_factor**3
10
+ x = x.reshape(B, C_, scale_factor, scale_factor, scale_factor, H, W, D)
11
+ x = x.permute(0, 1, 5, 2, 6, 3, 7, 4)
12
+ x = x.reshape(B, C_, H*scale_factor, W*scale_factor, D*scale_factor)
13
+ return x
14
+
15
+
16
+ def patchify(x: torch.Tensor, patch_size: int):
17
+ """
18
+ Patchify a tensor.
19
+
20
+ Args:
21
+ x (torch.Tensor): (N, C, *spatial) tensor
22
+ patch_size (int): Patch size
23
+ """
24
+ DIM = x.dim() - 2
25
+ for d in range(2, DIM + 2):
26
+ assert x.shape[d] % patch_size == 0, f"Dimension {d} of input tensor must be divisible by patch size, got {x.shape[d]} and {patch_size}"
27
+
28
+ x = x.reshape(*x.shape[:2], *sum([[x.shape[d] // patch_size, patch_size] for d in range(2, DIM + 2)], []))
29
+ x = x.permute(0, 1, *([2 * i + 3 for i in range(DIM)] + [2 * i + 2 for i in range(DIM)]))
30
+ x = x.reshape(x.shape[0], x.shape[1] * (patch_size ** DIM), *(x.shape[-DIM:]))
31
+ return x
32
+
33
+
34
+ def unpatchify(x: torch.Tensor, patch_size: int):
35
+ """
36
+ Unpatchify a tensor.
37
+
38
+ Args:
39
+ x (torch.Tensor): (N, C, *spatial) tensor
40
+ patch_size (int): Patch size
41
+ """
42
+ DIM = x.dim() - 2
43
+ assert x.shape[1] % (patch_size ** DIM) == 0, f"Second dimension of input tensor must be divisible by patch size to unpatchify, got {x.shape[1]} and {patch_size ** DIM}"
44
+
45
+ x = x.reshape(x.shape[0], x.shape[1] // (patch_size ** DIM), *([patch_size] * DIM), *(x.shape[-DIM:]))
46
+ x = x.permute(0, 1, *(sum([[2 + DIM + i, 2 + i] for i in range(DIM)], [])))
47
+ x = x.reshape(x.shape[0], x.shape[1], *[x.shape[2 + 2 * i] * patch_size for i in range(DIM)])
48
+ return x
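A quick shape check for the helpers above (illustrative; the shapes follow directly from the reshapes): pixel_shuffle_3d trades channels for spatial resolution, and unpatchify exactly inverts patchify.

import torch
from trellis.modules.spatial import pixel_shuffle_3d, patchify, unpatchify

x = torch.randn(2, 16, 4, 4, 4)           # (B, C, H, W, D)
y = pixel_shuffle_3d(x, scale_factor=2)   # C: 16 -> 16 / 2**3 = 2, spatial: 4 -> 8
assert y.shape == (2, 2, 8, 8, 8)

z = torch.randn(2, 3, 8, 8, 8)
p = patchify(z, patch_size=2)             # channels gain a factor of 2**3
assert p.shape == (2, 24, 4, 4, 4)
assert torch.equal(unpatchify(p, patch_size=2), z)   # exact round trip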
trellis/modules/transformer/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .blocks import *
2
+ from .modulated import *
trellis/modules/transformer/blocks.py ADDED
@@ -0,0 +1,182 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..attention import MultiHeadAttention
5
+ from ..norm import LayerNorm32
6
+
7
+
8
+ class AbsolutePositionEmbedder(nn.Module):
9
+ """
10
+ Embeds spatial positions into vector representations.
11
+ """
12
+ def __init__(self, channels: int, in_channels: int = 3):
13
+ super().__init__()
14
+ self.channels = channels
15
+ self.in_channels = in_channels
16
+ self.freq_dim = channels // in_channels // 2
17
+ self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
18
+ self.freqs = 1.0 / (10000 ** self.freqs)
19
+
20
+ def _sin_cos_embedding(self, x: torch.Tensor) -> torch.Tensor:
21
+ """
22
+ Create sinusoidal position embeddings.
23
+
24
+ Args:
25
+ x: a 1-D Tensor of N indices
26
+
27
+ Returns:
28
+ an (N, D) Tensor of positional embeddings.
29
+ """
30
+ self.freqs = self.freqs.to(x.device)
31
+ out = torch.outer(x, self.freqs)
32
+ out = torch.cat([torch.sin(out), torch.cos(out)], dim=-1)
33
+ return out
34
+
35
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
36
+ """
37
+ Args:
38
+ x (torch.Tensor): (N, D) tensor of spatial positions
39
+ """
40
+ N, D = x.shape
41
+ assert D == self.in_channels, "Input dimension must match number of input channels"
42
+ embed = self._sin_cos_embedding(x.reshape(-1))
43
+ embed = embed.reshape(N, -1)
44
+ if embed.shape[1] < self.channels:
45
+ embed = torch.cat([embed, torch.zeros(N, self.channels - embed.shape[1], device=embed.device)], dim=-1)
46
+ return embed
47
+
48
+
49
+ class FeedForwardNet(nn.Module):
50
+ def __init__(self, channels: int, mlp_ratio: float = 4.0):
51
+ super().__init__()
52
+ self.mlp = nn.Sequential(
53
+ nn.Linear(channels, int(channels * mlp_ratio)),
54
+ nn.GELU(approximate="tanh"),
55
+ nn.Linear(int(channels * mlp_ratio), channels),
56
+ )
57
+
58
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
59
+ return self.mlp(x)
60
+
61
+
62
+ class TransformerBlock(nn.Module):
63
+ """
64
+ Transformer block (MSA + FFN).
65
+ """
66
+ def __init__(
67
+ self,
68
+ channels: int,
69
+ num_heads: int,
70
+ mlp_ratio: float = 4.0,
71
+ attn_mode: Literal["full", "windowed"] = "full",
72
+ window_size: Optional[int] = None,
73
+ shift_window: Optional[int] = None,
74
+ use_checkpoint: bool = False,
75
+ use_rope: bool = False,
76
+ qk_rms_norm: bool = False,
77
+ qkv_bias: bool = True,
78
+ ln_affine: bool = False,
79
+ ):
80
+ super().__init__()
81
+ self.use_checkpoint = use_checkpoint
82
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
83
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
84
+ self.attn = MultiHeadAttention(
85
+ channels,
86
+ num_heads=num_heads,
87
+ attn_mode=attn_mode,
88
+ window_size=window_size,
89
+ shift_window=shift_window,
90
+ qkv_bias=qkv_bias,
91
+ use_rope=use_rope,
92
+ qk_rms_norm=qk_rms_norm,
93
+ )
94
+ self.mlp = FeedForwardNet(
95
+ channels,
96
+ mlp_ratio=mlp_ratio,
97
+ )
98
+
99
+ def _forward(self, x: torch.Tensor) -> torch.Tensor:
100
+ h = self.norm1(x)
101
+ h = self.attn(h)
102
+ x = x + h
103
+ h = self.norm2(x)
104
+ h = self.mlp(h)
105
+ x = x + h
106
+ return x
107
+
108
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
109
+ if self.use_checkpoint:
110
+ return torch.utils.checkpoint.checkpoint(self._forward, x, use_reentrant=False)
111
+ else:
112
+ return self._forward(x)
113
+
114
+
115
+ class TransformerCrossBlock(nn.Module):
116
+ """
117
+ Transformer cross-attention block (MSA + MCA + FFN).
118
+ """
119
+ def __init__(
120
+ self,
121
+ channels: int,
122
+ ctx_channels: int,
123
+ num_heads: int,
124
+ mlp_ratio: float = 4.0,
125
+ attn_mode: Literal["full", "windowed"] = "full",
126
+ window_size: Optional[int] = None,
127
+ shift_window: Optional[Tuple[int, int, int]] = None,
128
+ use_checkpoint: bool = False,
129
+ use_rope: bool = False,
130
+ qk_rms_norm: bool = False,
131
+ qk_rms_norm_cross: bool = False,
132
+ qkv_bias: bool = True,
133
+ ln_affine: bool = False,
134
+ ):
135
+ super().__init__()
136
+ self.use_checkpoint = use_checkpoint
137
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
138
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
139
+ self.norm3 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
140
+ self.self_attn = MultiHeadAttention(
141
+ channels,
142
+ num_heads=num_heads,
143
+ type="self",
144
+ attn_mode=attn_mode,
145
+ window_size=window_size,
146
+ shift_window=shift_window,
147
+ qkv_bias=qkv_bias,
148
+ use_rope=use_rope,
149
+ qk_rms_norm=qk_rms_norm,
150
+ )
151
+ self.cross_attn = MultiHeadAttention(
152
+ channels,
153
+ ctx_channels=ctx_channels,
154
+ num_heads=num_heads,
155
+ type="cross",
156
+ attn_mode="full",
157
+ qkv_bias=qkv_bias,
158
+ qk_rms_norm=qk_rms_norm_cross,
159
+ )
160
+ self.mlp = FeedForwardNet(
161
+ channels,
162
+ mlp_ratio=mlp_ratio,
163
+ )
164
+
165
+ def _forward(self, x: torch.Tensor, context: torch.Tensor):
166
+ h = self.norm1(x)
167
+ h = self.self_attn(h)
168
+ x = x + h
169
+ h = self.norm2(x)
170
+ h = self.cross_attn(h, context)
171
+ x = x + h
172
+ h = self.norm3(x)
173
+ h = self.mlp(h)
174
+ x = x + h
175
+ return x
176
+
177
+ def forward(self, x: torch.Tensor, context: torch.Tensor):
178
+ if self.use_checkpoint:
179
+ return torch.utils.checkpoint.checkpoint(self._forward, x, context, use_reentrant=False)
180
+ else:
181
+ return self._forward(x, context)
182
+
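Both dense blocks preserve the (batch, tokens, channels) layout. A usage sketch, assuming the attention backend selected by trellis.modules.attention is available in the environment:

import torch
from trellis.modules.transformer import TransformerBlock, AbsolutePositionEmbedder

block = TransformerBlock(channels=96, num_heads=6, mlp_ratio=4.0)
x = torch.randn(2, 128, 96)                # (batch, tokens, channels)
assert block(x).shape == x.shape           # residual MSA + FFN keeps the shape

pos_emb = AbsolutePositionEmbedder(channels=96, in_channels=3)
coords = torch.rand(128, 3)                # (N, 3) spatial positions
assert pos_emb(coords).shape == (128, 96)  # 96 = 3 axes * 16 freqs * (sin, cos)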
trellis/modules/transformer/modulated.py ADDED
@@ -0,0 +1,157 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..attention import MultiHeadAttention
5
+ from ..norm import LayerNorm32
6
+ from .blocks import FeedForwardNet
7
+
8
+
9
+ class ModulatedTransformerBlock(nn.Module):
10
+ """
11
+ Transformer block (MSA + FFN) with adaptive layer norm conditioning.
12
+ """
13
+ def __init__(
14
+ self,
15
+ channels: int,
16
+ num_heads: int,
17
+ mlp_ratio: float = 4.0,
18
+ attn_mode: Literal["full", "windowed"] = "full",
19
+ window_size: Optional[int] = None,
20
+ shift_window: Optional[Tuple[int, int, int]] = None,
21
+ use_checkpoint: bool = False,
22
+ use_rope: bool = False,
23
+ qk_rms_norm: bool = False,
24
+ qkv_bias: bool = True,
25
+ share_mod: bool = False,
26
+ ):
27
+ super().__init__()
28
+ self.use_checkpoint = use_checkpoint
29
+ self.share_mod = share_mod
30
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
31
+ self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
32
+ self.attn = MultiHeadAttention(
33
+ channels,
34
+ num_heads=num_heads,
35
+ attn_mode=attn_mode,
36
+ window_size=window_size,
37
+ shift_window=shift_window,
38
+ qkv_bias=qkv_bias,
39
+ use_rope=use_rope,
40
+ qk_rms_norm=qk_rms_norm,
41
+ )
42
+ self.mlp = FeedForwardNet(
43
+ channels,
44
+ mlp_ratio=mlp_ratio,
45
+ )
46
+ if not share_mod:
47
+ self.adaLN_modulation = nn.Sequential(
48
+ nn.SiLU(),
49
+ nn.Linear(channels, 6 * channels, bias=True)
50
+ )
51
+
52
+ def _forward(self, x: torch.Tensor, mod: torch.Tensor) -> torch.Tensor:
53
+ if self.share_mod:
54
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
55
+ else:
56
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
57
+ h = self.norm1(x)
58
+ h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
59
+ h = self.attn(h)
60
+ h = h * gate_msa.unsqueeze(1)
61
+ x = x + h
62
+ h = self.norm2(x)
63
+ h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
64
+ h = self.mlp(h)
65
+ h = h * gate_mlp.unsqueeze(1)
66
+ x = x + h
67
+ return x
68
+
69
+ def forward(self, x: torch.Tensor, mod: torch.Tensor) -> torch.Tensor:
70
+ if self.use_checkpoint:
71
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False)
72
+ else:
73
+ return self._forward(x, mod)
74
+
75
+
76
+ class ModulatedTransformerCrossBlock(nn.Module):
77
+ """
78
+ Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning.
79
+ """
80
+ def __init__(
81
+ self,
82
+ channels: int,
83
+ ctx_channels: int,
84
+ num_heads: int,
85
+ mlp_ratio: float = 4.0,
86
+ attn_mode: Literal["full", "windowed"] = "full",
87
+ window_size: Optional[int] = None,
88
+ shift_window: Optional[Tuple[int, int, int]] = None,
89
+ use_checkpoint: bool = False,
90
+ use_rope: bool = False,
91
+ qk_rms_norm: bool = False,
92
+ qk_rms_norm_cross: bool = False,
93
+ qkv_bias: bool = True,
94
+ share_mod: bool = False,
95
+ ):
96
+ super().__init__()
97
+ self.use_checkpoint = use_checkpoint
98
+ self.share_mod = share_mod
99
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
100
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
101
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
102
+ self.self_attn = MultiHeadAttention(
103
+ channels,
104
+ num_heads=num_heads,
105
+ type="self",
106
+ attn_mode=attn_mode,
107
+ window_size=window_size,
108
+ shift_window=shift_window,
109
+ qkv_bias=qkv_bias,
110
+ use_rope=use_rope,
111
+ qk_rms_norm=qk_rms_norm,
112
+ )
113
+ self.cross_attn = MultiHeadAttention(
114
+ channels,
115
+ ctx_channels=ctx_channels,
116
+ num_heads=num_heads,
117
+ type="cross",
118
+ attn_mode="full",
119
+ qkv_bias=qkv_bias,
120
+ qk_rms_norm=qk_rms_norm_cross,
121
+ )
122
+ self.mlp = FeedForwardNet(
123
+ channels,
124
+ mlp_ratio=mlp_ratio,
125
+ )
126
+ if not share_mod:
127
+ self.adaLN_modulation = nn.Sequential(
128
+ nn.SiLU(),
129
+ nn.Linear(channels, 6 * channels, bias=True)
130
+ )
131
+
132
+ def _forward(self, x: torch.Tensor, mod: torch.Tensor, context: torch.Tensor):
133
+ if self.share_mod:
134
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
135
+ else:
136
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
137
+ h = self.norm1(x)
138
+ h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
139
+ h = self.self_attn(h)
140
+ h = h * gate_msa.unsqueeze(1)
141
+ x = x + h
142
+ h = self.norm2(x)
143
+ h = self.cross_attn(h, context)
144
+ x = x + h
145
+ h = self.norm3(x)
146
+ h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
147
+ h = self.mlp(h)
148
+ h = h * gate_mlp.unsqueeze(1)
149
+ x = x + h
150
+ return x
151
+
152
+ def forward(self, x: torch.Tensor, mod: torch.Tensor, context: torch.Tensor):
153
+ if self.use_checkpoint:
154
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
155
+ else:
156
+ return self._forward(x, mod, context)
157
+
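The modulated blocks follow the DiT-style adaLN recipe: a per-sample conditioning vector is projected to six per-channel tensors, i.e. shift/scale/gate for the attention branch and for the MLP branch. A minimal dense sketch of that arithmetic (standalone, with made-up sizes):

import torch
import torch.nn as nn
import torch.nn.functional as F

channels = 8
adaLN = nn.Sequential(nn.SiLU(), nn.Linear(channels, 6 * channels, bias=True))

x = torch.randn(2, 16, channels)   # (batch, tokens, channels)
mod = torch.randn(2, channels)     # one conditioning vector per sample

shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = adaLN(mod).chunk(6, dim=1)

h = F.layer_norm(x, (channels,))                               # affine-free norm
h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)  # modulate before attention
# ... self-attention would run on h here ...
x = x + h * gate_msa.unsqueeze(1)                              # gated residual
# The MLP branch repeats the same pattern with the *_mlp tensors.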
trellis/modules/utils.py ADDED
@@ -0,0 +1,54 @@
1
+ import torch.nn as nn
2
+ from ..modules import sparse as sp
3
+
4
+ FP16_MODULES = (
5
+ nn.Conv1d,
6
+ nn.Conv2d,
7
+ nn.Conv3d,
8
+ nn.ConvTranspose1d,
9
+ nn.ConvTranspose2d,
10
+ nn.ConvTranspose3d,
11
+ nn.Linear,
12
+ sp.SparseConv3d,
13
+ sp.SparseInverseConv3d,
14
+ sp.SparseLinear,
15
+ )
16
+
17
+ def convert_module_to_f16(l):
18
+ """
19
+ Convert primitive modules to float16.
20
+ """
21
+ if isinstance(l, FP16_MODULES):
22
+ for p in l.parameters():
23
+ p.data = p.data.half()
24
+
25
+
26
+ def convert_module_to_f32(l):
27
+ """
28
+ Convert primitive modules to float32, undoing convert_module_to_f16().
29
+ """
30
+ if isinstance(l, FP16_MODULES):
31
+ for p in l.parameters():
32
+ p.data = p.data.float()
33
+
34
+
35
+ def zero_module(module):
36
+ """
37
+ Zero out the parameters of a module and return it.
38
+ """
39
+ for p in module.parameters():
40
+ p.detach().zero_()
41
+ return module
42
+
43
+
44
+ def scale_module(module, scale):
45
+ """
46
+ Scale the parameters of a module and return it.
47
+ """
48
+ for p in module.parameters():
49
+ p.detach().mul_(scale)
50
+ return module
51
+
52
+
53
+ def modulate(x, shift, scale):
54
+ return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
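These helpers are meant to be applied with nn.Module.apply (only the types listed in FP16_MODULES are touched), and zero_module is the usual trick for starting a residual branch as a no-op. A short sketch, assuming the sparse backend imported by trellis.modules.sparse is installed:

import torch
import torch.nn as nn
from trellis.modules.utils import convert_module_to_f16, convert_module_to_f32, zero_module

model = nn.Sequential(nn.Linear(8, 8), nn.GELU(), nn.Linear(8, 8))

model.apply(convert_module_to_f16)        # Linear layers become half precision
assert model[0].weight.dtype == torch.float16
model.apply(convert_module_to_f32)        # and back
assert model[0].weight.dtype == torch.float32

out_proj = zero_module(nn.Linear(8, 8))   # branch output is zero at initialization
assert out_proj.weight.abs().sum() == 0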
trellis/pipelines/__init__.py ADDED
@@ -0,0 +1,24 @@
1
+ from . import samplers
2
+ from .trellis_image_to_3d import TrellisImageTo3DPipeline
3
+
4
+
5
+ def from_pretrained(path: str):
6
+ """
7
+ Load a pipeline from a local model folder or from the Hugging Face Hub.
8
+
9
+ Args:
10
+ path: The path to the model. Can be either a local path or a Hugging Face model name.
11
+ """
12
+ import os
13
+ import json
14
+ is_local = os.path.exists(f"{path}/pipeline.json")
15
+
16
+ if is_local:
17
+ config_file = f"{path}/pipeline.json"
18
+ else:
19
+ from huggingface_hub import hf_hub_download
20
+ config_file = hf_hub_download(path, "pipeline.json")
21
+
22
+ with open(config_file, 'r') as f:
23
+ config = json.load(f)
24
+ return globals()[config['name']].from_pretrained(path)
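Intended entry point, sketched below. The repository id is illustrative (any local folder or Hugging Face repo that ships a pipeline.json works), and the remote case requires huggingface_hub:

from trellis.pipelines import from_pretrained

# Illustrative repo id; substitute a local folder or another HF repo
# containing a pipeline.json.
pipeline = from_pretrained("microsoft/TRELLIS-image-large")
pipeline.cuda()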
trellis/pipelines/base.py ADDED
@@ -0,0 +1,66 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from .. import models
5
+
6
+
7
+ class Pipeline:
8
+ """
9
+ A base class for pipelines.
10
+ """
11
+ def __init__(
12
+ self,
13
+ models: dict[str, nn.Module] = None,
14
+ ):
15
+ if models is None:
16
+ return
17
+ self.models = models
18
+ for model in self.models.values():
19
+ model.eval()
20
+
21
+ @staticmethod
22
+ def from_pretrained(path: str) -> "Pipeline":
23
+ """
24
+ Load a pretrained model.
25
+ """
26
+ import os
27
+ import json
28
+ is_local = os.path.exists(f"{path}/pipeline.json")
29
+
30
+ if is_local:
31
+ config_file = f"{path}/pipeline.json"
32
+ else:
33
+ from huggingface_hub import hf_hub_download
34
+ config_file = hf_hub_download(path, "pipeline.json")
35
+
36
+ with open(config_file, 'r') as f:
37
+ args = json.load(f)['args']
38
+
39
+ _models = {
40
+ k: models.from_pretrained(f"{path}/{v}")
41
+ for k, v in args['models'].items()
42
+ }
43
+
44
+ new_pipeline = Pipeline(_models)
45
+ new_pipeline._pretrained_args = args
46
+ return new_pipeline
47
+
48
+ @property
49
+ def device(self) -> torch.device:
50
+ for model in self.models.values():
51
+ if hasattr(model, 'device'):
52
+ return model.device
53
+ for model in self.models.values():
54
+ if hasattr(model, 'parameters'):
55
+ return next(model.parameters()).device
56
+ raise RuntimeError("No device found.")
57
+
58
+ def to(self, device: torch.device) -> None:
59
+ for model in self.models.values():
60
+ model.to(device)
61
+
62
+ def cuda(self) -> None:
63
+ self.to(torch.device("cuda"))
64
+
65
+ def cpu(self) -> None:
66
+ self.to(torch.device("cpu"))
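The base class keeps all sub-models in a dict, so to()/cuda()/cpu() move every model, and device is inferred from the first model that exposes one (or from its parameters). A tiny standalone sketch with placeholder modules, assuming the trellis package imports cleanly:

import torch
import torch.nn as nn
from trellis.pipelines.base import Pipeline

pipe = Pipeline({"encoder": nn.Linear(4, 4), "decoder": nn.Linear(4, 4)})  # placeholder models
print(pipe.device)              # cpu, inferred from the first model's parameters
pipe.to(torch.device("cpu"))    # moves every registered sub-model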
trellis/pipelines/samplers/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .base import Sampler
2
+ from .flow_euler import FlowEulerSampler, FlowEulerCfgSampler, FlowEulerGuidanceIntervalSampler
trellis/pipelines/samplers/base.py ADDED
@@ -0,0 +1,20 @@
1
+ from typing import *
2
+ from abc import ABC, abstractmethod
3
+
4
+
5
+ class Sampler(ABC):
6
+ """
7
+ A base class for samplers.
8
+ """
9
+
10
+ @abstractmethod
11
+ def sample(
12
+ self,
13
+ model,
14
+ **kwargs
15
+ ):
16
+ """
17
+ Sample from a model.
18
+ """
19
+ pass
20
+
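Concrete samplers only need to implement sample(model, **kwargs). A toy subclass for illustration, assuming the trellis package imports cleanly:

import torch
from trellis.pipelines.samplers.base import Sampler

class OneStepSampler(Sampler):
    # Toy sampler: a single evaluation of the model on the provided noise.
    def sample(self, model, noise: torch.Tensor, **kwargs):
        return model(noise)

out = OneStepSampler().sample(lambda x: 0.5 * x, torch.randn(2, 3))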
trellis/pipelines/samplers/classifier_free_guidance_mixin.py ADDED
@@ -0,0 +1,12 @@
1
+ from typing import *
2
+
3
+
4
+ class ClassifierFreeGuidanceSamplerMixin:
5
+ """
6
+ A mixin class for samplers that apply classifier-free guidance.
7
+ """
8
+
9
+ def _inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, **kwargs):
10
+ pred = super()._inference_model(model, x_t, t, cond, **kwargs)
11
+ neg_pred = super()._inference_model(model, x_t, t, neg_cond, **kwargs)
12
+ return (1 + cfg_strength) * pred - cfg_strength * neg_pred
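The returned combination is standard classifier-free guidance: with strength s it equals pred + s * (pred - neg_pred), i.e. the conditional prediction pushed further away from the unconditional one. For example, with cfg_strength = 3, pred = 2.0 and neg_pred = 1.0, the guided prediction is (1 + 3) * 2.0 - 3 * 1.0 = 5.0.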
trellis/pipelines/samplers/flow_euler.py ADDED
@@ -0,0 +1,199 @@
1
+ from typing import *
2
+ import torch
3
+ import numpy as np
4
+ from tqdm import tqdm
5
+ from easydict import EasyDict as edict
6
+ from .base import Sampler
7
+ from .classifier_free_guidance_mixin import ClassifierFreeGuidanceSamplerMixin
8
+ from .guidance_interval_mixin import GuidanceIntervalSamplerMixin
9
+
10
+
11
+ class FlowEulerSampler(Sampler):
12
+ """
13
+ Generate samples from a flow-matching model using Euler sampling.
14
+
15
+ Args:
16
+ sigma_min: The minimum scale of noise in flow.
17
+ """
18
+ def __init__(
19
+ self,
20
+ sigma_min: float,
21
+ ):
22
+ self.sigma_min = sigma_min
23
+
24
+ def _eps_to_xstart(self, x_t, t, eps):
25
+ assert x_t.shape == eps.shape
26
+ return (x_t - (self.sigma_min + (1 - self.sigma_min) * t) * eps) / (1 - t)
27
+
28
+ def _xstart_to_eps(self, x_t, t, x_0):
29
+ assert x_t.shape == x_0.shape
30
+ return (x_t - (1 - t) * x_0) / (self.sigma_min + (1 - self.sigma_min) * t)
31
+
32
+ def _v_to_xstart_eps(self, x_t, t, v):
33
+ assert x_t.shape == v.shape
34
+ eps = (1 - t) * v + x_t
35
+ x_0 = (1 - self.sigma_min) * x_t - (self.sigma_min + (1 - self.sigma_min) * t) * v
36
+ return x_0, eps
37
+
38
+ def _inference_model(self, model, x_t, t, cond=None, **kwargs):
39
+ t = torch.tensor([1000 * t] * x_t.shape[0], device=x_t.device, dtype=torch.float32)
40
+ return model(x_t, t, cond, **kwargs)
41
+
42
+ def _get_model_prediction(self, model, x_t, t, cond=None, **kwargs):
43
+ pred_v = self._inference_model(model, x_t, t, cond, **kwargs)
44
+ pred_x_0, pred_eps = self._v_to_xstart_eps(x_t=x_t, t=t, v=pred_v)
45
+ return pred_x_0, pred_eps, pred_v
46
+
47
+ @torch.no_grad()
48
+ def sample_once(
49
+ self,
50
+ model,
51
+ x_t,
52
+ t: float,
53
+ t_prev: float,
54
+ cond: Optional[Any] = None,
55
+ **kwargs
56
+ ):
57
+ """
58
+ Sample x_{t-1} from the model using Euler method.
59
+
60
+ Args:
61
+ model: The model to sample from.
62
+ x_t: The [N x C x ...] tensor of noisy inputs at time t.
63
+ t: The current timestep.
64
+ t_prev: The previous timestep.
65
+ cond: conditional information.
66
+ **kwargs: Additional arguments for model inference.
67
+
68
+ Returns:
69
+ a dict containing the following
70
+ - 'pred_x_prev': x_{t-1}.
71
+ - 'pred_x_0': a prediction of x_0.
72
+ """
73
+ pred_x_0, pred_eps, pred_v = self._get_model_prediction(model, x_t, t, cond, **kwargs)
74
+ pred_x_prev = x_t - (t - t_prev) * pred_v
75
+ return edict({"pred_x_prev": pred_x_prev, "pred_x_0": pred_x_0})
76
+
77
+ @torch.no_grad()
78
+ def sample(
79
+ self,
80
+ model,
81
+ noise,
82
+ cond: Optional[Any] = None,
83
+ steps: int = 50,
84
+ rescale_t: float = 1.0,
85
+ verbose: bool = True,
86
+ **kwargs
87
+ ):
88
+ """
89
+ Generate samples from the model using Euler method.
90
+
91
+ Args:
92
+ model: The model to sample from.
93
+ noise: The initial noise tensor.
94
+ cond: conditional information.
95
+ steps: The number of steps to sample.
96
+ rescale_t: The rescale factor for t.
97
+ verbose: If True, show a progress bar.
98
+ **kwargs: Additional arguments for model_inference.
99
+
100
+ Returns:
101
+ a dict containing the following
102
+ - 'samples': the model samples.
103
+ - 'pred_x_t': a list of prediction of x_t.
104
+ - 'pred_x_0': a list of prediction of x_0.
105
+ """
106
+ sample = noise
107
+ t_seq = np.linspace(1, 0, steps + 1)
108
+ t_seq = rescale_t * t_seq / (1 + (rescale_t - 1) * t_seq)
109
+ t_pairs = list((t_seq[i], t_seq[i + 1]) for i in range(steps))
110
+ ret = edict({"samples": None, "pred_x_t": [], "pred_x_0": []})
111
+ for t, t_prev in tqdm(t_pairs, desc="Sampling", disable=not verbose):
112
+ out = self.sample_once(model, sample, t, t_prev, cond, **kwargs)
113
+ sample = out.pred_x_prev
114
+ ret.pred_x_t.append(out.pred_x_prev)
115
+ ret.pred_x_0.append(out.pred_x_0)
116
+ ret.samples = sample
117
+ return ret
118
+
119
+
120
+ class FlowEulerCfgSampler(ClassifierFreeGuidanceSamplerMixin, FlowEulerSampler):
121
+ """
122
+ Generate samples from a flow-matching model using Euler sampling with classifier-free guidance.
123
+ """
124
+ @torch.no_grad()
125
+ def sample(
126
+ self,
127
+ model,
128
+ noise,
129
+ cond,
130
+ neg_cond,
131
+ steps: int = 50,
132
+ rescale_t: float = 1.0,
133
+ cfg_strength: float = 3.0,
134
+ verbose: bool = True,
135
+ **kwargs
136
+ ):
137
+ """
138
+ Generate samples from the model using Euler method.
139
+
140
+ Args:
141
+ model: The model to sample from.
142
+ noise: The initial noise tensor.
143
+ cond: conditional information.
144
+ neg_cond: negative conditional information.
145
+ steps: The number of steps to sample.
146
+ rescale_t: The rescale factor for t.
147
+ cfg_strength: The strength of classifier-free guidance.
148
+ verbose: If True, show a progress bar.
149
+ **kwargs: Additional arguments for model_inference.
150
+
151
+ Returns:
152
+ a dict containing the following
153
+ - 'samples': the model samples.
154
+ - 'pred_x_t': a list of prediction of x_t.
155
+ - 'pred_x_0': a list of prediction of x_0.
156
+ """
157
+ return super().sample(model, noise, cond, steps, rescale_t, verbose, neg_cond=neg_cond, cfg_strength=cfg_strength, **kwargs)
158
+
159
+
160
+ class FlowEulerGuidanceIntervalSampler(GuidanceIntervalSamplerMixin, FlowEulerSampler):
161
+ """
162
+ Generate samples from a flow-matching model using Euler sampling with classifier-free guidance and interval.
163
+ """
164
+ @torch.no_grad()
165
+ def sample(
166
+ self,
167
+ model,
168
+ noise,
169
+ cond,
170
+ neg_cond,
171
+ steps: int = 50,
172
+ rescale_t: float = 1.0,
173
+ cfg_strength: float = 3.0,
174
+ cfg_interval: Tuple[float, float] = (0.0, 1.0),
175
+ verbose: bool = True,
176
+ **kwargs
177
+ ):
178
+ """
179
+ Generate samples from the model using Euler method.
180
+
181
+ Args:
182
+ model: The model to sample from.
183
+ noise: The initial noise tensor.
184
+ cond: conditional information.
185
+ neg_cond: negative conditional information.
186
+ steps: The number of steps to sample.
187
+ rescale_t: The rescale factor for t.
188
+ cfg_strength: The strength of classifier-free guidance.
189
+ cfg_interval: The interval for classifier-free guidance.
190
+ verbose: If True, show a progress bar.
191
+ **kwargs: Additional arguments for model_inference.
192
+
193
+ Returns:
194
+ a dict containing the following
195
+ - 'samples': the model samples.
196
+ - 'pred_x_t': a list of prediction of x_t.
197
+ - 'pred_x_0': a list of prediction of x_0.
198
+ """
199
+ return super().sample(model, noise, cond, steps, rescale_t, verbose, neg_cond=neg_cond, cfg_strength=cfg_strength, cfg_interval=cfg_interval, **kwargs)
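End to end, the Euler sampler only needs a callable model(x_t, t, cond, ...) that predicts velocity. A toy run under that assumption (the stand-in model that returns x_t as its velocity is purely illustrative), assuming the trellis package and its dependencies import:

import torch
from trellis.pipelines.samplers import FlowEulerSampler

class ToyVelocityModel(torch.nn.Module):
    # Illustrative stand-in: the predicted "velocity" is just the current sample.
    def forward(self, x_t, t, cond=None, **kwargs):
        return x_t

sampler = FlowEulerSampler(sigma_min=1e-5)
noise = torch.randn(1, 4, 8)
out = sampler.sample(ToyVelocityModel(), noise, cond=None, steps=10, verbose=False)
assert out.samples.shape == noise.shape   # Euler steps preserve the tensor shape
assert len(out.pred_x_0) == 10            # one x_0 estimate per step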