Upload 4 files

- convertFBX2GLB.py +59 -0
- generateARKITGLBWithBlender.py +270 -0
- generateVertexIndices.py +84 -0
- original_app.py +645 -0
convertFBX2GLB.py
ADDED
@@ -0,0 +1,59 @@
"""
Copyright (c) 2024-2025, The Alibaba 3DAIGC Team Authors.

Blender FBX to GLB Converter
Converts 3D models from FBX to glTF Binary (GLB) format with optimized settings.
Requires Blender to run in background mode.
"""

import bpy
import sys
from pathlib import Path


def clean_scene():
    """Clear all objects and data from the current Blender scene"""
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.object.delete()
    # Iterate over a snapshot: removing items while iterating a bpy collection
    # directly can skip entries.
    for collection in [bpy.data.meshes, bpy.data.materials, bpy.data.textures]:
        for item in list(collection):
            collection.remove(item)


def main():
    try:
        # Parse command line arguments after "--"
        argv = sys.argv[sys.argv.index("--") + 1:]
        input_fbx = Path(argv[0])
        output_glb = Path(argv[1])

        # Validate input file
        if not input_fbx.exists():
            raise FileNotFoundError(f"Input FBX file not found: {input_fbx}")

        # Prepare scene
        clean_scene()

        # Import FBX with default settings
        print(f"Importing {input_fbx}...")
        bpy.ops.import_scene.fbx(filepath=str(input_fbx))

        # Export optimized GLB
        print(f"Exporting to {output_glb}...")
        bpy.ops.export_scene.gltf(
            filepath=str(output_glb),
            export_format='GLB',     # Binary format
            export_skins=True,       # Keep skinning data
            export_texcoords=False,  # Reduce file size
            export_normals=False,    # Reduce file size
            export_colors=False,     # Reduce file size
        )

        print("Conversion completed successfully")

    except Exception as e:
        print(f"Error: {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main()
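
This script only runs inside Blender's bundled Python (it imports bpy), so it cannot be executed directly. A minimal sketch of the intended launch, mirroring the command that generateARKITGLBWithBlender.py builds below and assuming a blender binary is on PATH:

import subprocess

# Blender forwards everything after "--" to the script's sys.argv.
subprocess.run(
    ["blender", "--background", "--python", "convertFBX2GLB.py",
     "--", "input.fbx", "output.glb"],
    check=True,
)
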
generateARKITGLBWithBlender.py
ADDED
@@ -0,0 +1,270 @@
"""
Copyright (c) 2024-2025, The Alibaba 3DAIGC Team Authors.

FLAME Model FBX/GLB Converter
A pipeline for processing FLAME 3D models including:
1. Shape parameter injection into FBX templates
2. FBX format conversion (ASCII <-> Binary)
3. GLB export via Blender
"""


import os.path

import numpy as np
import logging
import subprocess
from pathlib import Path
import trimesh
import shlex

try:
    import fbx
except ImportError:
    raise RuntimeError(
        "FBX SDK required: https://www.autodesk.com/developer-network/platform-technologies/fbx-sdk-2020-2")

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
logger = logging.getLogger(__name__)


def update_flame_shape(
        input_mesh: Path,
        output_ascii_fbx: Path,
        template_fbx: Path
) -> None:
    """
    Injects FLAME shape parameters into an FBX template

    Args:
        input_mesh: Path to FLAME mesh (OBJ format)
        output_ascii_fbx: Output path for modified ASCII FBX
        template_fbx: Template FBX with FLAME structure

    Raises:
        FileNotFoundError: If input files are missing
        ValueError: If template format mismatches
    """
    logger.info(f"Updating FLAME shape in {template_fbx}")

    # Validate inputs
    if not all([input_mesh.exists(), template_fbx.exists()]):
        raise FileNotFoundError("Missing input file(s)")

    # Load and process FLAME mesh
    mesh = trimesh.load(input_mesh)
    bs_verts = np.array(mesh.vertices).flatten()
    verts_csv = ",".join([f"{v:.6f}" for v in bs_verts]) + ","

    # Read template FBX
    with template_fbx.open('r', encoding='utf-8') as f:
        template_lines = f.readlines()

    # Replace vertex data section
    output_lines = []
    vertex_section = False
    VERTEX_HEADER = "Vertices: *60054 {"  # FLAME-specific vertex count

    for line in template_lines:
        if VERTEX_HEADER in line:
            vertex_section = True
            output_lines.append(line)
            # Inject new vertex data
            output_lines.extend([f" {v}\n" for v in verts_csv.split(",") if v])
            continue

        if vertex_section:
            # Skip the template's old vertex lines until the closing brace
            if '}' in line:
                vertex_section = False
                output_lines.append(line)
            continue

        output_lines.append(line)

    # Write modified FBX
    with output_ascii_fbx.open('w', encoding='utf-8') as f:
        f.writelines(output_lines)
    logger.info(f"Generated updated ASCII FBX: {output_ascii_fbx}")


def convert_ascii_to_binary(
        input_ascii: Path,
        output_binary: Path
) -> None:
    """
    Converts FBX between ASCII and Binary formats

    Args:
        input_ascii: Path to ASCII FBX
        output_binary: Output path for binary FBX

    Raises:
        RuntimeError: If conversion fails
    """
    logger.info(f"Converting {input_ascii} to binary FBX")

    manager = fbx.FbxManager.Create()
    ios = fbx.FbxIOSettings.Create(manager, fbx.IOSROOT)
    manager.SetIOSettings(ios)

    try:
        # Initialize scene
        scene = fbx.FbxScene.Create(manager, "ConversionScene")

        # Import ASCII
        importer = fbx.FbxImporter.Create(manager, "")
        if not importer.Initialize(str(input_ascii), -1, manager.GetIOSettings()):
            raise RuntimeError(f"FBX import failed: {importer.GetStatus().GetErrorString()}")
        importer.Import(scene)

        # Export Binary
        exporter = fbx.FbxExporter.Create(manager, "")
        if not exporter.Initialize(str(output_binary), 0, manager.GetIOSettings()):
            raise RuntimeError(f"FBX export failed: {exporter.GetStatus().GetErrorString()}")
        exporter.Export(scene)

    finally:
        # Cleanup FBX SDK resources
        scene.Destroy()
        importer.Destroy()
        exporter.Destroy()
        manager.Destroy()

    logger.info(f"Binary FBX saved to {output_binary}")


def convert_with_blender(
        input_fbx: Path,
        output_glb: Path,
        blender_exec: Path = Path("blender"),
        input_mesh: Path = Path("input_mesh.obj"),
) -> None:
    """
    Converts FBX to GLB using Blender

    Args:
        input_fbx: Path to input FBX
        output_glb: Output GLB path
        blender_exec: Path to Blender executable

    Raises:
        CalledProcessError: If Blender conversion fails
    """
    logger.info("Starting Blender conversion to GLB")

    print("blender_exec exists: {}".format(os.path.exists(blender_exec)))

    cmd = [
        str(blender_exec),
        "--background",
        "--python", "convertFBX2GLB.py",  # Path to conversion script
        "--", str(input_fbx), str(output_glb)
    ]

    cmd_str = ' '.join(shlex.quote(arg) for arg in cmd)
    print("Run {}".format(cmd_str))

    # Run the command
    os.system(cmd_str)

    # try:
    #     subprocess.run(cmd, check=True, capture_output=True, text=True, encoding='utf-8')
    #
    # except subprocess.CalledProcessError as e:
    #     logger.error(f"Blender conversion failed: {e.stderr}")
    #     raise
    logger.info(f"GLB output saved to {output_glb}")


def gen_vertex_order_with_blender(
        input_mesh: Path,
        output_json: Path,
        blender_exec: Path = Path("blender"),
) -> None:
    """
    Exports a Z-sorted vertex index order for the mesh via Blender

    Args:
        input_mesh: Path to input mesh
        output_json: Output json path
        blender_exec: Path to Blender executable

    Raises:
        CalledProcessError: If Blender conversion fails
    """
    logger.info("Starting vertex order generation")

    cmd = [
        str(blender_exec),
        "--background",
        "--python", "generateVertexIndices.py",  # Path to generation script
        "--", str(input_mesh), str(output_json)
    ]

    # try:
    #     subprocess.run(cmd, check=True, capture_output=True, text=True, encoding='utf-8')
    # except subprocess.CalledProcessError as e:
    #     logger.error(f"Blender conversion failed: {e.stderr}")
    #     raise
    cmd_str = ' '.join(shlex.quote(arg) for arg in cmd)
    print("Run {}".format(cmd_str))

    # Run the command
    os.system(cmd_str)

    logger.info(f"Vertex order output saved to {output_json}")


def generate_glb(
        input_mesh: Path,
        template_fbx: Path,
        output_glb: Path,
        blender_exec: Path = Path("blender"),
        cleanup: bool = True
) -> None:
    """
    Complete pipeline for FLAME GLB generation

    Args:
        input_mesh: Input FLAME mesh (OBJ)
        template_fbx: Template FBX file
        output_glb: Final GLB output
        blender_exec: Blender executable path
        cleanup: Remove temporary files
    """
    temp_files = {
        "ascii": Path("./temp_ascii.fbx"),
        "binary": Path("./temp_bin.fbx")
    }

    try:
        # Step 1: Shape parameter injection
        update_flame_shape(input_mesh, temp_files["ascii"], template_fbx)

        # Step 2: FBX format conversion
        convert_ascii_to_binary(temp_files["ascii"], temp_files["binary"])

        # Step 3: Blender conversion
        convert_with_blender(temp_files["binary"], output_glb, blender_exec)

        # Step 4: Vertex order generation
        gen_vertex_order_with_blender(input_mesh,
                                      Path(os.path.join(os.path.dirname(output_glb), 'vertex_order.json')),
                                      blender_exec)

    finally:
        # Cleanup temporary files
        if cleanup:
            for f in temp_files.values():
                if f.exists():
                    f.unlink()
            logger.info("Cleaned up temporary files")


if __name__ == "__main__":
    # Example usage
    generate_glb(
        input_mesh=Path("./asserts/sample_oac/nature.obj"),
        template_fbx=Path("./asserts/sample_oac/template_file.fbx"),
        output_glb=Path("./asserts/sample_oac/skin.glb"),
        blender_exec=Path("./blender-4.0.0-linux-x64/blender")
    )
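
update_flame_shape() assumes the OBJ it loads fills the template's vertex block exactly; nothing checks this before the text splice. A minimal pre-flight sketch under that assumption (60054 comes from the VERTEX_HEADER constant above, i.e. 20018 vertices times 3 coordinates; check_flame_mesh is a hypothetical helper, not part of this repo):

import trimesh

EXPECTED_COORDS = 60054  # from the "Vertices: *60054 {" header in the template

def check_flame_mesh(obj_path: str) -> None:
    # Fail fast if the OBJ cannot fill the template's vertex block exactly.
    mesh = trimesh.load(obj_path)
    n_coords = mesh.vertices.size  # 3 coordinates per vertex
    if n_coords != EXPECTED_COORDS:
        raise ValueError(
            f"{obj_path} has {n_coords} coordinates; template expects {EXPECTED_COORDS}")
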
generateVertexIndices.py
ADDED
@@ -0,0 +1,84 @@
"""
Copyright (c) 2024-2025, The Alibaba 3DAIGC Team Authors.

Blender Vertex Order Generator
Imports a mesh, applies a 90-degree X rotation, and exports its vertex indices
sorted by ascending Z coordinate as a JSON array.
Requires Blender to run in background mode.
"""

import bpy
import sys
import os
import json
from pathlib import Path


def import_obj(filepath):
    if not os.path.exists(filepath):
        raise FileNotFoundError(f"File does not exist: {filepath}")
    bpy.ops.wm.obj_import(filepath=filepath)
    print(f"Imported successfully: {filepath}")


def clean_scene():
    """Clear all objects and data from the current Blender scene"""
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.object.delete()
    # Iterate over a snapshot: removing items while iterating a bpy collection
    # directly can skip entries.
    for collection in [bpy.data.meshes, bpy.data.materials, bpy.data.textures]:
        for item in list(collection):
            collection.remove(item)


def apply_rotation(obj):
    obj.rotation_euler = (1.5708, 0, 0)  # 90 degrees around X, in radians
    bpy.context.view_layer.update()
    obj.select_set(True)
    bpy.context.view_layer.objects.active = obj
    bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)  # Apply the rotation
    print(f"Applied 90-degree rotation to object: {obj.name}")


def main():
    try:
        # Parse command line arguments after "--"
        argv = sys.argv[sys.argv.index("--") + 1:]
        input_mesh = Path(argv[0])
        output_vertex_order_file = argv[1]

        # Validate input file
        if not input_mesh.exists():
            raise FileNotFoundError(f"Input mesh file not found: {input_mesh}")

        # Prepare scene
        clean_scene()

        # Import OBJ with default settings
        print(f"Importing {input_mesh}...")
        import_obj(str(input_mesh))
        base_obj = bpy.context.view_layer.objects.active

        apply_rotation(base_obj)

        bpy.context.view_layer.objects.active = base_obj
        base_obj.select_set(True)
        bpy.ops.object.mode_set(mode='OBJECT')

        base_objects = [obj for obj in bpy.context.scene.objects if obj.type == 'MESH']
        if len(base_objects) != 1:
            raise ValueError("Scene should contain exactly one base mesh object.")
        base_obj = base_objects[0]

        vertices = [(i, v.co.z) for i, v in enumerate(base_obj.data.vertices)]

        sorted_vertices = sorted(vertices, key=lambda x: x[1])  # Sort by Z coordinate, ascending
        sorted_vertex_indices = [idx for idx, z in sorted_vertices]

        with open(output_vertex_order_file, "w") as f:
            json.dump(sorted_vertex_indices, f, indent=4)  # Save as a JSON array
        print(f"Exported vertex order to: {output_vertex_order_file}")

    except Exception as e:
        print(f"Error: {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main()
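
The exported JSON is just a flat list of vertex indices ordered by ascending Z. A minimal sketch of a downstream consumer (the vertex array below is a stand-in, not data from this repo):

import json
import numpy as np

# Load the Z-sorted index order written by generateVertexIndices.py.
with open("vertex_order.json") as f:
    order = np.array(json.load(f), dtype=np.int64)

vertices = np.random.rand(len(order), 3)  # stand-in for a real (N, 3) vertex array
z_sorted = vertices[order]                # rows reordered bottom-to-top along Z
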
original_app.py
ADDED
@@ -0,0 +1,645 @@
# Copyright (c) 2024-2025, Yisheng He, Yuan Dong
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

os.system("rm -rf /data-nvme/zerogpu-offload/")
os.system("pip install chumpy")
# os.system("pip uninstall -y basicsr")
os.system("pip install Cython")
os.system("pip install ./new_wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl")
os.system("pip install ./wheels/simple_knn-0.0.0-cp310-cp310-linux_x86_64.whl")
os.system("pip install ./wheels/nvdiffrast-0.3.3-cp310-cp310-linux_x86_64.whl --force-reinstall")
os.system(
    "pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt240/download.html")
os.system("pip install numpy==1.23.0")

import cv2
import sys
import base64
import subprocess

import argparse
from glob import glob
import gradio as gr
import numpy as np
from PIL import Image
from omegaconf import OmegaConf

import torch
import moviepy.editor as mpy
from lam.runners.infer.head_utils import prepare_motion_seqs, preprocess_image
from lam.utils.ffmpeg_utils import images_to_video

import spaces
import shutil
import time
from pathlib import Path


def compile_module(subfolder, script):
    try:
        # Save the current working directory
        current_dir = os.getcwd()
        # Change directory to the subfolder
        os.chdir(os.path.join(current_dir, subfolder))
        # Run the compilation command
        result = subprocess.run(
            ["sh", script],
            capture_output=True,
            text=True,
            check=True
        )
        # Print the compilation output
        print("Compilation output:", result.stdout)

    except Exception as e:
        # Print any error that occurred
        print(f"An error occurred: {e}")
    finally:
        # Ensure returning to the original directory
        os.chdir(current_dir)
        print("Returned to the original directory.")


# Compile the flame_tracking dependency submodule
compile_module("external/landmark_detection/FaceBoxesV2/utils/", "make.sh")
from flame_tracking_single_image import FlameTrackingSingleImage


def launch_pretrained():
    from huggingface_hub import snapshot_download, hf_hub_download
    # Fetch pretrained weights for flame tracking.
    hf_hub_download(repo_id='yuandong513/flametracking_model',
                    repo_type='model',
                    filename='pretrain_model.tar',
                    local_dir='./')
    os.system('tar -xf pretrain_model.tar && rm pretrain_model.tar')
    # Fetch human model files
    hf_hub_download(repo_id='3DAIGC/LAM-assets',
                    repo_type='model',
                    filename='LAM_human_model.tar',
                    local_dir='./')
    os.system('tar -xf LAM_human_model.tar && rm LAM_human_model.tar')
    # Fetch pretrained weights for LAM
    model_dir = hf_hub_download(repo_id="3DAIGC/LAM-20K", repo_type="model", local_dir="./exps/releases/lam/lam-20k/step_045500/", filename="config.json")
    print(model_dir)
    model_dir = hf_hub_download(repo_id="3DAIGC/LAM-20K", repo_type="model", local_dir="./exps/releases/lam/lam-20k/step_045500/", filename="model.safetensors")
    print(model_dir)
    model_dir = hf_hub_download(repo_id="3DAIGC/LAM-20K", repo_type="model", local_dir="./exps/releases/lam/lam-20k/step_045500/", filename="README.md")
    print(model_dir)
    # Fetch examples for LAM
    hf_hub_download(repo_id='3DAIGC/LAM-assets',
                    repo_type='model',
                    filename='LAM_assets.tar',
                    local_dir='./')
    os.system('tar -xf LAM_assets.tar && rm LAM_assets.tar')
    hf_hub_download(repo_id='3DAIGC/LAM-assets',
                    repo_type='model',
                    filename='config.json',
                    local_dir='./tmp/')


def launch_env_not_compile_with_cuda():
    os.system('pip install chumpy')
    os.system('pip install numpy==1.23.0')
    os.system(
        'pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt251/download.html'
    )


def assert_input_image(input_image):
    if input_image is None:
        raise gr.Error('No image selected or uploaded!')


def prepare_working_dir():
    import tempfile
    working_dir = tempfile.TemporaryDirectory()
    return working_dir


def init_preprocessor():
    from lam.utils.preprocess import Preprocessor
    global preprocessor
    preprocessor = Preprocessor()


def preprocess_fn(image_in: np.ndarray, remove_bg: bool, recenter: bool,
                  working_dir):
    image_raw = os.path.join(working_dir.name, 'raw.png')
    with Image.fromarray(image_in) as img:
        img.save(image_raw)
    image_out = os.path.join(working_dir.name, 'rembg.png')
    success = preprocessor.preprocess(image_path=image_raw,
                                      save_path=image_out,
                                      rmbg=remove_bg,
                                      recenter=recenter)
    assert success, 'Failed under preprocess_fn!'
    return image_out


def get_image_base64(path):
    with open(path, 'rb') as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode()
    return f'data:image/png;base64,{encoded_string}'


def save_imgs_2_video(imgs, v_pth, fps=30):
    # moviepy example
    from moviepy.editor import ImageSequenceClip, VideoFileClip
    images = [image.astype(np.uint8) for image in imgs]
    clip = ImageSequenceClip(images, fps=fps)
    # final_duration = len(images) / fps
    # clip = clip.subclip(0, final_duration)
    clip = clip.subclip(0, len(images) / fps)
    clip.write_videofile(v_pth, codec='libx264')

    # Sanity-check the written frame count
    cap = cv2.VideoCapture(v_pth)
    nf = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    if nf != len(images):
        print("=" * 100 + f"\n{v_pth} moviepy saved video frame error." + "\n" + "=" * 100)
    print(f"Video saved successfully at {v_pth}")


def add_audio_to_video(video_path, out_path, audio_path, fps=30):
    # Import necessary modules from moviepy
    from moviepy.editor import VideoFileClip, AudioFileClip

    # Load video file into VideoFileClip object
    video_clip = VideoFileClip(video_path)

    # Load audio file into AudioFileClip object
    audio_clip = AudioFileClip(audio_path)

    # Hard-coded cap: clip audio to 10 seconds
    if audio_clip.duration > 10:
        audio_clip = audio_clip.subclip(0, 10)

    # Attach audio clip to video clip (replaces existing audio)
    video_clip_with_audio = video_clip.set_audio(audio_clip)

    # Export final video with audio using standard codecs
    video_clip_with_audio.write_videofile(out_path, codec='libx264', audio_codec='aac', fps=fps)

    print(f"Audio added successfully at {out_path}")


def parse_configs():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str)
    parser.add_argument("--infer", type=str)
    args, unknown = parser.parse_known_args()

    cfg = OmegaConf.create()
    cli_cfg = OmegaConf.from_cli(unknown)

    # Parse from environment variables
    if os.environ.get("APP_INFER") is not None:
        args.infer = os.environ.get("APP_INFER")
    if os.environ.get("APP_MODEL_NAME") is not None:
        cli_cfg.model_name = os.environ.get("APP_MODEL_NAME")

    args.config = args.infer if args.config is None else args.config

    if args.config is not None:
        cfg_train = OmegaConf.load(args.config)
        cfg.source_size = cfg_train.dataset.source_image_res
        try:
            cfg.src_head_size = cfg_train.dataset.src_head_size
        except Exception:
            cfg.src_head_size = 112
        cfg.render_size = cfg_train.dataset.render_image.high
        _relative_path = os.path.join(
            cfg_train.experiment.parent,
            cfg_train.experiment.child,
            os.path.basename(cli_cfg.model_name).split("_")[-1],
        )

        cfg.save_tmp_dump = os.path.join("exps", "save_tmp", _relative_path)
        cfg.image_dump = os.path.join("exps", "images", _relative_path)
        cfg.video_dump = os.path.join("exps", "videos", _relative_path)  # output path

    if args.infer is not None:
        cfg_infer = OmegaConf.load(args.infer)
        cfg.merge_with(cfg_infer)
        cfg.setdefault(
            "save_tmp_dump", os.path.join("exps", cli_cfg.model_name, "save_tmp")
        )
        cfg.setdefault("image_dump", os.path.join("exps", cli_cfg.model_name, "images"))
        cfg.setdefault(
            "video_dump", os.path.join("dumps", cli_cfg.model_name, "videos")
        )
        cfg.setdefault("mesh_dump", os.path.join("dumps", cli_cfg.model_name, "meshes"))

    cfg.motion_video_read_fps = 30
    cfg.merge_with(cli_cfg)

    cfg.setdefault("logger", "INFO")

    assert cfg.model_name is not None, "model_name is required"

    return cfg, cfg_train


def upload2oss(enable_oac_file, filepath):
    print(f"Upload to OSS: enable_oac_file={enable_oac_file}, filepath={filepath}")
    if enable_oac_file:
        print(f"ZIP file ready for download: {filepath}")
    return "Upload completed"


def demo_lam(flametracking, lam, cfg):
    @spaces.GPU(duration=80)
    def core_fn(image_path: str, video_params, working_dir, enable_oac_file):
        image_raw = os.path.join(working_dir.name, "raw.png")
        with Image.open(image_path).convert('RGB') as img:
            img.save(image_raw)

        base_vid = os.path.basename(video_params).split(".")[0]
        flame_params_dir = os.path.join("./assets/sample_motion/export", base_vid, "flame_param")
        base_iid = os.path.basename(image_path).split('.')[0]
        image_path = os.path.join("./assets/sample_input", base_iid, "images/00000_00.png")

        dump_video_path = os.path.join(working_dir.name, "output.mp4")
        dump_image_path = os.path.join(working_dir.name, "output.png")

        # prepare dump paths
        omit_prefix = os.path.dirname(image_raw)
        image_name = os.path.basename(image_raw)
        uid = image_name.split(".")[0]
        subdir_path = os.path.dirname(image_raw).replace(omit_prefix, "")
        subdir_path = (
            subdir_path[1:] if subdir_path.startswith("/") else subdir_path
        )
        print("subdir_path and uid:", subdir_path, uid)

        motion_seqs_dir = flame_params_dir

        dump_image_dir = os.path.dirname(dump_image_path)
        os.makedirs(dump_image_dir, exist_ok=True)

        print(image_raw, motion_seqs_dir, dump_image_dir, dump_video_path)

        dump_tmp_dir = dump_image_dir

        if os.path.exists(dump_video_path):
            # Early exit for cached output; pad to the 4-output signature.
            return dump_image_path, dump_video_path, '', ''

        motion_img_need_mask = cfg.get("motion_img_need_mask", False)  # False
        vis_motion = cfg.get("vis_motion", False)  # False

        # preprocess input image: segmentation, flame params estimation
        return_code = flametracking.preprocess(image_raw)
        assert (return_code == 0), "flametracking preprocess failed!"
        return_code = flametracking.optimize()
        assert (return_code == 0), "flametracking optimize failed!"
        return_code, output_dir = flametracking.export()
        assert (return_code == 0), "flametracking export failed!"
        image_path = os.path.join(output_dir, "images/00000_00.png")

        mask_path = image_path.replace("/images/", "/fg_masks/").replace(".jpg", ".png")
        print(image_path, mask_path)

        aspect_standard = 1.0 / 1.0
        source_size = cfg.source_size
        render_size = cfg.render_size
        render_fps = 30
        # prepare reference image
        image, _, _, shape_param = preprocess_image(image_path, mask_path=mask_path, intr=None, pad_ratio=0,
                                                    bg_color=1.,
                                                    max_tgt_size=None, aspect_standard=aspect_standard,
                                                    enlarge_ratio=[1.0, 1.0],
                                                    render_tgt_size=source_size, multiply=14, need_mask=True,
                                                    get_shape_param=True)

        # save masked image for vis
        save_ref_img_path = os.path.join(dump_tmp_dir, "output.png")
        vis_ref_img = (image[0].permute(1, 2, 0).cpu().detach().numpy() * 255).astype(np.uint8)
        Image.fromarray(vis_ref_img).save(save_ref_img_path)

        # prepare motion seq
        src = image_path.split('/')[-3]
        driven = motion_seqs_dir.split('/')[-2]
        src_driven = [src, driven]
        motion_seq = prepare_motion_seqs(motion_seqs_dir, None, save_root=dump_tmp_dir, fps=render_fps,
                                         bg_color=1., aspect_standard=aspect_standard, enlarge_ratio=[1.0, 1.0],
                                         render_image_res=render_size, multiply=16,
                                         need_mask=motion_img_need_mask, vis_motion=vis_motion,
                                         shape_param=shape_param, test_sample=False, cross_id=False,
                                         src_driven=src_driven, max_squen_length=300)

        # start inference
        motion_seq["flame_params"]["betas"] = shape_param.unsqueeze(0)
        device, dtype = "cuda", torch.float32
        print("start to inference...................")
        with torch.no_grad():
            # TODO check device and dtype
            res = lam.infer_single_view(image.unsqueeze(0).to(device, dtype), None, None,
                                        render_c2ws=motion_seq["render_c2ws"].to(device),
                                        render_intrs=motion_seq["render_intrs"].to(device),
                                        render_bg_colors=motion_seq["render_bg_colors"].to(device),
                                        flame_params={k: v.to(device) for k, v in motion_seq["flame_params"].items()})

        rgb = res["comp_rgb"].detach().cpu().numpy()  # [Nv, H, W, 3], 0-1
        mask = res["comp_mask"].detach().cpu().numpy()  # [Nv, H, W, 3], 0-1
        mask[mask < 0.5] = 0.0
        rgb = rgb * mask + (1 - mask) * 1
        rgb = (np.clip(rgb, 0, 1.0) * 255).astype(np.uint8)
        if vis_motion:
            vis_ref_img = np.tile(
                cv2.resize(vis_ref_img, (rgb[0].shape[1], rgb[0].shape[0]), interpolation=cv2.INTER_AREA)[None, :, :, :],
                (rgb.shape[0], 1, 1, 1),
            )
            rgb = np.concatenate([vis_ref_img, rgb, motion_seq["vis_motion_render"]], axis=2)

        os.makedirs(os.path.dirname(dump_video_path), exist_ok=True)

        print("===" * 36, "\nrgb length:", rgb.shape, render_fps, "===" * 36)
        save_imgs_2_video(rgb, dump_video_path, render_fps)
        # images_to_video(rgb, output_path=dump_video_path, fps=30, gradio_codec=False, verbose=True)
        audio_path = os.path.join("./assets/sample_motion/export", base_vid, base_vid + ".wav")
        dump_video_path_wa = dump_video_path.replace(".mp4", "_audio.mp4")
        add_audio_to_video(dump_video_path, dump_video_path_wa, audio_path)

        output_zip_path = ''
        download_command = ''

        # ZIP creation logic
        if enable_oac_file:
            try:
                from generateARKITGLBWithBlender import generate_glb

                base_iid_zip = f"chatting_avatar_{int(time.time())}"
                oac_dir = os.path.join('./', base_iid_zip)
                os.makedirs(oac_dir, exist_ok=True)

                # 1. Save the shaped face mesh
                saved_head_path = lam.renderer.flame_model.save_shaped_mesh(shape_param.unsqueeze(0).cuda(), fd=oac_dir)
                print(f"✅ Saved shaped face mesh: {saved_head_path}")

                # 2. Generate offset.ply
                res['cano_gs_lst'][0].save_ply(os.path.join(oac_dir, "offset.ply"), rgb2sh=False, offset2xyz=True)
                print("✅ offset.ply generated")

                # 3. Generate skin.glb (via Blender)
                generate_glb(
                    input_mesh=Path(saved_head_path),
                    template_fbx=Path("./assets/sample_oac/template_file.fbx"),
                    output_glb=Path(os.path.join(oac_dir, "skin.glb")),
                    blender_exec=Path("./blender-4.0.2-linux-x64/blender")
                )
                print("✅ skin.glb generated")

                # 4. Copy animation.glb
                shutil.copy(
                    src='./assets/sample_oac/animation.glb',
                    dst=os.path.join(oac_dir, 'animation.glb')
                )
                print("✅ animation.glb copied")

                # 5. Delete the temporary mesh file
                os.remove(saved_head_path)

                # 6. Create the ZIP file
                output_zip_path = os.path.join('./', base_iid_zip + '.zip')
                if os.path.exists(output_zip_path):
                    os.remove(output_zip_path)
                os.system('zip -r {} {}'.format(output_zip_path, oac_dir))

                # 7. Clean up the staging directory
                shutil.rmtree(oac_dir)

                # 8. Download command for Hugging Face
                download_command = f'wget https://ych144-lam2.hf.space/file={output_zip_path}\n✅ ZIP file generated: {os.path.basename(output_zip_path)}'
                print(f"✅ ZIP created: {output_zip_path}")

            except Exception as e:
                output_zip_path = f"Archive creation failed: {str(e)}"
                download_command = f"❌ ZIP creation failed: {str(e)}"
                print(f"❌ ZIP creation failed: {e}")

        return dump_image_path, dump_video_path_wa, output_zip_path, download_command

    def core_fn_space(image_path: str, video_params, working_dir):
        return core_fn(image_path, video_params, working_dir, False)

    with gr.Blocks(analytics_enabled=False, delete_cache=[3600, 3600]) as demo:

        logo_url = './assets/images/logo.jpeg'
        logo_base64 = get_image_base64(logo_url)
        gr.HTML(f"""
            <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
            <div>
                <h1> <img src="{logo_base64}" style='height:35px; display:inline-block;'/> Large Avatar Model for One-shot Animatable Gaussian Head</h1>
            </div>
            </div>
            """)

        gr.HTML(
            """
            <div style="display: flex; justify-content: center; align-items: center; text-align: center; margin: 20px; gap: 10px;">
                <a class="flex-item" href="https://arxiv.org/abs/2502.17796" target="_blank">
                    <img src="https://img.shields.io/badge/Paper-arXiv-darkred.svg" alt="arXiv Paper">
                </a>
                <a class="flex-item" href="https://aigc3d.github.io/projects/LAM/" target="_blank">
                    <img src="https://img.shields.io/badge/Project-LAM-blue" alt="Project Page">
                </a>
                <a class="flex-item" href="https://github.com/aigc3d/LAM" target="_blank">
                    <img src="https://img.shields.io/github/stars/aigc3d/LAM?label=Github%20★&logo=github&color=C8C" alt="badge-github-stars">
                </a>
                <a class="flex-item" href="https://youtu.be/FrfE3RYSKhk" target="_blank">
                    <img src="https://img.shields.io/badge/Youtube-Video-red.svg" alt="Video">
                </a>
            </div>
            """
        )

        gr.HTML("""<div style="margin-top: -10px">
            <p style="margin: 4px 0; line-height: 1.2"><h4 style="color: black; margin: 2px 0">Note 1: Front-facing input images, or faces oriented close to the driving signal, give better results.</h4></p>
            <p style="margin: 4px 0; line-height: 1.2"><h4 style="color: black; margin: 2px 0">Note 2: Due to computational constraints with Hugging Face's ZeroGPU infrastructure, 3D avatar generation requires ~1 minute per instance.</h4></p>
            <p style="margin: 4px 0; line-height: 1.2"><h4 style="color: black; margin: 2px 0">Note 3: Using the LAM-20K model (lower quality than the premium LAM-80K) to mitigate processing latency.</h4></p>
            </div>""")

        # DISPLAY
        with gr.Row():
            with gr.Column(variant='panel', scale=1):
                with gr.Tabs(elem_id='lam_input_image'):
                    with gr.TabItem('Input Image'):
                        with gr.Row():
                            input_image = gr.Image(label='Input Image',
                                                   image_mode='RGB',
                                                   height=480,
                                                   width=270,
                                                   sources='upload',
                                                   type='filepath',
                                                   elem_id='content_image')
                # EXAMPLES
                with gr.Row():
                    examples = [
                        ['assets/sample_input/messi.png'],
                        ['assets/sample_input/status.png'],
                        ['assets/sample_input/james.png'],
                        ['assets/sample_input/cluo.jpg'],
                        ['assets/sample_input/dufu.jpg'],
                        ['assets/sample_input/libai.jpg'],
                        ['assets/sample_input/barbara.jpg'],
                        ['assets/sample_input/pop.png'],
                        ['assets/sample_input/musk.jpg'],
                        ['assets/sample_input/speed.jpg'],
                        ['assets/sample_input/zhouxingchi.jpg'],
                    ]
                    gr.Examples(
                        examples=examples,
                        inputs=[input_image],
                        examples_per_page=20
                    )

            with gr.Column():
                with gr.Tabs(elem_id='lam_input_video'):
                    with gr.TabItem('Input Video'):
                        with gr.Row():
                            video_input = gr.Video(label='Input Video',
                                                   height=480,
                                                   width=270,
                                                   interactive=False)

                        examples = ['./assets/sample_motion/export/Speeding_Scandal/Speeding_Scandal.mp4',
                                    './assets/sample_motion/export/Look_In_My_Eyes/Look_In_My_Eyes.mp4',
                                    './assets/sample_motion/export/D_ANgelo_Dinero/D_ANgelo_Dinero.mp4',
                                    './assets/sample_motion/export/Michael_Wayne_Rosen/Michael_Wayne_Rosen.mp4',
                                    './assets/sample_motion/export/I_Am_Iron_Man/I_Am_Iron_Man.mp4',
                                    './assets/sample_motion/export/Anti_Drugs/Anti_Drugs.mp4',
                                    './assets/sample_motion/export/Pen_Pineapple_Apple_Pen/Pen_Pineapple_Apple_Pen.mp4',
                                    './assets/sample_motion/export/Joe_Biden/Joe_Biden.mp4',
                                    './assets/sample_motion/export/Donald_Trump/Donald_Trump.mp4',
                                    './assets/sample_motion/export/Taylor_Swift/Taylor_Swift.mp4',
                                    './assets/sample_motion/export/GEM/GEM.mp4',
                                    './assets/sample_motion/export/The_Shawshank_Redemption/The_Shawshank_Redemption.mp4'
                                    ]
                        print("Video example list {}".format(examples))

                        gr.Examples(
                            examples=examples,
                            inputs=[video_input],
                            examples_per_page=20,
                        )
            with gr.Column(variant='panel', scale=1):
                with gr.Tabs(elem_id='lam_processed_image'):
                    with gr.TabItem('Processed Image'):
                        with gr.Row():
                            processed_image = gr.Image(
                                label='Processed Image',
                                image_mode='RGBA',
                                type='filepath',
                                elem_id='processed_image',
                                height=480,
                                width=270,
                                interactive=False)

            with gr.Column(variant='panel', scale=1):
                with gr.Tabs(elem_id='lam_render_video'):
                    with gr.TabItem('Rendered Video'):
                        with gr.Row():
                            output_video = gr.Video(label='Rendered Video',
                                                    format='mp4',
                                                    height=480,
                                                    width=270,
                                                    autoplay=True)

        # SETTING
        with gr.Row():
            with gr.Column(variant='panel', scale=1):
                enable_oac_file = gr.Checkbox(label="Export ZIP file for Chatting Avatar",
                                              value=False, interactive=True)
                submit = gr.Button('Generate',
                                   elem_id='lam_generate',
                                   variant='primary')
                download_command = gr.Textbox(
                    label="📦 Download ZIP Command",
                    interactive=False,
                    placeholder="Check 'Export ZIP file' and generate to get download link...",
                )

        main_fn = core_fn
        output_zip_textbox = gr.Textbox(visible=False)

        working_dir = gr.State()
        submit.click(
            fn=assert_input_image,
            inputs=[input_image],
            queue=False,
        ).success(
            fn=prepare_working_dir,
            outputs=[working_dir],
            queue=False,
        ).success(
            fn=main_fn,
            inputs=[input_image, video_input,
                    working_dir, enable_oac_file],  # video_params refers to the smpl dir
            outputs=[processed_image, output_video, output_zip_textbox, download_command],
        ).success(
            fn=upload2oss,
            inputs=[enable_oac_file, output_zip_textbox]
        )

    demo.queue()
    demo.launch()


def _build_model(cfg):
    from lam.models import model_dict
    from lam.utils.hf_hub import wrap_model_hub

    hf_model_cls = wrap_model_hub(model_dict["lam"])
    model = hf_model_cls.from_pretrained(cfg.model_name)

    return model


def launch_gradio_app():
    os.environ.update({
        'APP_ENABLED': '1',
        'APP_MODEL_NAME': './exps/releases/lam/lam-20k/step_045500/',
        'APP_INFER': './configs/inference/lam-20k-8gpu.yaml',
        'APP_TYPE': 'infer.lam',
        'NUMBA_THREADING_LAYER': 'omp',
    })

    cfg, _ = parse_configs()
    lam = _build_model(cfg)
    lam.to('cuda')

    flametracking = FlameTrackingSingleImage(output_dir='tracking_output',
                                             alignment_model_path='./pretrain_model/68_keypoints_model.pkl',
                                             vgghead_model_path='./pretrain_model/vgghead/vgg_heads_l.trcd',
                                             human_matting_path='./pretrain_model/matting/stylematte_synth.pt',
                                             facebox_model_path='./pretrain_model/FaceBoxesV2.pth',
                                             detect_iris_landmarks=False)

    demo_lam(flametracking, lam, cfg)


if __name__ == '__main__':
    launch_pretrained()
    launch_gradio_app()
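
core_fn shells out to the zip binary to package the avatar directory. A portable sketch of the same packaging step using only the standard library (shutil.make_archive is an alternative, not what the app actually calls):

import shutil

def zip_avatar_dir(oac_dir: str) -> str:
    # Produces "<oac_dir>.zip" next to the staging directory, like 'zip -r' above.
    return shutil.make_archive(oac_dir, "zip", root_dir=".", base_dir=oac_dir)
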