MogensR committed on
Commit
23dc493
·
1 Parent(s): bf805fb
Files changed (7)
  1. Dockerfile +104 -104
  2. requirements.txt +6 -5
  3. ui.py +30 -17
  4. ui_core_functionality.py +318 -337
  5. ui_core_interface.py +74 -96
  6. utils/paths.py +29 -0
  7. utils/perf_tuning.py +21 -0
Dockerfile CHANGED
@@ -1,145 +1,145 @@
1
  # ===============================
2
- # Optimized Dockerfile for Hugging Face Spaces
3
- # PyTorch 2.5.1 + CUDA 12.1 (SAM2 compatible versions)
 
4
  # ===============================
5
 
6
- # Base image with CUDA 12.1.1
7
  FROM nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu22.04
8
 
9
- # Environment variables
10
  ENV DEBIAN_FRONTEND=noninteractive \
11
  PYTHONUNBUFFERED=1 \
12
  PYTHONDONTWRITEBYTECODE=1 \
13
  PIP_NO_CACHE_DIR=1 \
14
  PIP_DISABLE_PIP_VERSION_CHECK=1 \
15
  TORCH_CUDA_ARCH_LIST="7.5 8.0 8.6+PTX" \
16
- CUDA_VISIBLE_DEVICES="0"
17
-
18
- # Create non-root user
19
  RUN useradd -m -u 1000 user
20
  ENV HOME=/home/user
21
  WORKDIR $HOME/app
22
 
23
- # Install system dependencies in a single layer
24
  RUN apt-get update && apt-get install -y --no-install-recommends \
25
- git \
26
- ffmpeg \
27
- wget \
28
- python3 \
29
- python3-pip \
30
- python3-venv \
31
- python3-dev \
32
- build-essential \
33
- gcc \
34
- g++ \
35
- pkg-config \
36
- libffi-dev \
37
- libssl-dev \
38
- libc6-dev \
39
- libgl1-mesa-glx \
40
- libglib2.0-0 \
41
- libsm6 \
42
- libxext6 \
43
- libxrender1 \
44
- libgomp1 \
45
- && rm -rf /var/lib/apt/lists/*
46
-
47
- # Set up Python environment
48
- RUN python3 -m pip install --no-cache-dir --upgrade pip setuptools wheel
49
-
50
- # Install PyTorch 2.5.1 (SAM2 minimum requirement)
51
- RUN python3 -m pip install --no-cache-dir \
52
- torch==2.5.1 \
53
- torchvision==0.20.1 \
54
- torchaudio==2.5.1 \
55
- && python3 -c "import torch; print(f'PyTorch version: {torch.__version__}'); print(f'CUDA available: {torch.cuda.is_available()}'); print(f'CUDA version: {torch.version.cuda if torch.cuda.is_available() else \"N/A\"}'); print(f'cuDNN version: {torch.backends.cudnn.version() if torch.cuda.is_available() else \"N/A\"}')"
56
-
57
- # Copy requirements files first for better caching
58
- COPY --chown=user requirements.txt ./
59
-
60
- # Install Python dependencies
61
  RUN python3 -m pip install --no-cache-dir -r requirements.txt
62
 
63
- # Install MatAnyone with retry logic and fallback dependencies
64
- RUN echo "Installing problematic dependencies first..." && \
65
- python3 -m pip install --no-cache-dir chardet charset-normalizer && \
66
- echo "Installing MatAnyone..." && \
67
  (python3 -m pip install --no-cache-dir -v git+https://github.com/pq-yang/MatAnyone@main#egg=matanyone || \
68
- (echo "Retrying MatAnyone installation..." && \
69
  python3 -m pip install --no-cache-dir -v git+https://github.com/pq-yang/MatAnyone@main#egg=matanyone)) && \
70
- python3 -c "import matanyone; print('MatAnyone import successful')"
71
 
72
- # Copy application code
73
- COPY --chown=user . .
74
 
75
- # Install SAM2
76
- RUN echo "Installing SAM2..." && \
 
77
  git clone --depth=1 https://github.com/facebookresearch/segment-anything-2.git third_party/sam2 && \
78
- cd third_party/sam2 && \
79
- python3 -m pip install --no-cache-dir -e .
80
 
81
- # Set up environment variables - IMPORTANT: Disable bytecode generation
82
  ENV PYTHONPATH=/home/user/app:/home/user/app/third_party:/home/user/app/third_party/sam2 \
83
  FFMPEG_BIN=ffmpeg \
84
  THIRD_PARTY_SAM2_DIR=/home/user/app/third_party/sam2 \
85
  ENABLE_MATANY=1 \
86
  SAM2_DEVICE=cuda \
87
  MATANY_DEVICE=cuda \
88
- OMP_NUM_THREADS=1 \
89
  TF_CPP_MIN_LOG_LEVEL=2 \
90
- SAM2_CHECKPOINT=/home/user/app/checkpoints/sam2_hiera_large.pt \
91
- PYTHONDONTWRITEBYTECODE=1
92
-
93
- # Create checkpoints directory
94
- RUN mkdir -p /home/user/app/checkpoints
95
-
96
- # Note: SAM2 model will be downloaded at runtime via lazy loading
97
 
98
- # Fix permissions thoroughly
99
- RUN chown -R user:user /home/user/app && \
 
100
  chmod -R 755 /home/user/app && \
101
  find /home/user/app -type d -exec chmod 755 {} \; && \
102
  find /home/user/app -type f -exec chmod 644 {} \; && \
103
- chmod +x /home/user/app/ui.py
104
 
105
- # Health check
106
- HEALTHCHECK --interval=30s --timeout=5s --retries=3 CMD python3 -c "import torch; print(f'PyTorch: {torch.__version__}, CUDA: {torch.cuda.is_available()}')"
107
 
108
- # Run as non-root user
109
  USER user
110
  EXPOSE 7860
111
 
112
- # Start the application with comprehensive debugging and no bytecode compilation
113
  CMD ["sh", "-c", "\
114
- echo '===========================================' && \
115
- echo '=== BACKGROUNDFX PRO CONTAINER STARTUP ===' && \
116
- echo '===========================================' && \
117
- echo 'Timestamp:' $(date) && \
118
- echo 'Current directory:' $(pwd) && \
119
- echo 'Current user:' $(whoami) && \
120
- echo 'User ID:' $(id) && \
121
- echo '' && \
122
- echo '=== FILE SYSTEM CHECK ===' && \
123
- echo 'Files in app directory:' && \
124
- ls -la && \
125
- echo '' && \
126
- echo '=== UI.PY VERIFICATION ===' && \
127
- if [ -f ui.py ]; then \
128
- echo '✅ ui.py found' && \
129
- echo 'File size:' $(wc -c < ui.py) 'bytes' && \
130
- echo 'File permissions:' $(ls -l ui.py) && \
131
- echo 'Testing Python imports...' && \
132
- python3 -B -c 'import gradio; print(\"✅ Gradio:\", gradio.__version__)' && \
133
- python3 -B -c 'import torch; print(\"✅ Torch:\", torch.__version__)' && \
134
- echo 'Testing ui.py import...' && \
135
- python3 -B -c 'import sys; sys.path.insert(0, \".\"); import ui; print(\"✅ ui.py imports successfully\")' && \
136
- echo '✅ All checks passed!'; \
137
- else \
138
- echo '❌ ERROR: ui.py not found!' && \
139
- exit 1; \
140
- fi && \
141
- echo '' && \
142
- echo '=== STARTING APPLICATION ===' && \
143
- echo 'Launching ui.py with bytecode disabled...' && \
144
- python3 -B -u ui.py \
145
- "]
 
1
  # ===============================
2
+ # Hugging Face Space — Stable Dockerfile
3
+ # CUDA 12.1.1 + PyTorch 2.5.1 (cu121) + Gradio 4.41.3
4
+ # SAM2 installed from source; MatAnyone via pip (repo)
5
  # ===============================
6
 
 
7
  FROM nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu22.04
8
 
9
+ # ---- Environment (runtime hygiene) ----
10
  ENV DEBIAN_FRONTEND=noninteractive \
11
  PYTHONUNBUFFERED=1 \
12
  PYTHONDONTWRITEBYTECODE=1 \
13
  PIP_NO_CACHE_DIR=1 \
14
  PIP_DISABLE_PIP_VERSION_CHECK=1 \
15
  TORCH_CUDA_ARCH_LIST="7.5 8.0 8.6+PTX" \
16
+ CUDA_VISIBLE_DEVICES="0" \
17
+ # Threads (fixes libgomp warnings)
18
+ OMP_NUM_THREADS=4 \
19
+ OPENBLAS_NUM_THREADS=1 \
20
+ MKL_NUM_THREADS=1 \
21
+ NUMEXPR_NUM_THREADS=1 \
22
+ # Caches inside repo volume (stable on Spaces)
23
+ HF_HOME=/home/user/app/.hf \
24
+ TORCH_HOME=/home/user/app/.torch \
25
+ # Gradio port coherency (HF proxy)
26
+ GRADIO_SERVER_PORT=7860
27
+
28
+ # ---- Non-root user ----
29
  RUN useradd -m -u 1000 user
30
  ENV HOME=/home/user
31
  WORKDIR $HOME/app
32
 
33
+ # ---- System deps ----
34
  RUN apt-get update && apt-get install -y --no-install-recommends \
35
+ git ffmpeg wget curl \
36
+ python3 python3-pip python3-venv python3-dev \
37
+ build-essential gcc g++ pkg-config \
38
+ libffi-dev libssl-dev libc6-dev \
39
+ libgl1-mesa-glx libglib2.0-0 libsm6 libxext6 libxrender1 libgomp1 \
40
+ && rm -rf /var/lib/apt/lists/*
41
+
42
+ # ---- Python bootstrap ----
43
+ RUN python3 -m pip install --upgrade pip setuptools wheel
44
+
45
+ # ---- Install PyTorch (CUDA 12.1 wheels) ----
46
+ # Using the official cu121 wheel index keeps runtime aligned with base image
47
+ RUN python3 -m pip install --no-cache-dir --index-url https://download.pytorch.org/whl/cu121 \
48
+ torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 \
49
+ && python3 - <<'PY'
50
+ import torch
51
+ print("PyTorch:", torch.__version__)
52
+ print("CUDA available:", torch.cuda.is_available())
53
+ print("torch.version.cuda:", getattr(torch.version, "cuda", None))
54
+ try:
55
+ import torchaudio, torchvision
56
+ print("torchaudio:", torchaudio.__version__)
57
+ import torchvision as tv; print("torchvision:", tv.__version__)
58
+ except Exception as e:
59
+ print("aux libs check:", e)
60
+ PY
61
+
62
+ # ---- Copy deps first (better caching) ----
63
+ COPY --chown=user:user requirements.txt ./
64
+
65
+ # ---- Install remaining Python deps ----
66
+ # (requirements.txt contains gradio==4.41.3 and libs; NOT the SAM2 repo)
67
  RUN python3 -m pip install --no-cache-dir -r requirements.txt
68
 
69
+ # ---- MatAnyone (pip install from repo with retry) ----
70
+ RUN echo "Installing MatAnyone..." && \
 
 
71
  (python3 -m pip install --no-cache-dir -v git+https://github.com/pq-yang/MatAnyone@main#egg=matanyone || \
72
+ (echo "Retrying MatAnyone..." && \
73
  python3 -m pip install --no-cache-dir -v git+https://github.com/pq-yang/MatAnyone@main#egg=matanyone)) && \
74
+ python3 -c "import matanyone; print('MatAnyone import OK')"
75
 
76
+ # ---- App code ----
77
+ COPY --chown=user:user . .
78
 
79
+ # ---- SAM2 from source (editable) ----
80
+ # Keep SAM2 out of requirements.txt to avoid duplicate installs
81
+ RUN echo "Installing SAM2 (editable)..." && \
82
  git clone --depth=1 https://github.com/facebookresearch/segment-anything-2.git third_party/sam2 && \
83
+ cd third_party/sam2 && python3 -m pip install --no-cache-dir -e .
 
84
 
85
+ # ---- App env ----
86
  ENV PYTHONPATH=/home/user/app:/home/user/app/third_party:/home/user/app/third_party/sam2 \
87
  FFMPEG_BIN=ffmpeg \
88
  THIRD_PARTY_SAM2_DIR=/home/user/app/third_party/sam2 \
89
  ENABLE_MATANY=1 \
90
  SAM2_DEVICE=cuda \
91
  MATANY_DEVICE=cuda \
 
92
  TF_CPP_MIN_LOG_LEVEL=2 \
93
+ SAM2_CHECKPOINT=/home/user/app/checkpoints/sam2_hiera_large.pt
94
 
95
+ # ---- Create writable dirs (caches + checkpoints) ----
96
+ RUN mkdir -p /home/user/app/checkpoints /home/user/app/.hf /home/user/app/.torch && \
97
+ chown -R user:user /home/user/app && \
98
  chmod -R 755 /home/user/app && \
99
  find /home/user/app -type d -exec chmod 755 {} \; && \
100
  find /home/user/app -type f -exec chmod 644 {} \; && \
101
+ chmod +x /home/user/app/ui.py || true
102
 
103
+ # ---- Healthcheck (lightweight) ----
104
+ HEALTHCHECK --interval=30s --timeout=5s --retries=3 CMD python3 - <<'PY' || exit 1
105
+ import torch, sys
106
+ print(f"torch {torch.__version__} | cuda {getattr(torch.version,'cuda',None)} | ok={torch.cuda.is_available()}")
107
+ PY
108
 
109
+ # ---- Runtime ----
110
  USER user
111
  EXPOSE 7860
112
 
 
113
  CMD ["sh", "-c", "\
114
+ echo '===========================================' && \
115
+ echo '=== BACKGROUNDFX PRO CONTAINER STARTUP ===' && \
116
+ echo '===========================================' && \
117
+ echo 'Timestamp:' $(date) && \
118
+ echo 'Current directory:' $(pwd) && \
119
+ echo 'Current user:' $(whoami) && \
120
+ echo 'User ID:' $(id) && \
121
+ echo '' && \
122
+ echo '=== FILE SYSTEM CHECK ===' && \
123
+ echo 'Files in app directory:' && \
124
+ ls -la && \
125
+ echo '' && \
126
+ echo '=== UI.PY VERIFICATION ===' && \
127
+ if [ -f ui.py ]; then \
128
+ echo '✅ ui.py found' && \
129
+ echo 'File size:' $(wc -c < ui.py) 'bytes' && \
130
+ echo 'File permissions:' $(ls -l ui.py) && \
131
+ echo 'Testing Python imports...' && \
132
+ python3 -B -c 'import gradio; print(\"✅ Gradio:\", gradio.__version__)' && \
133
+ python3 -B -c 'import torch; print(\"✅ Torch:\", torch.__version__)' && \
134
+ echo 'Testing ui.py import...' && \
135
+ python3 -B -c 'import sys; sys.path.insert(0, \".\"); import ui; print(\"✅ ui.py imports successfully\")' && \
136
+ echo '✅ All checks passed!'; \
137
+ else \
138
+ echo '❌ ERROR: ui.py not found!' && \
139
+ exit 1; \
140
+ fi && \
141
+ echo '' && \
142
+ echo '=== STARTING APPLICATION ===' && \
143
+ echo 'Launching ui.py with bytecode disabled...' && \
144
+ python3 -B -u ui.py \
145
+ "]
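For reference: the MatAnyone layer above installs from the Git repo, retries once on failure, and import-checks the package in the same step. A minimal Python sketch of that retry-and-verify pattern, for reference only; the helper names, retry count, and wait time are assumptions and are not part of this commit.

import importlib
import subprocess
import sys
import time

def pip_install_with_retry(spec: str, attempts: int = 2, wait_s: float = 5.0) -> None:
    """Run 'pip install <spec>', retrying on transient git/network failures."""
    for attempt in range(1, attempts + 1):
        try:
            subprocess.run(
                [sys.executable, "-m", "pip", "install", "--no-cache-dir", spec],
                check=True,
            )
            return
        except subprocess.CalledProcessError:
            if attempt == attempts:
                raise
            time.sleep(wait_s)

def verify_import(module: str) -> None:
    """Fail the build early if the freshly installed package cannot be imported."""
    importlib.import_module(module)
    print(f"{module} import OK")

if __name__ == "__main__":
    pip_install_with_retry("git+https://github.com/pq-yang/MatAnyone@main#egg=matanyone")
    verify_import("matanyone")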
requirements.txt CHANGED
@@ -1,7 +1,7 @@
1
  # ===== Core Dependencies =====
2
- # PyTorch is installed in Dockerfile with CUDA 12.1 - UPDATED FOR SAM2 COMPATIBILITY
3
  # torch==2.5.1
4
- # torchvision==0.20.0
5
  # torchaudio==2.5.1
6
 
7
  # ===== Base Dependencies =====
@@ -28,6 +28,7 @@ einops>=0.6.0,<0.9.0
28
  timm>=0.9.0,<1.1.0
29
  pyyaml>=6.0.0,<7.0.0
30
  matplotlib>=3.5.0,<4.0.0
 
31
 
32
  # ===== MatAnyone Dependencies =====
33
  # MatAnyone is installed separately in Dockerfile
@@ -35,7 +36,7 @@ kornia>=0.7.0,<0.8.0
35
  tqdm>=4.60.0,<5.0.0
36
 
37
  # ===== UI and API =====
38
- gradio>=5.0.0,<6.0.0
39
 
40
  # ===== Helpers and Utilities =====
41
  huggingface-hub>=0.20.0,<1.0.0
@@ -55,6 +56,6 @@ loguru>=0.6.0,<1.0.0
55
  # File handling
56
  python-multipart>=0.0.5,<1.0.0
57
 
58
- # Web server
59
  uvicorn>=0.20.0,<1.0.0
60
- fastapi>=0.95.0,<1.0.0
 
1
  # ===== Core Dependencies =====
2
+ # PyTorch is installed in Dockerfile with CUDA 12.1 — REQUIRED for SAM2
3
  # torch==2.5.1
4
+ # torchvision==0.20.1
5
  # torchaudio==2.5.1
6
 
7
  # ===== Base Dependencies =====
 
28
  timm>=0.9.0,<1.1.0
29
  pyyaml>=6.0.0,<7.0.0
30
  matplotlib>=3.5.0,<4.0.0
31
+ iopath>=0.1.10,<0.2.0
32
 
33
  # ===== MatAnyone Dependencies =====
34
  # MatAnyone is installed separately in Dockerfile
 
36
  tqdm>=4.60.0,<5.0.0
37
 
38
  # ===== UI and API =====
39
+ gradio==4.41.3
40
 
41
  # ===== Helpers and Utilities =====
42
  huggingface-hub>=0.20.0,<1.0.0
 
56
  # File handling
57
  python-multipart>=0.0.5,<1.0.0
58
 
59
+ # Web server (if you use your own API endpoints besides Gradio)
60
  uvicorn>=0.20.0,<1.0.0
61
+ fastapi>=0.110.0,<0.116.0
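For reference: requirements.txt now pins gradio==4.41.3 and keeps torch/torchvision/torchaudio out of it (they come from the cu121 wheels in the Dockerfile). Below is a small sketch of a startup check that the installed versions match those pins; the check_pins helper is an assumption (not in the repo), only the pinned values mirror this commit.

from importlib.metadata import PackageNotFoundError, version

# Pins mirrored from this commit; cu121 torch wheels report e.g. "2.5.1+cu121",
# so a prefix match is used instead of strict equality.
EXPECTED = {
    "gradio": "4.41.3",
    "torch": "2.5.1",
    "torchvision": "0.20.1",
    "torchaudio": "2.5.1",
}

def check_pins(expected: dict) -> None:
    for pkg, want in expected.items():
        try:
            have = version(pkg)
        except PackageNotFoundError:
            print(f"MISSING   {pkg} (expected {want})")
            continue
        status = "OK      " if have.startswith(want) else "MISMATCH"
        print(f"{status}  {pkg}: installed={have} expected={want}")

if __name__ == "__main__":
    check_pins(EXPECTED)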
ui.py CHANGED
@@ -1,6 +1,6 @@
1
  #!/usr/bin/env python3
2
  """
3
- BackgroundFX Pro — Main UI Application
4
  Clean, focused main file that coordinates the application
5
  """
6
 
@@ -36,40 +36,53 @@
36
  import torch
37
  import gradio as gr
38
 
39
- # Import our modules
40
  from ui_core_functionality import startup_probe, logger
41
  from ui_core_interface import create_interface
42
 
43
- # ===============================================================================
 
44
  # MAIN APPLICATION
45
- # ===============================================================================
46
 
47
  def main():
48
  """Main application entry point"""
49
  try:
50
- # Run startup probe
51
  startup_probe()
52
-
53
- # Create and launch interface
54
  logger.info("🚀 Launching Gradio interface...")
55
  demo = create_interface()
56
-
57
- # Fixed queue configuration for Gradio 5.46.0
58
- demo.queue()
59
-
 
 
 
60
  demo.launch(
61
  server_name="0.0.0.0",
62
- server_port=7860,
63
  share=False,
64
- show_api=True,
65
  show_error=True,
66
  quiet=False,
67
- debug=True
 
68
  )
69
-
70
  except Exception as e:
71
- logger.error(f"❌ Application startup failed: {e}")
72
  raise
73
 
 
74
  if __name__ == "__main__":
75
- main()
 
1
  #!/usr/bin/env python3
2
  """
3
+ BackgroundFX Pro — Main UI Application (Gradio 4.41.3)
4
  Clean, focused main file that coordinates the application
5
  """
6
 
 
36
  import torch
37
  import gradio as gr
38
 
39
+ # Import our modules (logger + probe + interface factory live here)
40
  from ui_core_functionality import startup_probe, logger
41
  from ui_core_interface import create_interface
42
 
43
+
44
+ # =======================================================================
45
  # MAIN APPLICATION
46
+ # =======================================================================
47
 
48
  def main():
49
  """Main application entry point"""
50
  try:
51
+ # Fast sanity check for writable FS etc.
52
  startup_probe()
53
+
54
+ # Log versions for quick triage
55
  logger.info("🚀 Launching Gradio interface...")
56
+ logger.info("Gradio=%s | torch=%s | cu=%s | cuda_available=%s",
57
+ getattr(gr, "__version__", "?"),
58
+ torch.__version__,
59
+ getattr(torch.version, "cuda", None),
60
+ torch.cuda.is_available())
61
+
62
+ # Build interface
63
  demo = create_interface()
64
+
65
+ # --- Gradio 4.x runtime settings ---
66
+ # Keep queue small (prevents RAM spikes) and single-thread the server.
67
+ demo.queue(max_size=2)
68
+
69
+ # Bind to the Space-provided port (avoids proxy kills)
70
+ port = int(os.environ.get("PORT", os.environ.get("GRADIO_SERVER_PORT", "7860")))
71
  demo.launch(
72
  server_name="0.0.0.0",
73
+ server_port=port,
74
  share=False,
75
+ show_api=False, # safer on public Spaces
76
  show_error=True,
77
  quiet=False,
78
+ debug=False,
79
+ max_threads=1 # 4.x: prefer max_threads over concurrency_count
80
  )
81
+
82
  except Exception as e:
83
+ logger.error("❌ Application startup failed: %s", e)
84
  raise
85
 
86
+
87
  if __name__ == "__main__":
88
+ main()
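For reference: the new ui.py resolves its port from PORT (falling back to GRADIO_SERVER_PORT, then 7860), caps the queue at max_size=2, and builds the app via create_interface(). A local smoke test along those lines, reusing the module names from this commit; the test itself is an assumption and is not part of the repo.

import os

from ui_core_functionality import startup_probe, logger
from ui_core_interface import create_interface

def smoke_test() -> None:
    # Same port resolution the new ui.py uses: PORT, then GRADIO_SERVER_PORT, then 7860.
    port = int(os.environ.get("PORT", os.environ.get("GRADIO_SERVER_PORT", "7860")))
    logger.info("Resolved server port: %s", port)

    startup_probe()            # fails fast if the filesystem/caches are not writable
    demo = create_interface()  # build the Blocks app without launching the server
    logger.info("Interface built: %s", type(demo).__name__)

if __name__ == "__main__":
    smoke_test()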
ui_core_functionality.py CHANGED
@@ -2,7 +2,7 @@
2
  """
3
  BackgroundFX Pro — Core Functionality
4
  All processing logic, utilities, background generators, and handlers
5
- Enhanced with ChatGPT's file safety and error handling improvements
6
  """
7
 
8
  import os
@@ -28,103 +28,171 @@
28
  import numpy as np
29
  from PIL import Image, ImageDraw, ImageFont
30
  import cv2
31
- import gradio as gr
32
- import importlib.metadata
33
 
34
- # Get paths from main module
35
- APP_ROOT = Path(__file__).resolve().parent
36
  DATA_ROOT = APP_ROOT / "data"
37
- TMP_ROOT = APP_ROOT / "tmp"
38
- JOB_ROOT = TMP_ROOT / "backgroundfx_jobs"
39
-
40
- # Configure logging
41
- logger = logging.getLogger("ui")
42
- if not logger.handlers:
43
- h = logging.StreamHandler()
44
- h.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s: %(message)s"))
45
- logger.addHandler(h)
46
- logger.setLevel(logging.INFO)
47
-
48
- # ===============================================================================
49
- # STARTUP VALIDATION
50
- # ===============================================================================
51
 
52
  def _disk_stats(p: Path) -> str:
53
- """Get disk usage statistics for a path"""
54
  try:
55
  total, used, free = shutil.disk_usage(str(p))
56
- to_mb = lambda x: x // (1024 * 1024)
57
- return f"disk(total={to_mb(total)}MB, used={to_mb(used)}MB, free={to_mb(free)}MB)"
58
  except Exception:
59
  return "disk(n/a)"
60
 
61
  def startup_probe():
62
  """Comprehensive startup probe - validates system readiness"""
63
  try:
64
  logger.info("🚀 BACKGROUNDFX PRO STARTUP PROBE")
65
- logger.info(f"📁 Working directory: {os.getcwd()}")
66
- logger.info(f"🐍 Python executable: {sys.executable}")
67
-
68
- # Write probe (critical - fail fast if can't write)
69
  probe_file = TMP_ROOT / "startup_probe.txt"
70
  probe_file.write_text("startup_test_ok", encoding="utf-8")
71
  assert probe_file.read_text(encoding="utf-8") == "startup_test_ok"
72
- logger.info(f"βœ… WRITE PROBE OK: {probe_file} | {_disk_stats(APP_ROOT)}")
73
-
74
  probe_file.unlink(missing_ok=True)
75
-
76
  # GPU/Torch status
77
  try:
78
- logger.info(f"πŸ”§ Torch version: {torch.__version__}")
79
- if hasattr(torch.version, 'cuda') and torch.version.cuda:
80
- logger.info(f"πŸ”§ CUDA version: {torch.version.cuda}")
81
-
82
  if torch.cuda.is_available():
83
  gpu_count = torch.cuda.device_count()
84
- gpu_name = torch.cuda.get_device_name(0) if gpu_count > 0 else "Unknown"
85
- gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1e9
86
- logger.info(f"πŸ”₯ GPU Available: {gpu_name} ({gpu_count} devices)")
87
- logger.info(f"πŸ’Ύ GPU Memory: {gpu_memory:.1f}GB")
88
  else:
89
- logger.warning("⚠️ No GPU available - using CPU only")
90
  except Exception as e:
91
- logger.warning(f"⚠️ Torch check failed: {e}")
92
-
93
  # Directory verification
94
- required_dirs = ['checkpoints', 'models', 'utils']
95
- for dir_name in required_dirs:
96
- dir_path = APP_ROOT / dir_name
97
- if dir_path.exists():
98
- logger.info(f"βœ… Directory found: {dir_name}")
99
- else:
100
- logger.warning(f"⚠️ Missing directory: {dir_name}")
101
-
102
- # Job directory test
103
- test_job_dir = JOB_ROOT / "startup_test_job"
104
- test_job_dir.mkdir(parents=True, exist_ok=True)
105
- test_file = test_job_dir / "test.tmp"
106
- test_file.write_text("job_isolation_test")
107
- assert test_file.read_text() == "job_isolation_test"
108
- logger.info(f"βœ… Job isolation directory ready: {JOB_ROOT}")
109
-
110
- shutil.rmtree(test_job_dir, ignore_errors=True)
111
-
112
- # Environment summary
113
- logger.info("🌍 Environment:")
114
- logger.info(f" β€’ OMP_NUM_THREADS: {os.environ.get('OMP_NUM_THREADS', 'unset')}")
115
- logger.info(f" β€’ HF_HOME: {os.environ.get('HF_HOME', 'default')}")
116
- logger.info(f" β€’ TORCH_HOME: {os.environ.get('TORCH_HOME', 'default')}")
117
-
118
- logger.info("🎯 Startup probe completed successfully - system ready!")
119
-
120
  except Exception as e:
121
- logger.error(f"❌ STARTUP PROBE FAILED: {e}")
122
- logger.error(f"πŸ“Š Disk stats: {_disk_stats(APP_ROOT)}")
123
- raise RuntimeError(f"Startup probe failed - system not ready: {e}") from e
124
 
125
- # ===============================================================================
126
- # CHATGPT ENHANCED FILE SAFETY UTILITIES
127
- # ===============================================================================
128
 
129
  def new_tmp_path(suffix: str) -> Path:
130
  """Generate safe temporary path within TMP_ROOT"""
@@ -136,8 +204,8 @@ def atomic_write_bytes(dst: Path, data: bytes):
136
  try:
137
  with open(tmp, "wb") as f:
138
  f.write(data)
139
- tmp.replace(dst) # atomic on same filesystem
140
- logger.debug(f"βœ… Atomic write completed: {dst}")
141
  except Exception as e:
142
  if tmp.exists():
143
  tmp.unlink(missing_ok=True)
@@ -155,35 +223,36 @@ def place_uploaded(in_path: str, sub="uploads") -> Path:
155
  target_dir.mkdir(exist_ok=True, parents=True)
156
  out = target_dir / safe_name(Path(in_path).name)
157
  shutil.copy2(in_path, out)
158
- logger.info(f"πŸ“ Uploaded file placed: {out}")
159
  return out
160
 
161
  def tmp_video_path(ext=".mp4") -> Path:
162
- """Generate temporary video path"""
163
  return new_tmp_path(ext)
164
 
165
  def tmp_image_path(ext=".png") -> Path:
166
- """Generate temporary image path"""
167
  return new_tmp_path(ext)
168
 
169
  def run_safely(fn: Callable, *args, **kwargs):
170
  """Execute function with comprehensive error logging"""
171
  try:
172
  return fn(*args, **kwargs)
173
- except Exception as e:
174
  logger.error("PROCESSING FAILED\n%s", "".join(traceback.format_exc()))
175
  logger.error("CWD=%s | DATA_ROOT=%s | TMP_ROOT=%s | %s",
176
- os.getcwd(), DATA_ROOT, TMP_ROOT, _disk_stats(APP_ROOT))
177
- logger.error("Env: OMP_NUM_THREADS=%s | CUDA=%s | torch=%s | cu=%s",
178
- os.environ.get("OMP_NUM_THREADS"),
179
- os.environ.get("CUDA_VISIBLE_DEVICES", "default"),
180
- torch.__version__,
181
- torch.version.cuda)
 
 
 
182
  raise
183
 
184
- # ===============================================================================
185
  # SYSTEM UTILITIES
186
- # ===============================================================================
187
 
188
  def get_device():
189
  """Get optimal device for processing"""
@@ -198,7 +267,7 @@ def clear_gpu_memory():
198
  gc.collect()
199
  logger.info("🧹 GPU memory cleared")
200
  except Exception as e:
201
- logger.warning(f"⚠️ GPU cleanup warning: {e}")
202
 
203
  def safe_file_operation(operation: Callable, *args, max_retries: int = 3, **kwargs):
204
  """Safely execute file operations with retries"""
@@ -210,119 +279,87 @@ def safe_file_operation(operation: Callable, *args, max_retries: int = 3, **kwar
210
  last_error = e
211
  if attempt < max_retries - 1:
212
  time.sleep(0.1 * (attempt + 1))
213
- logger.warning(f"File operation retry {attempt + 1}: {e}")
214
  else:
215
- logger.error(f"File operation failed after {max_retries} attempts: {e}")
216
  raise last_error
217
 
218
- # ===============================================================================
219
  # BACKGROUND GENERATORS
220
- # ===============================================================================
221
 
222
  def generate_ai_background(prompt: str, width: int, height: int) -> Image.Image:
223
- """Generate AI background using prompt-based styling"""
224
  try:
225
- logger.info(f"Generating AI background: '{prompt}' ({width}x{height})")
226
-
227
- # Create base image
228
  img = np.zeros((height, width, 3), dtype=np.uint8)
229
-
230
- # Analyze prompt for style/color cues
231
  prompt_lower = prompt.lower()
232
-
233
- if any(word in prompt_lower for word in ['city', 'urban', 'futuristic', 'cyberpunk']):
234
- # Dark cityscape style
235
  for i in range(height):
236
- ratio = i / height
237
- r = int(20 + 80 * ratio)
238
- g = int(30 + 100 * ratio)
239
- b = int(60 + 120 * ratio)
240
  img[i, :] = [r, g, b]
241
-
242
- elif any(word in prompt_lower for word in ['beach', 'tropical', 'ocean', 'sea']):
243
- # Beach/ocean style
244
  for i in range(height):
245
- ratio = i / height
246
- r = int(135 + 120 * ratio)
247
- g = int(206 + 49 * ratio)
248
- b = int(235 + 20 * ratio)
249
  img[i, :] = [r, g, b]
250
-
251
- elif any(word in prompt_lower for word in ['forest', 'jungle', 'nature', 'green']):
252
- # Forest style
253
  for i in range(height):
254
- ratio = i / height
255
- r = int(34 + 105 * ratio)
256
- g = int(139 + 30 * ratio)
257
- b = int(34 - 15 * ratio)
258
  img[i, :] = [max(0, r), max(0, g), max(0, b)]
259
-
260
- elif any(word in prompt_lower for word in ['space', 'galaxy', 'stars', 'cosmic']):
261
- # Space style
262
  for i in range(height):
263
- ratio = i / height
264
- r = int(10 + 50 * ratio)
265
- g = int(0 + 30 * ratio)
266
- b = int(30 + 100 * ratio)
267
  img[i, :] = [r, g, b]
268
-
269
- elif any(word in prompt_lower for word in ['desert', 'sand', 'canyon']):
270
- # Desert style
271
  for i in range(height):
272
- ratio = i / height
273
- r = int(238 + 17 * ratio)
274
- g = int(203 + 52 * ratio)
275
- b = int(173 + 82 * ratio)
276
  img[i, :] = [min(255, r), min(255, g), min(255, b)]
277
-
278
  else:
279
- # Generic colorful gradient based on prompt mood
280
- colors = [(255, 182, 193), (255, 218, 185), (176, 224, 230)] # Soft colors
281
  color = colors[len(prompt) % len(colors)]
282
-
283
  for i in range(height):
284
- ratio = i / height
285
- r = int(color[0] * (1 - ratio * 0.3))
286
- g = int(color[1] * (1 - ratio * 0.3))
287
- b = int(color[2] * (1 - ratio * 0.3))
288
- img[i, :] = [r, g, b]
289
-
290
- # Add some texture/noise for more interesting look
291
  noise = np.random.randint(-15, 15, (height, width, 3))
292
  img = np.clip(img.astype(np.int16) + noise, 0, 255).astype(np.uint8)
293
-
294
  return Image.fromarray(img)
295
-
296
  except Exception as e:
297
- logger.warning(f"AI background generation failed: {e}, using fallback")
298
  return create_gradient_background("sunset", width, height)
299
 
300
  def create_gradient_background(gradient_type: str, width: int, height: int) -> Image.Image:
301
- """Create gradient background"""
302
  img = np.zeros((height, width, 3), dtype=np.uint8)
303
-
304
  gradients = {
305
  "sunset": [(255, 165, 0), (128, 64, 128)],
306
- "ocean": [(0, 100, 255), (30, 144, 255)],
307
  "forest": [(34, 139, 34), (139, 69, 19)],
308
- "sky": [(135, 206, 235), (206, 235, 255)]
309
  }
310
-
311
  if gradient_type in gradients:
312
- start_color, end_color = gradients[gradient_type]
313
  for i in range(height):
314
- ratio = i / height
315
- r = int(start_color[0] * (1 - ratio) + end_color[0] * ratio)
316
- g = int(start_color[1] * (1 - ratio) + end_color[1] * ratio)
317
- b = int(start_color[2] * (1 - ratio) + end_color[2] * ratio)
318
  img[i, :] = [r, g, b]
319
  else:
320
  img.fill(128)
321
-
322
  return Image.fromarray(img)
323
 
324
  def create_solid_background(color: str, width: int, height: int) -> Image.Image:
325
- """Create solid color background"""
326
  color_map = {
327
  "white": (255, 255, 255), "black": (0, 0, 0), "red": (255, 0, 0),
328
  "green": (0, 255, 0), "blue": (0, 0, 255), "yellow": (255, 255, 0),
@@ -333,77 +370,62 @@ def create_solid_background(color: str, width: int, height: int) -> Image.Image:
333
  return Image.new("RGB", (width, height), rgb)
334
 
335
  def download_unsplash_image(query: str, width: int, height: int) -> Image.Image:
336
- """Download image from Unsplash"""
337
  try:
338
  url = f"https://source.unsplash.com/{width}x{height}/?{query}"
339
- response = requests.get(url, timeout=10)
340
- response.raise_for_status()
341
-
342
- image = Image.open(io.BytesIO(response.content))
343
- if image.size != (width, height):
344
- image = image.resize((width, height), Image.Resampling.LANCZOS)
345
-
346
- return image.convert("RGB")
347
  except Exception as e:
348
- logger.warning(f"Failed to download Unsplash image: {e}")
349
  return create_solid_background("gray", width, height)
350
 
351
- # ===============================================================================
352
- # VIDEO PROCESSING UTILITIES
353
- # ===============================================================================
354
 
355
  def get_video_info(video_path: str) -> Dict[str, Any]:
356
- """Get video information using OpenCV"""
357
  try:
358
  cap = cv2.VideoCapture(video_path)
359
  if not cap.isOpened():
360
  raise ValueError("Cannot open video file")
361
-
362
- fps = cap.get(cv2.CAP_PROP_FPS)
363
- frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
364
- width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
365
- height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
366
- duration = frame_count / fps if fps > 0 else 0
367
-
368
  cap.release()
369
-
370
- return {
371
- "fps": fps, "frame_count": frame_count, "width": width,
372
- "height": height, "duration": duration
373
- }
374
  except Exception as e:
375
- logger.error(f"Failed to get video info: {e}")
376
  return {"fps": 30.0, "frame_count": 0, "width": 1920, "height": 1080, "duration": 0}
377
 
378
  def extract_frame(video_path: str, frame_number: int) -> Optional[np.ndarray]:
379
- """Extract specific frame from video"""
380
  try:
381
  cap = cv2.VideoCapture(video_path)
382
  cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
383
  ret, frame = cap.read()
384
  cap.release()
385
-
386
  if ret:
387
  return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
388
  return None
389
  except Exception as e:
390
- logger.error(f"Failed to extract frame: {e}")
391
  return None
392
 
393
  def ffmpeg_safe_call(inp: Path, out: Path, extra=()):
394
- """Safe FFmpeg call with proper path quoting"""
395
- cmd = ["ffmpeg", "-y", "-hide_banner", "-loglevel", "error",
396
- "-i", str(inp), *extra, str(out)]
397
- logger.info("FFMPEG: %s", " ".join(cmd))
398
  subprocess.run(cmd, check=True, timeout=300)
399
 
400
- # ===============================================================================
401
  # PROGRESS TRACKING
402
- # ===============================================================================
403
 
404
  class ProgressTracker:
405
  """Thread-safe progress tracking for video processing"""
406
-
407
  def __init__(self):
408
  self.current_step = ""
409
  self.progress = 0.0
@@ -411,101 +433,92 @@ def __init__(self):
411
  self.processed_frames = 0
412
  self.start_time = time.time()
413
  self.lock = threading.Lock()
414
-
415
  def update(self, step: str, progress: float = None):
416
- """Update progress safely"""
417
  with self.lock:
418
  self.current_step = step
419
  if progress is not None:
420
  self.progress = max(0.0, min(1.0, progress))
421
-
422
  def update_frames(self, processed: int, total: int = None):
423
- """Update frame processing progress"""
424
  with self.lock:
425
  self.processed_frames = processed
426
  if total is not None:
427
  self.total_frames = total
428
  if self.total_frames > 0:
429
  self.progress = self.processed_frames / self.total_frames
430
-
431
  def get_status(self) -> Dict[str, Any]:
432
- """Get current status safely"""
433
  with self.lock:
434
  elapsed = time.time() - self.start_time
435
  eta = 0
436
  if self.progress > 0.01:
437
  eta = elapsed * (1.0 - self.progress) / self.progress
438
-
439
  return {
440
  "step": self.current_step, "progress": self.progress,
441
  "processed_frames": self.processed_frames, "total_frames": self.total_frames,
442
  "elapsed": elapsed, "eta": eta
443
  }
444
 
445
- # Global progress tracker
446
  progress_tracker = ProgressTracker()
447
 
448
- # ===============================================================================
449
- # SAFE FILE OPERATIONS (ENHANCED)
450
- # ===============================================================================
451
 
452
  def create_job_directory() -> Path:
453
- """Create unique job directory for processing"""
454
  job_id = str(uuid.uuid4())[:8]
455
  job_dir = JOB_ROOT / f"job_{job_id}_{int(time.time())}"
456
  job_dir.mkdir(parents=True, exist_ok=True)
457
- logger.info(f"πŸ“ Created job directory: {job_dir}")
458
  return job_dir
459
 
460
  def atomic_file_write(filepath: Path, content: bytes):
461
- """Write file atomically to prevent corruption"""
462
  temp_path = filepath.with_suffix(f"{filepath.suffix}.tmp")
463
  try:
464
  with open(temp_path, 'wb') as f:
465
  f.write(content)
466
  temp_path.rename(filepath)
467
- logger.debug(f"βœ… Atomic write completed: {filepath}")
468
  except Exception as e:
469
  if temp_path.exists():
470
  temp_path.unlink(missing_ok=True)
471
  raise e
472
 
473
  def safe_download(url: str, filepath: Path, max_size: int = 500 * 1024 * 1024):
474
- """Safely download file with size checks"""
475
  temp_path = filepath.with_suffix(f"{filepath.suffix}.download")
476
-
477
  try:
478
- response = requests.get(url, stream=True, timeout=30)
479
- response.raise_for_status()
480
-
481
- content_length = response.headers.get('content-length')
482
- if content_length and int(content_length) > max_size:
483
- raise ValueError(f"File too large: {content_length} bytes")
484
-
485
  downloaded = 0
486
  with open(temp_path, 'wb') as f:
487
- for chunk in response.iter_content(chunk_size=8192):
488
  if chunk:
489
  downloaded += len(chunk)
490
  if downloaded > max_size:
491
  raise ValueError(f"Download exceeded size limit: {downloaded} bytes")
492
  f.write(chunk)
493
-
494
  if not temp_path.exists() or temp_path.stat().st_size == 0:
495
  raise ValueError("Download resulted in empty file")
496
-
497
  temp_path.rename(filepath)
498
- logger.info(f"βœ… Download completed: {filepath} ({downloaded} bytes)")
499
-
500
  except Exception as e:
501
  if temp_path.exists():
502
  temp_path.unlink(missing_ok=True)
503
- logger.error(f"❌ Download failed: {e}")
504
  raise
505
 
506
- # ===============================================================================
507
  # ENHANCED PIPELINE INTEGRATION
508
- # ===============================================================================
509
 
510
  def process_video_pipeline(
511
  video_path: str,
@@ -516,100 +529,75 @@ def process_video_pipeline(
516
  progress_callback: Optional[Callable] = None
517
  ) -> str:
518
  """Process video using the two-stage pipeline with enhanced safety and monitoring"""
519
-
520
- # Wrap entire function with ChatGPT's safe execution wrapper
521
  def _inner_process():
522
  logger.info("=" * 60)
523
- logger.info("=== ENHANCED TWO-STAGE PIPELINE (WITH CHATGPT SAFETY) ===")
524
  logger.info("=" * 60)
525
-
526
- # Pre-flight checks with enhanced validation
527
- logger.info(f"DEBUG: Video path: {video_path}")
528
- logger.info(f"DEBUG: Video exists: {Path(video_path).exists()}")
529
- logger.info(f"DEBUG: Video file size: {Path(video_path).stat().st_size if Path(video_path).exists() else 'N/A'} bytes")
530
- logger.info(f"DEBUG: Job directory: {job_dir}")
531
- logger.info(f"DEBUG: Job directory writable: {os.access(job_dir, os.W_OK)}")
532
- logger.info(f"DEBUG: Background image size: {background_image.size if background_image else 'None'}")
533
- logger.info(f"DEBUG: Background type: {background_type}")
534
- logger.info(f"DEBUG: Disk space: {_disk_stats(APP_ROOT)}")
535
-
536
- # Safely handle uploaded video
537
  if not Path(video_path).exists():
538
  raise FileNotFoundError(f"Video file not found: {video_path}")
539
-
540
- # Create safe video path within our controlled environment
541
  safe_video_path = place_uploaded(video_path, "videos")
542
- logger.info(f"DEBUG: Safe video path: {safe_video_path}")
543
-
544
- # Import with error context
545
- logger.info("DEBUG: Attempting to import two-stage pipeline...")
546
  try:
547
  from two_stage_pipeline import process_two_stage as pipeline_process
548
- logger.info("βœ“ Two-stage pipeline imported successfully")
549
  except ImportError as e:
550
- logger.error(f"Failed to import two-stage pipeline: {e}")
551
  raise
552
-
553
- progress_tracker.update("Initializing enhanced two-stage pipeline...")
554
-
555
- # Enhanced progress callback with stage monitoring and memory tracking
556
  current_stage = {"stage": "init", "start_time": time.time()}
557
-
558
  def safe_progress_callback(step: str, progress: float = None):
559
  try:
560
- current_time = time.time()
561
- elapsed = current_time - current_stage["start_time"]
562
-
563
- # Stage detection with timing
564
- if "Stage 1" in step:
565
- if current_stage["stage"] != "stage1":
566
- current_stage["stage"] = "stage1"
567
- current_stage["start_time"] = current_time
568
- logger.info("πŸ”„ STAGE TRANSITION: Entering Stage 1 (SAM2)")
569
- logger.info(f"Memory before Stage 1: {_disk_stats(APP_ROOT)}")
570
-
571
- elif "Stage 2" in step:
572
- if current_stage["stage"] != "stage2":
573
- stage1_duration = current_time - current_stage["start_time"]
574
- current_stage["stage"] = "stage2"
575
- current_stage["start_time"] = current_time
576
- logger.info("πŸ”„ STAGE TRANSITION: Entering Stage 2 (Composition)")
577
- logger.info(f"Stage 1 completed in {stage1_duration:.1f}s")
578
- logger.info(f"Memory after Stage 1: {_disk_stats(APP_ROOT)}")
579
-
580
- elif "Done" in step:
581
- if current_stage["stage"] != "complete":
582
- stage2_duration = current_time - current_stage["start_time"]
583
- current_stage["stage"] = "complete"
584
- logger.info("πŸ”„ STAGE TRANSITION: Pipeline Complete")
585
- logger.info(f"Stage 2 completed in {stage2_duration:.1f}s")
586
- logger.info(f"Final memory: {_disk_stats(APP_ROOT)}")
587
-
588
- logger.info(f"PROGRESS [{current_stage['stage'].upper()}] ({elapsed:.1f}s): {step} ({progress})")
589
  progress_tracker.update(step, progress)
590
-
591
  if progress_callback:
592
- if progress is not None:
593
- progress_callback(f"Progress: {progress:.1%} - {step}")
594
- else:
595
- progress_callback(step)
596
-
597
- # Memory warning if Stage 1 takes too long
598
  if current_stage["stage"] == "stage1" and elapsed > 15:
599
- logger.warning(f"⚠️ Stage 1 running for {elapsed:.1f}s - monitoring for memory issues")
600
-
601
  except Exception as e:
602
- logger.error(f"Progress callback error: {e}")
603
-
604
- # Validation with enhanced error context
605
  if background_image is None:
606
  raise ValueError("Background image is required")
607
-
608
- logger.info("DEBUG: Pre-pipeline validation complete")
609
- logger.info(f"DEBUG: Job dir contents before: {list(job_dir.iterdir()) if job_dir.exists() else 'does not exist'}")
610
-
611
- # Call two-stage pipeline with safe paths
612
- logger.info("DEBUG: Calling two-stage pipeline with enhanced monitoring...")
613
  result_path = pipeline_process(
614
  video_path=str(safe_video_path),
615
  background_image=background_image,
@@ -617,50 +605,43 @@ def safe_progress_callback(step: str, progress: float = None):
617
  progress=safe_progress_callback,
618
  use_matany=True
619
  )
620
-
621
- logger.info(f"DEBUG: Pipeline returned: {result_path}")
622
- logger.info(f"DEBUG: Result path type: {type(result_path)}")
623
-
624
- # Post-processing validation with enhanced checks
625
  if result_path:
626
  result_file = Path(result_path)
627
- logger.info(f"DEBUG: Result file exists: {result_file.exists()}")
628
  if result_file.exists():
629
- file_size = result_file.stat().st_size
630
- logger.info(f"DEBUG: Result file size: {file_size} bytes")
631
- if file_size == 0:
632
  raise RuntimeError("Pipeline produced empty output file")
633
- logger.info(f"DEBUG: Job dir contents after: {list(job_dir.iterdir())}")
634
-
635
- # Verify the output is a valid video
636
- try:
637
- cap = cv2.VideoCapture(str(result_file))
638
- if cap.isOpened():
639
- frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
640
- logger.info(f"DEBUG: Output video frame count: {frame_count}")
641
- cap.release()
642
- else:
643
- logger.warning("⚠️ Output file may not be a valid video")
644
- except Exception as e:
645
- logger.warning(f"⚠️ Could not verify output video: {e}")
646
-
647
  if not result_path or not Path(result_path).exists():
648
- raise RuntimeError("Two-stage pipeline processing failed - no output produced")
649
-
650
  logger.info("=" * 60)
651
- logger.info(f"βœ… ENHANCED TWO-STAGE PIPELINE COMPLETED: {result_path}")
652
  logger.info("=" * 60)
653
  return result_path
654
-
655
- # Execute with ChatGPT's comprehensive error wrapper
656
  try:
657
  return run_safely(_inner_process)
658
  except Exception as e:
659
- # Enhanced error cleanup
660
- logger.error("🧹 Performing error cleanup...")
661
  clear_gpu_memory()
662
-
663
- # Additional error context
664
- logger.error(f"Job directory state: {list(job_dir.iterdir()) if job_dir.exists() else 'does not exist'}")
665
-
666
- raise
 
2
  """
3
  BackgroundFX Pro — Core Functionality
4
  All processing logic, utilities, background generators, and handlers
5
+ Enhanced with file safety, robust logging, and runtime diagnostics.
6
  """
7
 
8
  import os
 
28
  import numpy as np
29
  from PIL import Image, ImageDraw, ImageFont
30
  import cv2
 
 
31
 
32
+ # ==============================================================================
33
+ # PATHS & ENV
34
+ # ==============================================================================
35
+
36
+ # Repo root (…/app)
37
+ APP_ROOT = Path(__file__).resolve().parent.parent
38
  DATA_ROOT = APP_ROOT / "data"
39
+ TMP_ROOT = APP_ROOT / "tmp"
40
+ JOB_ROOT = TMP_ROOT / "backgroundfx_jobs"
41
+
42
+ for p in (DATA_ROOT, TMP_ROOT, JOB_ROOT, APP_ROOT / ".hf", APP_ROOT / ".torch"):
43
+ p.mkdir(parents=True, exist_ok=True)
44
+
45
+ # Cache dirs (stable on Spaces)
46
+ os.environ.setdefault("HF_HOME", str(APP_ROOT / ".hf"))
47
+ os.environ.setdefault("TORCH_HOME", str(APP_ROOT / ".torch"))
48
+
49
+ # Quiet BLAS/OpenMP spam (in case ui.py wasn’t first)
50
+ os.environ.setdefault("OMP_NUM_THREADS", "4")
51
+ os.environ.setdefault("OPENBLAS_NUM_THREADS", "1")
52
+ os.environ.setdefault("MKL_NUM_THREADS", "1")
53
+ os.environ.setdefault("NUMEXPR_NUM_THREADS", "1")
54
+ os.environ.setdefault("PYTHONFAULTHANDLER", "1")
55
+
56
+ # ==============================================================================
57
+ # LOGGING + DIAGNOSTICS (console + file + heartbeat)
58
+ # ==============================================================================
59
+
60
+ # Line-buffer logs so Space UI shows them promptly
61
+ try:
62
+ sys.stdout.reconfigure(line_buffering=True)
63
+ sys.stderr.reconfigure(line_buffering=True)
64
+ except Exception:
65
+ pass
66
+
67
+ LOG_FILE = DATA_ROOT / "run.log"
68
+ logging.basicConfig(
69
+ level=logging.INFO,
70
+ format="%(asctime)s | %(levelname)s | %(message)s",
71
+ handlers=[logging.StreamHandler(sys.stdout),
72
+ logging.FileHandler(LOG_FILE, encoding="utf-8")],
73
+ force=True,
74
+ )
75
+ logger = logging.getLogger("bgfx")
76
+
77
+ # Faulthandler (native crashes -> stacks)
78
+ try:
79
+ import faulthandler, signal # type: ignore
80
+ faulthandler.enable(all_threads=True)
81
+ if hasattr(signal, "SIGUSR1"):
82
+ faulthandler.register(signal.SIGUSR1, file=sys.stderr, all_threads=True)
83
+ except Exception as e:
84
+ logger.warning("faulthandler setup skipped: %s", e)
85
 
86
  def _disk_stats(p: Path) -> str:
 
87
  try:
88
  total, used, free = shutil.disk_usage(str(p))
89
+ mb = lambda x: x // (1024 * 1024)
90
+ return f"disk(total={mb(total)}MB, used={mb(used)}MB, free={mb(free)}MB)"
91
  except Exception:
92
  return "disk(n/a)"
93
 
94
+ def _cgroup_limit_bytes():
95
+ for fp in ("/sys/fs/cgroup/memory.max", "/sys/fs/cgroup/memory/memory.limit_in_bytes"):
96
+ try:
97
+ s = Path(fp).read_text().strip()
98
+ if s and s != "max":
99
+ return int(s)
100
+ except Exception:
101
+ pass
102
+
103
+ def _rss_bytes():
104
+ try:
105
+ for line in Path("/proc/self/status").read_text().splitlines():
106
+ if line.startswith("VmRSS:"):
107
+ return int(line.split()[1]) * 1024
108
+ except Exception:
109
+ return None
110
+
111
+ def _heartbeat():
112
+ lim = _cgroup_limit_bytes()
113
+ while True:
114
+ rss = _rss_bytes()
115
+ logger.info(
116
+ "HEARTBEAT | rss=%s MB | limit=%s MB | %s",
117
+ f"{rss//2**20}" if rss else "n/a",
118
+ f"{lim//2**20}" if lim else "n/a",
119
+ _disk_stats(APP_ROOT),
120
+ )
121
+ time.sleep(2)
122
+
123
+ # Start heartbeat as a daemon thread (only once)
124
+ try:
125
+ threading.Thread(target=_heartbeat, name="heartbeat", daemon=True).start()
126
+ except Exception as e:
127
+ logger.warning("heartbeat skipped: %s", e)
128
+
129
+ import atexit
130
+ @atexit.register
131
+ def _on_exit():
132
+ logger.info("PROCESS EXITING (atexit) — if you don't see this, it was a hard kill (OOM/SIGKILL)")
133
+
134
+ # ==============================================================================
135
+ # STARTUP VALIDATION
136
+ # ==============================================================================
137
+
138
  def startup_probe():
139
  """Comprehensive startup probe - validates system readiness"""
140
  try:
141
  logger.info("🚀 BACKGROUNDFX PRO STARTUP PROBE")
142
+ logger.info("📁 Working directory: %s", os.getcwd())
143
+ logger.info("🐍 Python executable: %s", sys.executable)
144
+
145
+ # Write probe (fail fast if not writable)
146
  probe_file = TMP_ROOT / "startup_probe.txt"
147
  probe_file.write_text("startup_test_ok", encoding="utf-8")
148
  assert probe_file.read_text(encoding="utf-8") == "startup_test_ok"
149
+ logger.info("✅ WRITE PROBE OK: %s | %s", probe_file, _disk_stats(APP_ROOT))
 
150
  probe_file.unlink(missing_ok=True)
151
+
152
  # GPU/Torch status
153
  try:
154
+ logger.info("🔧 Torch=%s | cu=%s | cuda_available=%s",
155
+ torch.__version__, getattr(torch.version, "cuda", None), torch.cuda.is_available())
 
 
156
  if torch.cuda.is_available():
157
  gpu_count = torch.cuda.device_count()
158
+ name = torch.cuda.get_device_name(0) if gpu_count else "Unknown"
159
+ vram_gb = torch.cuda.get_device_properties(0).total_memory / (1024**3) if gpu_count else 0
160
+ logger.info("🔥 GPU Available: %s (%d device(s)) — VRAM %.1f GB", name, gpu_count, vram_gb)
 
161
  else:
162
+ logger.warning("⚠️ No GPU available — using CPU")
163
  except Exception as e:
164
+ logger.warning("⚠️ Torch check failed: %s", e)
165
+
166
  # Directory verification
167
+ for d in ("checkpoints", "models", "utils"):
168
+ dp = APP_ROOT / d
169
+ logger.info("✅ Directory %s: %s", d, "OK" if dp.exists() else "MISSING")
170
+
171
+ # Job dir isolation test
172
+ test_job = JOB_ROOT / "startup_test_job"
173
+ test_job.mkdir(parents=True, exist_ok=True)
174
+ tfile = test_job / "test.tmp"
175
+ tfile.write_text("job_isolation_test")
176
+ assert tfile.read_text() == "job_isolation_test"
177
+ logger.info("✅ Job isolation directory ready: %s", JOB_ROOT)
178
+ shutil.rmtree(test_job, ignore_errors=True)
179
+
180
+ # Env summary
181
+ logger.info("🌍 Env: OMP_NUM_THREADS=%s | HF_HOME=%s | TORCH_HOME=%s",
182
+ os.environ.get("OMP_NUM_THREADS", "unset"),
183
+ os.environ.get("HF_HOME", "default"),
184
+ os.environ.get("TORCH_HOME", "default"))
185
+
186
+ logger.info("🎯 Startup probe completed — system ready!")
187
+
188
  except Exception as e:
189
+ logger.error("❌ STARTUP PROBE FAILED: %s", e)
190
+ logger.error("📊 %s", _disk_stats(APP_ROOT))
191
+ raise RuntimeError(f"Startup probe failed — system not ready: {e}") from e
192
 
193
+ # ==============================================================================
194
+ # FILE SAFETY UTILITIES
195
+ # ==============================================================================
196
 
197
  def new_tmp_path(suffix: str) -> Path:
198
  """Generate safe temporary path within TMP_ROOT"""
 
204
  try:
205
  with open(tmp, "wb") as f:
206
  f.write(data)
207
+ tmp.replace(dst) # atomic on same FS
208
+ logger.debug("✅ Atomic write: %s", dst)
209
  except Exception as e:
210
  if tmp.exists():
211
  tmp.unlink(missing_ok=True)
 
223
  target_dir.mkdir(exist_ok=True, parents=True)
224
  out = target_dir / safe_name(Path(in_path).name)
225
  shutil.copy2(in_path, out)
226
+ logger.info("📁 Uploaded file placed: %s", out)
227
  return out
228
 
229
  def tmp_video_path(ext=".mp4") -> Path:
 
230
  return new_tmp_path(ext)
231
 
232
  def tmp_image_path(ext=".png") -> Path:
 
233
  return new_tmp_path(ext)
234
 
235
  def run_safely(fn: Callable, *args, **kwargs):
236
  """Execute function with comprehensive error logging"""
237
  try:
238
  return fn(*args, **kwargs)
239
+ except Exception:
240
  logger.error("PROCESSING FAILED\n%s", "".join(traceback.format_exc()))
241
  logger.error("CWD=%s | DATA_ROOT=%s | TMP_ROOT=%s | %s",
242
+ os.getcwd(), DATA_ROOT, TMP_ROOT, _disk_stats(APP_ROOT))
243
+ try:
244
+ logger.error("Env: OMP_NUM_THREADS=%s | CUDA=%s | torch=%s | cu=%s",
245
+ os.environ.get("OMP_NUM_THREADS"),
246
+ os.environ.get("CUDA_VISIBLE_DEVICES", "default"),
247
+ torch.__version__,
248
+ getattr(torch.version, "cuda", None))
249
+ except Exception:
250
+ pass
251
  raise
252
 
253
+ # ==============================================================================
254
  # SYSTEM UTILITIES
255
+ # ==============================================================================
256
 
257
  def get_device():
258
  """Get optimal device for processing"""
 
267
  gc.collect()
268
  logger.info("🧹 GPU memory cleared")
269
  except Exception as e:
270
+ logger.warning("GPU cleanup warning: %s", e)
271
 
272
  def safe_file_operation(operation: Callable, *args, max_retries: int = 3, **kwargs):
273
  """Safely execute file operations with retries"""
 
279
  last_error = e
280
  if attempt < max_retries - 1:
281
  time.sleep(0.1 * (attempt + 1))
282
+ logger.warning("File op retry %d: %s", attempt + 1, e)
283
  else:
284
+ logger.error("File op failed after %d attempts: %s", max_retries, e)
285
  raise last_error
286
 
287
+ # ==============================================================================
288
  # BACKGROUND GENERATORS
289
+ # ==============================================================================
290
 
291
  def generate_ai_background(prompt: str, width: int, height: int) -> Image.Image:
292
+ """Generate AI-like background using prompt cues (procedural)"""
293
  try:
294
+ logger.info("Generating AI background: '%s' (%dx%d)", prompt, width, height)
 
 
295
  img = np.zeros((height, width, 3), dtype=np.uint8)
 
 
296
  prompt_lower = prompt.lower()
297
+
298
+ if any(w in prompt_lower for w in ('city', 'urban', 'futuristic', 'cyberpunk')):
 
299
  for i in range(height):
300
+ r = int(20 + 80 * (i / height))
301
+ g = int(30 + 100 * (i / height))
302
+ b = int(60 + 120 * (i / height))
 
303
  img[i, :] = [r, g, b]
304
+ elif any(w in prompt_lower for w in ('beach', 'tropical', 'ocean', 'sea')):
 
 
305
  for i in range(height):
306
+ r = int(135 + 120 * (i / height))
307
+ g = int(206 + 49 * (i / height))
308
+ b = int(235 + 20 * (i / height))
 
309
  img[i, :] = [r, g, b]
310
+ elif any(w in prompt_lower for w in ('forest', 'jungle', 'nature', 'green')):
 
 
311
  for i in range(height):
312
+ r = int(34 + 105 * (i / height))
313
+ g = int(139 + 30 * (i / height))
314
+ b = int(34 - 15 * (i / height))
 
315
  img[i, :] = [max(0, r), max(0, g), max(0, b)]
316
+ elif any(w in prompt_lower for w in ('space', 'galaxy', 'stars', 'cosmic')):
 
 
317
  for i in range(height):
318
+ r = int(10 + 50 * (i / height))
319
+ g = int(0 + 30 * (i / height))
320
+ b = int(30 + 100 * (i / height))
 
321
  img[i, :] = [r, g, b]
322
+ elif any(w in prompt_lower for w in ('desert', 'sand', 'canyon')):
 
 
323
  for i in range(height):
324
+ r = int(238 + 17 * (i / height))
325
+ g = int(203 + 52 * (i / height))
326
+ b = int(173 + 82 * (i / height))
 
327
  img[i, :] = [min(255, r), min(255, g), min(255, b)]
 
328
  else:
329
+ colors = [(255, 182, 193), (255, 218, 185), (176, 224, 230)]
 
330
  color = colors[len(prompt) % len(colors)]
 
331
  for i in range(height):
332
+ t = 1 - (i / height) * 0.3
333
+ img[i, :] = [int(color[0] * t), int(color[1] * t), int(color[2] * t)]
334
+
 
 
 
 
335
  noise = np.random.randint(-15, 15, (height, width, 3))
336
  img = np.clip(img.astype(np.int16) + noise, 0, 255).astype(np.uint8)
 
337
  return Image.fromarray(img)
338
+
339
  except Exception as e:
340
+ logger.warning("AI background generation failed: %s — using fallback", e)
341
  return create_gradient_background("sunset", width, height)
342
 
343
  def create_gradient_background(gradient_type: str, width: int, height: int) -> Image.Image:
 
344
  img = np.zeros((height, width, 3), dtype=np.uint8)
 
345
  gradients = {
346
  "sunset": [(255, 165, 0), (128, 64, 128)],
347
+ "ocean": [(0, 100, 255), (30, 144, 255)],
348
  "forest": [(34, 139, 34), (139, 69, 19)],
349
+ "sky": [(135, 206, 235), (206, 235, 255)],
350
  }
 
351
  if gradient_type in gradients:
352
+ start, end = gradients[gradient_type]
353
  for i in range(height):
354
+ r = int(start[0] * (1 - i/height) + end[0] * (i/height))
355
+ g = int(start[1] * (1 - i/height) + end[1] * (i/height))
356
+ b = int(start[2] * (1 - i/height) + end[2] * (i/height))
 
357
  img[i, :] = [r, g, b]
358
  else:
359
  img.fill(128)
 
360
  return Image.fromarray(img)
361
 
362
  def create_solid_background(color: str, width: int, height: int) -> Image.Image:
 
363
  color_map = {
364
  "white": (255, 255, 255), "black": (0, 0, 0), "red": (255, 0, 0),
365
  "green": (0, 255, 0), "blue": (0, 0, 255), "yellow": (255, 255, 0),
 
370
  return Image.new("RGB", (width, height), rgb)
371
 
372
  def download_unsplash_image(query: str, width: int, height: int) -> Image.Image:
 
373
  try:
374
  url = f"https://source.unsplash.com/{width}x{height}/?{query}"
375
+ resp = requests.get(url, timeout=10)
376
+ resp.raise_for_status()
377
+ img = Image.open(io.BytesIO(resp.content))
378
+ if img.size != (width, height):
379
+ img = img.resize((width, height), Image.Resampling.LANCZOS)
380
+ return img.convert("RGB")
 
 
381
  except Exception as e:
382
+ logger.warning("Unsplash download failed: %s", e)
383
  return create_solid_background("gray", width, height)
384
 
385
+ # ==============================================================================
386
+ # VIDEO UTILITIES
387
+ # ==============================================================================
388
 
389
  def get_video_info(video_path: str) -> Dict[str, Any]:
 
390
  try:
391
  cap = cv2.VideoCapture(video_path)
392
  if not cap.isOpened():
393
  raise ValueError("Cannot open video file")
394
+ fps = cap.get(cv2.CAP_PROP_FPS)
395
+ frames= int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
396
+ w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
397
+ h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
 
 
398
  cap.release()
399
+ return {"fps": fps, "frame_count": frames, "width": w, "height": h,
400
+ "duration": (frames / fps if fps > 0 else 0)}
 
 
 
401
  except Exception as e:
402
+ logger.error("get_video_info failed: %s", e)
403
  return {"fps": 30.0, "frame_count": 0, "width": 1920, "height": 1080, "duration": 0}
404
 
405
  def extract_frame(video_path: str, frame_number: int) -> Optional[np.ndarray]:
 
406
  try:
407
  cap = cv2.VideoCapture(video_path)
408
  cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
409
  ret, frame = cap.read()
410
  cap.release()
 
411
  if ret:
412
  return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
413
  return None
414
  except Exception as e:
415
+ logger.error("extract_frame failed: %s", e)
416
  return None
417
 
418
  def ffmpeg_safe_call(inp: Path, out: Path, extra=()):
419
+ cmd = ["ffmpeg", "-y", "-hide_banner", "-loglevel", "error", "-i", str(inp), *extra, str(out)]
420
+ logger.info("FFMPEG %s", " ".join(cmd))
 
 
421
  subprocess.run(cmd, check=True, timeout=300)
422
 
423
+ # ==============================================================================
424
  # PROGRESS TRACKING
425
+ # ==============================================================================
426
 
427
  class ProgressTracker:
428
  """Thread-safe progress tracking for video processing"""
 
429
  def __init__(self):
430
  self.current_step = ""
431
  self.progress = 0.0
 
433
  self.processed_frames = 0
434
  self.start_time = time.time()
435
  self.lock = threading.Lock()
436
+
437
  def update(self, step: str, progress: float = None):
 
438
  with self.lock:
439
  self.current_step = step
440
  if progress is not None:
441
  self.progress = max(0.0, min(1.0, progress))
442
+
443
  def update_frames(self, processed: int, total: int = None):
 
444
  with self.lock:
445
  self.processed_frames = processed
446
  if total is not None:
447
  self.total_frames = total
448
  if self.total_frames > 0:
449
  self.progress = self.processed_frames / self.total_frames
450
+
451
  def get_status(self) -> Dict[str, Any]:
 
452
  with self.lock:
453
  elapsed = time.time() - self.start_time
454
  eta = 0
455
  if self.progress > 0.01:
456
  eta = elapsed * (1.0 - self.progress) / self.progress
 
457
  return {
458
  "step": self.current_step, "progress": self.progress,
459
  "processed_frames": self.processed_frames, "total_frames": self.total_frames,
460
  "elapsed": elapsed, "eta": eta
461
  }
462
 
463
+ # Global tracker
464
  progress_tracker = ProgressTracker()
465
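A short sketch of how the shared tracker is meant to be driven (values are illustrative):
progress_tracker.update("Extracting frames...", 0.0)
progress_tracker.update_frames(processed=30, total=300)   # progress becomes 0.1
status = progress_tracker.get_status()                     # dict with step/progress/elapsed/eta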
 
466
+ # ==============================================================================
467
+ # SAFE FILE OPS
468
+ # ==============================================================================
469
 
470
  def create_job_directory() -> Path:
 
471
  job_id = str(uuid.uuid4())[:8]
472
  job_dir = JOB_ROOT / f"job_{job_id}_{int(time.time())}"
473
  job_dir.mkdir(parents=True, exist_ok=True)
474
+ logger.info("📁 Created job directory: %s", job_dir)
475
  return job_dir
476
 
477
  def atomic_file_write(filepath: Path, content: bytes):
 
478
  temp_path = filepath.with_suffix(f"{filepath.suffix}.tmp")
479
  try:
480
  with open(temp_path, 'wb') as f:
481
  f.write(content)
482
  temp_path.rename(filepath)
483
+ logger.debug("✅ Atomic write: %s", filepath)
484
  except Exception as e:
485
  if temp_path.exists():
486
  temp_path.unlink(missing_ok=True)
487
  raise e
488
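A one-line sketch of the atomic writer above (path and payload are illustrative):
atomic_file_write(Path("/tmp/settings.json"), b'{"quality": "high"}')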
 
489
  def safe_download(url: str, filepath: Path, max_size: int = 500 * 1024 * 1024):
 
490
  temp_path = filepath.with_suffix(f"{filepath.suffix}.download")
 
491
  try:
492
+ r = requests.get(url, stream=True, timeout=30)
493
+ r.raise_for_status()
494
+ cl = r.headers.get('content-length')
495
+ if cl and int(cl) > max_size:
496
+ raise ValueError(f"File too large: {cl} bytes")
497
+
 
498
  downloaded = 0
499
  with open(temp_path, 'wb') as f:
500
+ for chunk in r.iter_content(chunk_size=8192):
501
  if chunk:
502
  downloaded += len(chunk)
503
  if downloaded > max_size:
504
  raise ValueError(f"Download exceeded size limit: {downloaded} bytes")
505
  f.write(chunk)
506
+
507
  if not temp_path.exists() or temp_path.stat().st_size == 0:
508
  raise ValueError("Download resulted in empty file")
509
+
510
  temp_path.rename(filepath)
511
+ logger.info("✅ Downloaded: %s (%d bytes)", filepath, downloaded)
512
+
513
  except Exception as e:
514
  if temp_path.exists():
515
  temp_path.unlink(missing_ok=True)
516
+ logger.error("❌ Download failed: %s", e)
517
  raise
518
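Usage sketch for the guarded download helper above (the URL is hypothetical; the size cap overrides the 500 MB default):
dest = create_job_directory() / "background.jpg"
safe_download("https://example.com/assets/background.jpg", dest, max_size=50 * 1024 * 1024)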
 
519
+ # ==============================================================================
520
  # ENHANCED PIPELINE INTEGRATION
521
+ # ==============================================================================
522
 
523
  def process_video_pipeline(
524
  video_path: str,
 
529
  progress_callback: Optional[Callable] = None
530
  ) -> str:
531
  """Process video using the two-stage pipeline with enhanced safety and monitoring"""
532
+
 
533
  def _inner_process():
534
  logger.info("=" * 60)
535
+ logger.info("=== ENHANCED TWO-STAGE PIPELINE (WITH SAFETY) ===")
536
  logger.info("=" * 60)
537
+
538
+ logger.info("DEBUG video_path=%s exists=%s size=%s bytes",
539
+ video_path, Path(video_path).exists(),
540
+ (Path(video_path).stat().st_size if Path(video_path).exists() else "N/A"))
541
+ logger.info("DEBUG job_dir=%s writable=%s", job_dir, os.access(job_dir, os.W_OK))
542
+ logger.info("DEBUG bg_image=%s bg_type=%s | %s",
543
+ (background_image.size if background_image else None),
544
+ background_type, _disk_stats(APP_ROOT))
545
+
 
 
 
546
  if not Path(video_path).exists():
547
  raise FileNotFoundError(f"Video file not found: {video_path}")
548
+
549
+ # Copy into controlled area
550
  safe_video_path = place_uploaded(video_path, "videos")
551
+ logger.info("DEBUG safe_video_path=%s", safe_video_path)
552
+
553
+ logger.info("DEBUG importing two-stage pipeline…")
 
554
  try:
555
  from two_stage_pipeline import process_two_stage as pipeline_process
556
+ logger.info("✓ two-stage pipeline import OK")
557
  except ImportError as e:
558
+ logger.error("Import two_stage_pipeline failed: %s", e)
559
  raise
560
+
561
+ progress_tracker.update("Initializing enhanced two-stage pipeline…")
562
+
 
563
  current_stage = {"stage": "init", "start_time": time.time()}
564
+
565
  def safe_progress_callback(step: str, progress: float = None):
566
  try:
567
+ now = time.time()
568
+ elapsed = now - current_stage["start_time"]
569
+
570
+ if "Stage 1" in step and current_stage["stage"] != "stage1":
571
+ current_stage["stage"] = "stage1"
572
+ current_stage["start_time"] = now
573
+ logger.info("🔄 Entering Stage 1 (SAM2) | %s", _disk_stats(APP_ROOT))
574
+ elif "Stage 2" in step and current_stage["stage"] != "stage2":
575
+ d1 = now - current_stage["start_time"]
576
+ current_stage["stage"] = "stage2"
577
+ current_stage["start_time"] = now
578
+ logger.info("🔄 Entering Stage 2 (Composition) - Stage 1 time %.1fs | %s", d1, _disk_stats(APP_ROOT))
579
+ elif "Done" in step and current_stage["stage"] != "complete":
580
+ d2 = now - current_stage["start_time"]
581
+ current_stage["stage"] = "complete"
582
+ logger.info("🔄 Pipeline complete - Stage 2 time %.1fs | %s", d2, _disk_stats(APP_ROOT))
583
+
584
+ logger.info("PROGRESS [%s] (%.1fs): %s (%s)",
585
+ current_stage['stage'].upper(), elapsed, step, progress)
586
  progress_tracker.update(step, progress)
587
+
588
  if progress_callback:
589
+ progress_callback(f"Progress: {progress:.1%} - {step}" if progress is not None else step)
590
+
 
 
 
 
591
  if current_stage["stage"] == "stage1" and elapsed > 15:
592
+ logger.warning("⚠️ Stage 1 running for %.1fs - monitoring memory", elapsed)
593
+
594
  except Exception as e:
595
+ logger.error("Progress callback error: %s", e)
596
+
 
597
  if background_image is None:
598
  raise ValueError("Background image is required")
599
+
600
+ logger.info("DEBUG: calling two-stage pipeline…")
 
 
 
 
601
  result_path = pipeline_process(
602
  video_path=str(safe_video_path),
603
  background_image=background_image,
 
605
  progress=safe_progress_callback,
606
  use_matany=True
607
  )
608
+
609
+ logger.info("DEBUG: pipeline returned %s (%s)", result_path, type(result_path))
610
+
 
 
611
  if result_path:
612
  result_file = Path(result_path)
613
+ logger.info("DEBUG: result exists=%s", result_file.exists())
614
  if result_file.exists():
615
+ size = result_file.stat().st_size
616
+ logger.info("DEBUG: result size=%d bytes", size)
617
+ if size == 0:
618
  raise RuntimeError("Pipeline produced empty output file")
619
+
620
+ # Quick validity check
621
+ try:
622
+ cap = cv2.VideoCapture(str(result_file))
623
+ if cap.isOpened():
624
+ frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
625
+ logger.info("DEBUG: output frame_count=%d", frames)
626
+ cap.release()
627
+ else:
628
+ logger.warning("⚠️ Output may not be a valid video (cannot open)")
629
+ except Exception as e:
630
+ logger.warning("⚠️ Could not verify output video: %s", e)
631
+
 
632
  if not result_path or not Path(result_path).exists():
633
+ raise RuntimeError("Two-stage pipeline failed - no output produced")
634
+
635
  logger.info("=" * 60)
636
+ logger.info("✅ ENHANCED TWO-STAGE PIPELINE COMPLETED: %s", result_path)
637
  logger.info("=" * 60)
638
  return result_path
639
+
 
640
  try:
641
  return run_safely(_inner_process)
642
  except Exception as e:
643
+ logger.error("🧹 Error cleanup…")
 
644
  clear_gpu_memory()
645
+ logger.error("Job dir state: %s",
646
+ (list(job_dir.iterdir()) if job_dir.exists() else "does not exist"))
647
+ raise
 
 
ui_core_interface.py CHANGED
@@ -30,16 +30,12 @@ def handle_custom_background_upload(image: Optional[Image.Image]) -> Tuple[Optio
30
  """Handle custom background image upload"""
31
  if image is None:
32
  return None, "No image uploaded"
33
-
34
  try:
35
  if image.mode != "RGB":
36
  image = image.convert("RGB")
37
-
38
  status = f"βœ… Custom background uploaded: {image.size[0]}x{image.size[1]}"
39
  logger.info(status)
40
-
41
  return image, status
42
-
43
  except Exception as e:
44
  error_msg = f"❌ Background upload failed: {str(e)}"
45
  logger.error(error_msg)
@@ -48,7 +44,6 @@ def handle_custom_background_upload(image: Optional[Image.Image]) -> Tuple[Optio
48
  def handle_background_type_change(bg_type: str):
49
  """Handle background type selection - show/hide relevant controls"""
50
  logger.info(f"🎨 Background type changed to: {bg_type}")
51
-
52
  if bg_type == "upload":
53
  return (
54
  gr.update(visible=True, label="Upload Custom Background Image"),
@@ -59,10 +54,9 @@ def handle_background_type_change(bg_type: str):
59
  prompt_placeholder = {
60
  "ai_generate": "Describe the scene: 'futuristic city', 'tropical beach', 'mystical forest'...",
61
  "gradient": "Choose style: 'sunset', 'ocean', 'forest', 'sky'",
62
- "solid": "Choose color: 'red', 'blue', 'green', 'white', 'black'...",
63
  "unsplash": "Search query: 'mountain landscape', 'city skyline', 'nature'..."
64
  }
65
-
66
  return (
67
  gr.update(visible=False),
68
  gr.update(visible=True, placeholder=prompt_placeholder.get(bg_type, "Enter your prompt...")),
@@ -73,24 +67,19 @@ def handle_video_upload(video_file) -> Tuple[Optional[str], str]:
73
  """Handle video file upload"""
74
  if video_file is None:
75
  return None, "No video file provided"
76
-
77
  try:
78
  job_dir = create_job_directory()
79
-
80
- video_path = job_dir / "input_video.mp4"
81
- safe_file_operation(
82
- lambda src, dst: shutil.copy2(src, dst),
83
- video_file, video_path
84
- )
85
-
86
  info = get_video_info(str(video_path))
87
  duration_text = f"{info['duration']:.1f}s"
88
-
89
  status = f"βœ… Video uploaded: {info['width']}x{info['height']}, {info['fps']:.1f}fps, {duration_text}"
90
  logger.info(status)
91
-
92
  return str(video_path), status
93
-
94
  except Exception as e:
95
  error_msg = f"❌ Video upload failed: {str(e)}"
96
  logger.error(error_msg)
@@ -100,53 +89,41 @@ def handle_background_generation(bg_type: str, bg_prompt: str, video_path: str)
100
  """Handle background generation (for non-upload types)"""
101
  if not video_path:
102
  return None, "No video loaded"
103
-
104
  if bg_type == "upload":
105
  return None, "Use the upload field above for custom backgrounds"
106
-
107
  try:
108
  info = get_video_info(video_path)
109
  width, height = info['width'], info['height']
110
-
111
  if bg_type == "ai_generate":
112
  background = generate_ai_background(bg_prompt, width, height)
113
  status = f"βœ… Generated AI background: '{bg_prompt}'"
114
-
115
  elif bg_type == "gradient":
116
  gradients = ["sunset", "ocean", "forest", "sky"]
117
- gradient_type = gradients[0]
118
- if any(g in bg_prompt.lower() for g in gradients):
119
- for g in gradients:
120
- if g in bg_prompt.lower():
121
- gradient_type = g
122
- break
123
-
124
  background = create_gradient_background(gradient_type, width, height)
125
  status = f"βœ… Generated {gradient_type} gradient background"
126
-
127
  elif bg_type == "solid":
128
  colors = ["white", "black", "red", "green", "blue", "yellow", "purple", "orange", "pink", "gray"]
129
- color = "white"
130
- for c in colors:
131
- if c in bg_prompt.lower():
132
- color = c
133
- break
134
-
135
  background = create_solid_background(color, width, height)
136
  status = f"βœ… Generated {color} solid background"
137
-
138
  elif bg_type == "unsplash":
139
- query = bg_prompt if bg_prompt.strip() else "nature"
140
  background = download_unsplash_image(query, width, height)
141
  status = f"βœ… Downloaded background from Unsplash: '{query}'"
142
-
143
  else:
144
  background = create_solid_background("gray", width, height)
145
  status = "βœ… Generated default gray background"
146
-
147
  logger.info(status)
148
  return background, status
149
-
150
  except Exception as e:
151
  error_msg = f"❌ Background generation failed: {str(e)}"
152
  logger.error(error_msg)
@@ -162,17 +139,16 @@ def handle_video_processing(
162
  """Handle complete video processing"""
163
  if not video_path:
164
  return None, "❌ No video provided"
165
-
166
  if not background_image:
167
  return None, "❌ No background provided"
168
-
169
  try:
170
  progress(0, "Starting video processing...")
171
  logger.info("🎬 Starting video processing")
172
-
173
  job_dir = create_job_directory()
174
  progress_tracker.update("Creating job directory...")
175
-
176
  def update_progress(message: str):
177
  try:
178
  status = progress_tracker.get_status()
@@ -181,7 +157,7 @@ def update_progress(message: str):
181
  logger.info(f"Progress: {progress_val:.1%} - {message}")
182
  except Exception as e:
183
  logger.warning(f"Progress update failed: {e}")
184
-
185
  result_path = process_video_pipeline(
186
  video_path=video_path,
187
  background_image=background_image,
@@ -190,38 +166,31 @@ def update_progress(message: str):
190
  job_dir=job_dir,
191
  progress_callback=update_progress
192
  )
193
-
194
  progress(1.0, "Processing complete!")
195
-
196
  clear_gpu_memory()
197
-
198
- status = f"βœ… Video processing completed successfully!"
199
  logger.info(status)
200
-
201
  return result_path, status
202
-
203
  except Exception as e:
204
  error_msg = f"❌ Processing failed: {str(e)}"
205
  logger.error(error_msg)
206
- logger.error(f"Traceback: {traceback.format_exc()}")
207
-
208
  clear_gpu_memory()
209
-
210
  return None, error_msg
211
 
212
  def handle_preview_generation(video_path: str, frame_number: int = 0) -> Tuple[Optional[Image.Image], str]:
213
  """Generate preview frame from video"""
214
  if not video_path:
215
  return None, "No video loaded"
216
-
217
  try:
218
  frame = extract_frame(video_path, frame_number)
219
  if frame is None:
220
  return None, "Failed to extract frame"
221
-
222
  preview_image = Image.fromarray(frame)
223
  return preview_image, f"βœ… Preview generated (frame {frame_number})"
224
-
225
  except Exception as e:
226
  error_msg = f"❌ Preview generation failed: {str(e)}"
227
  logger.error(error_msg)
@@ -233,7 +202,7 @@ def handle_preview_generation(video_path: str, frame_number: int = 0) -> Tuple[O
233
 
234
  def create_interface():
235
  """Create the main Gradio interface"""
236
-
237
  custom_css = """
238
  .container { max-width: 1200px; margin: auto; }
239
  .header { text-align: center; margin-bottom: 30px; }
@@ -241,23 +210,24 @@ def create_interface():
241
  .status { font-family: monospace; font-size: 12px; }
242
  .progress-bar { margin: 10px 0; }
243
  """
244
-
245
  with gr.Blocks(
246
  title="BackgroundFX Pro",
247
  css=custom_css,
248
- theme=gr.themes.Soft()
 
249
  ) as demo:
250
-
251
  gr.HTML("""
252
  <div class="header">
253
  <h1>🎬 BackgroundFX Pro</h1>
254
  <p>Professional AI-powered video background replacement using SAM2 and MatAnyone</p>
255
  </div>
256
  """)
257
-
258
  video_path_state = gr.State(value=None)
259
  background_image_state = gr.State(value=None)
260
-
261
  with gr.Row():
262
  with gr.Column(scale=1):
263
  with gr.Group():
@@ -272,28 +242,35 @@ def create_interface():
272
  interactive=False,
273
  height=300
274
  )
275
  video_status = gr.Textbox(
276
  label="Video Status",
277
  interactive=False,
278
  elem_classes=["status"]
279
  )
280
-
281
  with gr.Group():
282
  gr.HTML("<h3>🎨 Background Selection</h3>")
283
-
284
  gr.HTML("""
285
  <div style='background: #f0f8ff; padding: 10px; border-radius: 5px; margin-bottom: 15px;'>
286
  <b>Choose your background method:</b><br>
287
  β€’ <b>Upload:</b> Use your own image<br>
288
- β€’ <b>AI Generate:</b> Create with AI prompt<br>
289
  β€’ <b>Gradient/Solid/Unsplash:</b> Quick generation
290
  </div>
291
  """)
292
-
293
  background_type = gr.Radio(
294
  choices=[
295
  ("πŸ“€ Upload Image", "upload"),
296
- ("πŸ€– AI Generate", "ai_generate"),
297
  ("🌈 Gradient", "gradient"),
298
  ("🎯 Solid Color", "solid"),
299
  ("πŸ“Έ Unsplash Photo", "unsplash")
@@ -301,7 +278,7 @@ def create_interface():
301
  label="Background Type",
302
  value="upload"
303
  )
304
-
305
  custom_bg_upload = gr.Image(
306
  label="Upload Custom Background",
307
  type="pil",
@@ -309,62 +286,62 @@ def create_interface():
309
  height=250,
310
  visible=True
311
  )
312
-
313
  background_prompt = gr.Textbox(
314
  label="Background Prompt",
315
- placeholder="AI: 'futuristic city', 'tropical beach' | Gradient: 'sunset', 'ocean' | Solid: 'red', 'blue' | Unsplash: 'mountain landscape'",
 
316
  value="futuristic city skyline at sunset",
317
  visible=False
318
  )
319
-
320
  generate_bg_btn = gr.Button(
321
  "Generate Background",
322
  variant="secondary",
323
- visible=False
324
  )
325
-
326
  background_preview = gr.Image(
327
  label="Background Preview",
328
  interactive=False,
329
  height=300
330
  )
331
-
332
  background_status = gr.Textbox(
333
  label="Background Status",
334
  interactive=False,
335
  elem_classes=["status"]
336
  )
337
-
338
  with gr.Column(scale=1):
339
  with gr.Group():
340
  gr.HTML("<h3>⚑ Processing</h3>")
341
-
342
  process_btn = gr.Button(
343
  "πŸš€ Process Video",
344
  variant="primary",
345
- size="lg"
346
  )
347
-
348
  processing_status = gr.Textbox(
349
  label="Processing Status",
350
  interactive=False,
351
  elem_classes=["status"]
352
  )
353
-
354
  with gr.Group():
355
  gr.HTML("<h3>πŸ“½οΈ Results</h3>")
356
-
357
  result_video = gr.Video(
358
  label="Processed Video",
359
  height=400
360
  )
361
-
362
- download_btn = gr.Button(
 
363
  "πŸ“₯ Download Result",
364
- variant="secondary",
365
  visible=False
366
  )
367
-
368
  with gr.Accordion("πŸ”§ System Information", open=False):
369
  system_info = gr.HTML(f"""
370
  <div class="system-info">
@@ -375,14 +352,14 @@ def create_interface():
375
  <p><strong>App Root:</strong> {APP_ROOT}</p>
376
  </div>
377
  """)
378
-
379
  # Event Handlers
380
  background_type.change(
381
  fn=handle_background_type_change,
382
  inputs=[background_type],
383
  outputs=[custom_bg_upload, background_prompt, generate_bg_btn]
384
  )
385
-
386
  custom_bg_upload.change(
387
  fn=handle_custom_background_upload,
388
  inputs=[custom_bg_upload],
@@ -392,7 +369,7 @@ def create_interface():
392
  inputs=[background_image_state],
393
  outputs=[background_preview]
394
  )
395
-
396
  video_upload.change(
397
  fn=handle_video_upload,
398
  inputs=[video_upload],
@@ -400,9 +377,9 @@ def create_interface():
400
  ).then(
401
  fn=handle_preview_generation,
402
  inputs=[video_path_state],
403
- outputs=[video_preview, gr.Textbox(visible=False)]
404
  )
405
-
406
  generate_bg_btn.click(
407
  fn=handle_background_generation,
408
  inputs=[background_type, background_prompt, video_path_state],
@@ -412,20 +389,21 @@ def create_interface():
412
  inputs=[background_image_state],
413
  outputs=[background_preview]
414
  )
415
-
416
  process_btn.click(
417
  fn=handle_video_processing,
418
  inputs=[
419
  video_path_state,
420
- background_image_state,
421
  background_type,
422
  background_prompt
423
  ],
424
  outputs=[result_video, processing_status]
425
  ).then(
426
- fn=lambda video: gr.update(visible=video is not None),
 
427
  inputs=[result_video],
428
  outputs=[download_btn]
429
  )
430
-
431
- return demo
 
30
  """Handle custom background image upload"""
31
  if image is None:
32
  return None, "No image uploaded"
 
33
  try:
34
  if image.mode != "RGB":
35
  image = image.convert("RGB")
 
36
  status = f"βœ… Custom background uploaded: {image.size[0]}x{image.size[1]}"
37
  logger.info(status)
 
38
  return image, status
 
39
  except Exception as e:
40
  error_msg = f"❌ Background upload failed: {str(e)}"
41
  logger.error(error_msg)
 
44
  def handle_background_type_change(bg_type: str):
45
  """Handle background type selection - show/hide relevant controls"""
46
  logger.info(f"🎨 Background type changed to: {bg_type}")
 
47
  if bg_type == "upload":
48
  return (
49
  gr.update(visible=True, label="Upload Custom Background Image"),
 
54
  prompt_placeholder = {
55
  "ai_generate": "Describe the scene: 'futuristic city', 'tropical beach', 'mystical forest'...",
56
  "gradient": "Choose style: 'sunset', 'ocean', 'forest', 'sky'",
57
+ "solid": "Choose color: 'red', 'blue', 'green', 'white', 'black'...",
58
  "unsplash": "Search query: 'mountain landscape', 'city skyline', 'nature'..."
59
  }
 
60
  return (
61
  gr.update(visible=False),
62
  gr.update(visible=True, placeholder=prompt_placeholder.get(bg_type, "Enter your prompt...")),
 
67
  """Handle video file upload"""
68
  if video_file is None:
69
  return None, "No video file provided"
 
70
  try:
71
  job_dir = create_job_directory()
72
+ # Preserve original extension if possible
73
+ src_path = Path(video_file)
74
+ ext = src_path.suffix if src_path.suffix else ".mp4"
75
+ video_path = job_dir / f"input_video{ext}"
76
+ safe_file_operation(lambda src, dst: shutil.copy2(src, dst), str(src_path), str(video_path))
77
+
 
78
  info = get_video_info(str(video_path))
79
  duration_text = f"{info['duration']:.1f}s"
 
80
  status = f"βœ… Video uploaded: {info['width']}x{info['height']}, {info['fps']:.1f}fps, {duration_text}"
81
  logger.info(status)
 
82
  return str(video_path), status
 
83
  except Exception as e:
84
  error_msg = f"❌ Video upload failed: {str(e)}"
85
  logger.error(error_msg)
 
89
  """Handle background generation (for non-upload types)"""
90
  if not video_path:
91
  return None, "No video loaded"
 
92
  if bg_type == "upload":
93
  return None, "Use the upload field above for custom backgrounds"
94
+
95
  try:
96
  info = get_video_info(video_path)
97
  width, height = info['width'], info['height']
98
+
99
  if bg_type == "ai_generate":
100
  background = generate_ai_background(bg_prompt, width, height)
101
  status = f"βœ… Generated AI background: '{bg_prompt}'"
102
+
103
  elif bg_type == "gradient":
104
  gradients = ["sunset", "ocean", "forest", "sky"]
105
+ gradient_type = next((g for g in gradients if g in bg_prompt.lower()), gradients[0])
106
  background = create_gradient_background(gradient_type, width, height)
107
  status = f"βœ… Generated {gradient_type} gradient background"
108
+
109
  elif bg_type == "solid":
110
  colors = ["white", "black", "red", "green", "blue", "yellow", "purple", "orange", "pink", "gray"]
111
+ color = next((c for c in colors if c in bg_prompt.lower()), "white")
112
  background = create_solid_background(color, width, height)
113
  status = f"βœ… Generated {color} solid background"
114
+
115
  elif bg_type == "unsplash":
116
+ query = bg_prompt.strip() or "nature"
117
  background = download_unsplash_image(query, width, height)
118
  status = f"βœ… Downloaded background from Unsplash: '{query}'"
119
+
120
  else:
121
  background = create_solid_background("gray", width, height)
122
  status = "βœ… Generated default gray background"
123
+
124
  logger.info(status)
125
  return background, status
126
+
127
  except Exception as e:
128
  error_msg = f"❌ Background generation failed: {str(e)}"
129
  logger.error(error_msg)
 
139
  """Handle complete video processing"""
140
  if not video_path:
141
  return None, "❌ No video provided"
 
142
  if not background_image:
143
  return None, "❌ No background provided"
144
+
145
  try:
146
  progress(0, "Starting video processing...")
147
  logger.info("🎬 Starting video processing")
148
+
149
  job_dir = create_job_directory()
150
  progress_tracker.update("Creating job directory...")
151
+
152
  def update_progress(message: str):
153
  try:
154
  status = progress_tracker.get_status()
 
157
  logger.info(f"Progress: {progress_val:.1%} - {message}")
158
  except Exception as e:
159
  logger.warning(f"Progress update failed: {e}")
160
+
161
  result_path = process_video_pipeline(
162
  video_path=video_path,
163
  background_image=background_image,
 
166
  job_dir=job_dir,
167
  progress_callback=update_progress
168
  )
169
+
170
  progress(1.0, "Processing complete!")
 
171
  clear_gpu_memory()
172
+
173
+ status = "βœ… Video processing completed successfully!"
174
  logger.info(status)
 
175
  return result_path, status
176
+
177
  except Exception as e:
178
  error_msg = f"❌ Processing failed: {str(e)}"
179
  logger.error(error_msg)
180
+ logger.error("Traceback: %s", traceback.format_exc())
 
181
  clear_gpu_memory()
 
182
  return None, error_msg
183
 
184
  def handle_preview_generation(video_path: str, frame_number: int = 0) -> Tuple[Optional[Image.Image], str]:
185
  """Generate preview frame from video"""
186
  if not video_path:
187
  return None, "No video loaded"
 
188
  try:
189
  frame = extract_frame(video_path, frame_number)
190
  if frame is None:
191
  return None, "Failed to extract frame"
 
192
  preview_image = Image.fromarray(frame)
193
  return preview_image, f"βœ… Preview generated (frame {frame_number})"
 
194
  except Exception as e:
195
  error_msg = f"❌ Preview generation failed: {str(e)}"
196
  logger.error(error_msg)
 
202
 
203
  def create_interface():
204
  """Create the main Gradio interface"""
205
+
206
  custom_css = """
207
  .container { max-width: 1200px; margin: auto; }
208
  .header { text-align: center; margin-bottom: 30px; }
 
210
  .status { font-family: monospace; font-size: 12px; }
211
  .progress-bar { margin: 10px 0; }
212
  """
213
+
214
  with gr.Blocks(
215
  title="BackgroundFX Pro",
216
  css=custom_css,
217
+ theme=gr.themes.Soft(),
218
+ analytics_enabled=False, # keep things quiet/stable on 4.x
219
  ) as demo:
220
+
221
  gr.HTML("""
222
  <div class="header">
223
  <h1>🎬 BackgroundFX Pro</h1>
224
  <p>Professional AI-powered video background replacement using SAM2 and MatAnyone</p>
225
  </div>
226
  """)
227
+
228
  video_path_state = gr.State(value=None)
229
  background_image_state = gr.State(value=None)
230
+
231
  with gr.Row():
232
  with gr.Column(scale=1):
233
  with gr.Group():
 
242
  interactive=False,
243
  height=300
244
  )
245
+ # NEW: a fixed preview status box (hidden)
246
+ preview_status = gr.Textbox(
247
+ label="Preview Status",
248
+ interactive=False,
249
+ visible=False,
250
+ elem_classes=["status"]
251
+ )
252
  video_status = gr.Textbox(
253
  label="Video Status",
254
  interactive=False,
255
  elem_classes=["status"]
256
  )
257
+
258
  with gr.Group():
259
  gr.HTML("<h3>🎨 Background Selection</h3>")
260
+
261
  gr.HTML("""
262
  <div style='background: #f0f8ff; padding: 10px; border-radius: 5px; margin-bottom: 15px;'>
263
  <b>Choose your background method:</b><br>
264
  β€’ <b>Upload:</b> Use your own image<br>
265
+ β€’ <b>AI Generate:</b> Create with AI prompt<br>
266
  β€’ <b>Gradient/Solid/Unsplash:</b> Quick generation
267
  </div>
268
  """)
269
+
270
  background_type = gr.Radio(
271
  choices=[
272
  ("πŸ“€ Upload Image", "upload"),
273
+ ("πŸ€– AI Generate", "ai_generate"),
274
  ("🌈 Gradient", "gradient"),
275
  ("🎯 Solid Color", "solid"),
276
  ("πŸ“Έ Unsplash Photo", "unsplash")
 
278
  label="Background Type",
279
  value="upload"
280
  )
281
+
282
  custom_bg_upload = gr.Image(
283
  label="Upload Custom Background",
284
  type="pil",
 
286
  height=250,
287
  visible=True
288
  )
289
+
290
  background_prompt = gr.Textbox(
291
  label="Background Prompt",
292
+ placeholder=("AI: 'futuristic city', 'tropical beach' | Gradient: 'sunset', 'ocean' | "
293
+ "Solid: 'red', 'blue' | Unsplash: 'mountain landscape'"),
294
  value="futuristic city skyline at sunset",
295
  visible=False
296
  )
297
+
298
  generate_bg_btn = gr.Button(
299
  "Generate Background",
300
  variant="secondary",
301
+ # (remove size=..., not guaranteed in 4.41.x)
302
  )
303
+
304
  background_preview = gr.Image(
305
  label="Background Preview",
306
  interactive=False,
307
  height=300
308
  )
309
+
310
  background_status = gr.Textbox(
311
  label="Background Status",
312
  interactive=False,
313
  elem_classes=["status"]
314
  )
315
+
316
  with gr.Column(scale=1):
317
  with gr.Group():
318
  gr.HTML("<h3>⚑ Processing</h3>")
319
+
320
  process_btn = gr.Button(
321
  "πŸš€ Process Video",
322
  variant="primary",
 
323
  )
324
+
325
  processing_status = gr.Textbox(
326
  label="Processing Status",
327
  interactive=False,
328
  elem_classes=["status"]
329
  )
330
+
331
  with gr.Group():
332
  gr.HTML("<h3>πŸ“½οΈ Results</h3>")
333
+
334
  result_video = gr.Video(
335
  label="Processed Video",
336
  height=400
337
  )
338
+
339
+ # NEW: real downloadable output
340
+ download_btn = gr.DownloadButton(
341
  "πŸ“₯ Download Result",
 
342
  visible=False
343
  )
344
+
345
  with gr.Accordion("πŸ”§ System Information", open=False):
346
  system_info = gr.HTML(f"""
347
  <div class="system-info">
 
352
  <p><strong>App Root:</strong> {APP_ROOT}</p>
353
  </div>
354
  """)
355
+
356
  # Event Handlers
357
  background_type.change(
358
  fn=handle_background_type_change,
359
  inputs=[background_type],
360
  outputs=[custom_bg_upload, background_prompt, generate_bg_btn]
361
  )
362
+
363
  custom_bg_upload.change(
364
  fn=handle_custom_background_upload,
365
  inputs=[custom_bg_upload],
 
369
  inputs=[background_image_state],
370
  outputs=[background_preview]
371
  )
372
+
373
  video_upload.change(
374
  fn=handle_video_upload,
375
  inputs=[video_upload],
 
377
  ).then(
378
  fn=handle_preview_generation,
379
  inputs=[video_path_state],
380
+ outputs=[video_preview, preview_status] # FIX: use a real, pre-defined component
381
  )
382
+
383
  generate_bg_btn.click(
384
  fn=handle_background_generation,
385
  inputs=[background_type, background_prompt, video_path_state],
 
389
  inputs=[background_image_state],
390
  outputs=[background_preview]
391
  )
392
+
393
  process_btn.click(
394
  fn=handle_video_processing,
395
  inputs=[
396
  video_path_state,
397
+ background_image_state,
398
  background_type,
399
  background_prompt
400
  ],
401
  outputs=[result_video, processing_status]
402
  ).then(
403
+ # FIX: wire the download button (set value=path and visible accordingly)
404
+ fn=lambda path: gr.update(value=path, visible=bool(path)),
405
  inputs=[result_video],
406
  outputs=[download_btn]
407
  )
408
+
409
+ return demo
utils/paths.py ADDED
@@ -0,0 +1,29 @@
# utils/paths.py
from pathlib import Path
import os, re, uuid, shutil

APP_ROOT = Path(__file__).resolve().parents[1]
DATA_ROOT = APP_ROOT / "data"
TMP_ROOT = APP_ROOT / "tmp"
for p in (DATA_ROOT, TMP_ROOT, APP_ROOT / ".hf", APP_ROOT / ".torch"):
    p.mkdir(parents=True, exist_ok=True)

os.environ.setdefault("HF_HOME", str(APP_ROOT / ".hf"))
os.environ.setdefault("TORCH_HOME", str(APP_ROOT / ".torch"))

def safe_name(name: str, default="file"):
    base = re.sub(r"[^A-Za-z0-9._-]+", "_", (name or default))
    return (base or default)[:120]

def job_dir(prefix="job"):
    d = DATA_ROOT / f"{prefix}-{uuid.uuid4().hex[:8]}"
    d.mkdir(parents=True, exist_ok=True)
    return d

def disk_stats(p: Path = APP_ROOT) -> str:
    try:
        total, used, free = shutil.disk_usage(str(p))
        mb = lambda x: x // (1024 * 1024)
        return f"disk(total={mb(total)}MB, used={mb(used)}MB, free={mb(free)}MB)"
    except Exception:
        return "disk(n/a)"
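A short sketch of how these helpers are typically called from app code (file names are illustrative):
from utils.paths import job_dir, safe_name, disk_stats

work_dir = job_dir("upload")                        # e.g. data/upload-1a2b3c4d
dest = work_dir / safe_name("my clip (final).mp4")  # sanitised to my_clip_final_.mp4
print(disk_stats())                                 # disk(total=...MB, used=...MB, free=...MB)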
utils/perf_tuning.py ADDED
@@ -0,0 +1,21 @@
# utils/perf_tuning.py
import os, logging
try:
    import cv2
except Exception:
    cv2 = None
import torch

def apply():
    os.environ.setdefault("OMP_NUM_THREADS", "4")
    if cv2:
        try:
            cv2.setNumThreads(4)
        except Exception as e:
            logging.info("cv2 threads not set: %s", e)
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
        try:
            logging.info("CUDA device %s - cuDNN benchmark ON", torch.cuda.get_device_name(0))
        except Exception:
            logging.info("CUDA available - cuDNN benchmark ON")
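And a sketch of the intended call site, assuming utils is importable as a package; run once at startup before models are loaded:
from utils import perf_tuning

perf_tuning.apply()   # sets thread counts and enables cuDNN autotune when a GPU is present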