MogensR committed on
Commit
347ce74
·
1 Parent(s): 9337085

Update core/app.py

Browse files
Files changed (1) hide show
  1. core/app.py +6 -37
core/app.py CHANGED
@@ -5,42 +5,15 @@
5
  """
6
  from __future__ import annotations
7
 
8
- # ── Early env/threading hygiene (safe default to silence libgomp) ────────────
9
  import os
10
- os.environ["OMP_NUM_THREADS"] = "1" # Force valid value early
11
-
12
- # --- Stable runtime env defaults and optional secret-name mapping ---
13
- def _map_env(src: str, dst: str):
14
- v = os.getenv(src)
15
- if v and not os.getenv(dst):
16
- os.environ[dst] = v
17
-
18
- # If HF blocks underscores in secret names, you can add simple names and map them:
19
- # Add secrets later like: OMPNUMTHREADS=1, TOKENIZERSPARALLELISM=false,
20
- # PYTORCHCUDAALLOCCONF=max_split_size_mb:128, LOGLEVEL=info, APPENV=production
21
- _map_env("OMPNUMTHREADS", "OMP_NUM_THREADS")
22
- _map_env("TOKENIZERSPARALLELISM", "TOKENIZERS_PARALLELISM")
23
- _map_env("PYTORCHCUDAALLOCCONF", "PYTORCH_CUDA_ALLOC_CONF")
24
- _map_env("LOGLEVEL", "LOG_LEVEL")
25
- _map_env("APPENV", "APP_ENV")
26
-
27
- # Provider secret mappings (underscore-free -> standard)
28
- _map_env("CLOUDINARYURL", "CLOUDINARY_URL")
29
- _map_env("HEYGENAPIKEY", "HEYGEN_API_KEY")
30
- _map_env("DATABASEURL", "DATABASE_URL")
31
-
32
- # Cache path mappings (underscore-free -> standard)
33
- _map_env("HFHOME", "HF_HOME")
34
- _map_env("TRANSFORMERSCACHE", "TRANSFORMERS_CACHE")
35
- _map_env("TORCHHOME", "TORCH_HOME")
36
- # Optional: GPU visibility alias
37
- _map_env("CUDAVISIBLEDEVICES", "CUDA_VISIBLE_DEVICES")
38
-
39
- # Critical defaults (safe even if already set elsewhere)
40
  os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
 
41
  os.environ.setdefault("LOG_LEVEL", "info")
42
  os.environ.setdefault("APP_ENV", "production")
43
- os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128")
44
 
45
  # Ensure reasonable cache defaults if not set
46
  from pathlib import Path
@@ -74,10 +47,6 @@ def _ensure_cloudinary_url():
74
  import time
75
  from typing import Optional, Tuple, Dict, Any, Callable
76
 
77
- # Mitigate CUDA fragmentation (must be set before importing torch)
78
- if "PYTORCH_CUDA_ALLOC_CONF" not in os.environ:
79
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,max_split_size_mb:128"
80
-
81
  # ── Logging ──────────────────────────────────────────────────────────────────
82
  logging.basicConfig(
83
  level=logging.INFO,
@@ -462,7 +431,7 @@ def _process_single_stage(
462
  output_path=out_path,
463
  bg_config={
464
  "background_choice": background_choice,
465
- "custom_path": { "path": custom_background_path } if isinstance(custom_background_path, dict) else custom_background_path,
466
  },
467
  progress_callback=progress_callback,
468
  )
 
5
  """
6
  from __future__ import annotations
7
 
8
+ # ── Critical environment defaults (set before any imports) ────────────────
9
  import os
10
+
11
+ # Set critical defaults directly - HF now supports underscores in env vars
12
+ os.environ.setdefault("OMP_NUM_THREADS", "1")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
14
+ os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128")
15
  os.environ.setdefault("LOG_LEVEL", "info")
16
  os.environ.setdefault("APP_ENV", "production")
 
17
 
18
  # Ensure reasonable cache defaults if not set
19
  from pathlib import Path
 
47
  import time
48
  from typing import Optional, Tuple, Dict, Any, Callable
49
 
 
 
 
 
50
  # ── Logging ──────────────────────────────────────────────────────────────────
51
  logging.basicConfig(
52
  level=logging.INFO,
 
431
  output_path=out_path,
432
  bg_config={
433
  "background_choice": background_choice,
434
+ "custom_path": custom_background_path,
435
  },
436
  progress_callback=progress_callback,
437
  )