Update streamlit_app.py

streamlit_app.py  CHANGED  (+51 -44)
@@ -53,7 +53,6 @@ def check_gpu():
         logger.info(f"Device Name: {torch.cuda.get_device_name(0)}")
         logger.info(f"Device Capability: {torch.cuda.get_device_capability(0)}")
 
-        # Set as default device
         torch.cuda.set_device(0)
         logger.info("Set CUDA device 0 as default")
 
@@ -102,13 +101,17 @@ def initialize_session_state():
     defaults = {
         'uploaded_video': None,
         'video_bytes_cache': None,
+        'video_preview_placeholder': None,
         'bg_image_cache': None,
+        'bg_preview_placeholder': None,
         'bg_color': "#00FF00",
+        'cached_color': None,
         'color_display_cache': None,
         'processed_video_bytes': None,
         'processing': False,
         'gpu_available': None,
-        '
+        'last_video_id': None,
+        'last_bg_image_id': None
     }
     for key, value in defaults.items():
         if key not in st.session_state:
@@ -125,12 +128,10 @@ def process_video(input_file, background, bg_type="image"):
     logger.info("=" * 60)
 
     try:
-        # Ensure GPU is default device
         if torch.cuda.is_available():
             torch.cuda.set_device(0)
             logger.info("GPU set as default device for processing")
 
-        # Create temp directory
         temp_base = Path(tempfile.gettempdir()) / "video_processing"
         temp_base.mkdir(exist_ok=True)
         temp_dir = temp_base / f"session_{int(time.time())}"
@@ -138,7 +139,6 @@ def process_video(input_file, background, bg_type="image"):
 
         logger.info(f"Temp directory: {temp_dir}")
 
-        # Save video
         input_path = str(temp_dir / "input.mp4")
         logger.info(f"Saving video to {input_path}")
 
@@ -150,7 +150,6 @@ def process_video(input_file, background, bg_type="image"):
         file_size = os.path.getsize(input_path)
         logger.info(f"Video saved: {file_size} bytes ({file_size/1e6:.2f}MB)")
 
-        # Prepare background
         bg_path = None
         if bg_type == "image" and background is not None:
             logger.info("Processing background IMAGE")
@@ -167,7 +166,6 @@ def process_video(input_file, background, bg_type="image"):
             cv2.imwrite(bg_path, np.ones((100, 100, 3), dtype=np.uint8) * color_rgb[::-1])
             logger.info(f"Background color saved")
 
-        # Progress tracking
         progress_bar = st.progress(0)
         status_text = st.empty()
 
@@ -177,18 +175,15 @@ def progress_callback(progress, message):
             progress_bar.progress(progress)
             status_text.text(f"{message} ({progress*100:.0f}%)")
 
-        # GPU status before processing
         if torch.cuda.is_available():
             logger.info(f"GPU Memory Before: {torch.cuda.memory_allocated()/1e9:.2f}GB")
 
-        # Process video
         output_path = str(temp_dir / "output.mp4")
         click_points = [[0.5, 0.5]]
 
         logger.info("Importing TwoStageProcessor...")
         from pipeline.integrated_pipeline import TwoStageProcessor
 
-        # Don't cache processor - causes GPU issues
         logger.info("Creating processor instance...")
         processor = TwoStageProcessor(temp_dir=str(temp_dir))
         logger.info("Processor created")
@@ -216,21 +211,18 @@ def progress_callback(progress, message):
         if not success:
             raise RuntimeError("Processing returned False")
 
-        # Check output
         if not os.path.exists(output_path):
             raise FileNotFoundError("Output video not created")
 
         output_size = os.path.getsize(output_path)
         logger.info(f"Output: {output_size} bytes ({output_size/1e6:.2f}MB)")
 
-        # Read output
         logger.info("Reading output...")
         with open(output_path, 'rb') as f:
             video_bytes = f.read()
 
         logger.info(f"Output loaded: {len(video_bytes)/1e6:.2f}MB")
 
-        # Cleanup
         try:
             shutil.rmtree(temp_dir)
             logger.info("Temp cleaned")
@@ -259,7 +251,6 @@ def main():
 
     initialize_session_state()
 
-    # GPU Status
     with st.sidebar:
         st.subheader("System Status")
         if st.session_state.gpu_available:
@@ -280,20 +271,20 @@ def main():
             key="video_uploader"
         )
 
-        #
-
-
-
-
-
-
-
-            st.session_state.video_bytes_cache = None
-            st.session_state.processed_video_bytes = None
-            st.session_state.files_loaded = True
+        # Check if video actually changed using id()
+        current_video_id = id(uploaded)
+        if current_video_id != st.session_state.last_video_id:
+            logger.info(f"New video: {uploaded.name if uploaded else 'None'}")
+            st.session_state.uploaded_video = uploaded
+            st.session_state.last_video_id = current_video_id
+            st.session_state.video_bytes_cache = None
+            st.session_state.processed_video_bytes = None
 
-        # Video preview
+        # Video preview with placeholder
        st.markdown("### Video Preview")
+        if st.session_state.video_preview_placeholder is None:
+            st.session_state.video_preview_placeholder = st.empty()
+
         if st.session_state.uploaded_video is not None:
             try:
                 if st.session_state.video_bytes_cache is None:
@@ -302,12 +293,13 @@ def main():
                     st.session_state.video_bytes_cache = st.session_state.uploaded_video.read()
                     logger.info(f"Cached {len(st.session_state.video_bytes_cache)/1e6:.2f}MB")
 
-                st.
+                with st.session_state.video_preview_placeholder.container():
+                    st.video(st.session_state.video_bytes_cache)
             except Exception as e:
                 logger.error(f"Video preview error: {e}")
-                st.error(f"Cannot display video: {e}")
+                st.session_state.video_preview_placeholder.error(f"Cannot display video: {e}")
         else:
-            st.
+            st.session_state.video_preview_placeholder.empty()
 
     with col2:
         st.header("2. Background Settings")
@@ -326,18 +318,25 @@ def main():
                 key="bg_image_uploader"
             )
 
-            if
-
-
-
-
-
-
-
-
-
+            # Check if image actually changed using id()
+            current_bg_id = id(bg_image)
+            if current_bg_id != st.session_state.last_bg_image_id:
+                logger.info(f"New background: {bg_image.name if bg_image else 'None'}")
+                st.session_state.last_bg_image_id = current_bg_id
+                if bg_image is not None:
+                    st.session_state.bg_image_cache = Image.open(bg_image)
+                else:
+                    st.session_state.bg_image_cache = None
+
+            # Background preview with placeholder
+            if st.session_state.bg_preview_placeholder is None:
+                st.session_state.bg_preview_placeholder = st.empty()
+
+            if st.session_state.bg_image_cache is not None:
+                with st.session_state.bg_preview_placeholder.container():
+                    st.image(st.session_state.bg_image_cache, caption="Selected Background", use_container_width=True)
             else:
-                st.
+                st.session_state.bg_preview_placeholder.empty()
 
         elif bg_type == "Color":
             selected_color = st.color_picker(
@@ -346,15 +345,24 @@ def main():
                 key="color_picker"
            )
 
-            if selected_color != st.session_state.
+            if selected_color != st.session_state.cached_color:
+                logger.info(f"Color: {selected_color}")
                 st.session_state.bg_color = selected_color
+                st.session_state.cached_color = selected_color
+
                 color_rgb = tuple(int(selected_color.lstrip('#')[i:i+2], 16) for i in (0, 2, 4))
                 color_display = np.zeros((100, 100, 3), dtype=np.uint8)
                 color_display[:, :] = color_rgb[::-1]
                 st.session_state.color_display_cache = color_display
 
+            if st.session_state.bg_preview_placeholder is None:
+                st.session_state.bg_preview_placeholder = st.empty()
+
             if st.session_state.color_display_cache is not None:
-                st.
+                with st.session_state.bg_preview_placeholder.container():
+                    st.image(st.session_state.color_display_cache, caption="Selected Color", width=200)
+            else:
+                st.session_state.bg_preview_placeholder.empty()
 
         st.header("3. Process & Download")
 
@@ -400,7 +408,6 @@ def main():
         finally:
             st.session_state.processing = False
 
-    # Show processed video
     if st.session_state.processed_video_bytes is not None:
         st.markdown("---")
         st.markdown("### Processed Video")
@@ -419,4 +426,4 @@ def main():
             st.error(f"Display error: {e}")
 
 if __name__ == "__main__":
-    main()
+    main()
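
The initialize_session_state() hunk grows the defaults dict with the new preview and change-tracking keys. The seeding idiom itself is worth seeing in isolation: each key is written only if it is not already present, so values set on earlier reruns survive. A minimal sketch, using just a subset of the app's keys for illustration:

    import streamlit as st

    defaults = {
        "bg_color": "#00FF00",
        "processing": False,
        "last_video_id": None,
    }
    # Seed each key once; values set on earlier reruns are left untouched.
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value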
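
The uploader hunks key their "did the file change?" check on id(uploaded). That works only while the same Python object stays alive, since id() values can be reused after an object is garbage-collected; a signature built from values the app already reads (the file name plus the content length) is a more conservative key. Below is a sketch of that alternative; the file_signature helper and the demo_* session keys are illustrative, not part of this commit.

    import streamlit as st

    def file_signature(uploaded):
        """Return a value that changes whenever the uploaded file changes."""
        if uploaded is None:
            return None
        # Name plus content length survives reruns even if the wrapper object is recreated.
        return (uploaded.name, len(uploaded.getvalue()))

    uploaded = st.file_uploader("Upload a video", type=["mp4"], key="demo_video_uploader")

    sig = file_signature(uploaded)
    if sig != st.session_state.get("demo_last_video_sig"):
        # New or removed file: reset the caches that depend on it.
        st.session_state["demo_last_video_sig"] = sig
        st.session_state["demo_video_bytes"] = uploaded.getvalue() if uploaded else None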
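
The preview hunks all follow the same st.empty() placeholder pattern: reserve a slot once, render into it via container(), and clear it with empty() when there is nothing to show, so a rerun overwrites the same slot rather than appending new widgets. A minimal self-contained sketch of the pattern; it creates the placeholder locally instead of caching it in session_state, purely to keep the example short.

    import numpy as np
    import streamlit as st

    placeholder = st.empty()  # reserves a single slot in the layout
    show = st.checkbox("Show preview", value=True, key="demo_show_preview")

    if show:
        # container() lets several elements share the one reserved slot.
        with placeholder.container():
            st.image(np.full((100, 200, 3), 40, dtype=np.uint8), caption="Preview")
            st.caption("Rendered inside the placeholder")
    else:
        placeholder.empty()  # clears whatever the slot currently holds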
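
The color branch converts the picker's hex string to an RGB tuple and then reverses it to BGR, since OpenCV writes images in BGR channel order. The same arithmetic, pulled out into a standalone sketch with illustrative function names:

    import numpy as np

    def hex_to_rgb(hex_color):
        """'#00FF00' -> (0, 255, 0)"""
        h = hex_color.lstrip('#')
        return tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))

    def solid_bgr_swatch(hex_color, size=100):
        """Build a size x size OpenCV-style (BGR) swatch of the given color."""
        rgb = hex_to_rgb(hex_color)
        swatch = np.zeros((size, size, 3), dtype=np.uint8)
        swatch[:, :] = rgb[::-1]  # reverse RGB to BGR for cv2.imwrite
        return swatch

    print(hex_to_rgb("#FF8800"))              # (255, 136, 0)
    print(solid_bgr_swatch("#FF8800")[0, 0])  # [  0 136 255] -> B=0, G=136, R=255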