Update
- .gitattributes +1 -0
- .gitignore +0 -1
- README.md +1 -1
- app.py +33 -48
- images/pexels-ksenia-chernaya-8535230.jpg +3 -0
- requirements.txt +6 -6
- style.css +7 -0
.gitattributes
CHANGED
@@ -26,3 +26,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
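This added attribute line is exactly what `git lfs track '*.jpg'` writes to `.gitattributes`: it routes every JPEG, including the sample image added in this commit, through Git LFS instead of the regular object store.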
.gitignore
DELETED
@@ -1 +0,0 @@
-images
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 📚
 colorFrom: purple
 colorTo: yellow
 sdk: gradio
-sdk_version: 3.
+sdk_version: 3.36.1
 app_file: app.py
 pinned: false
 ---
app.py
CHANGED
@@ -4,9 +4,7 @@ from __future__ import annotations
 
 import functools
 import os
-import pathlib
 import sys
-import tarfile
 from typing import Callable
 
 import cv2
@@ -27,32 +25,12 @@ sys.path.insert(0, 'deep-head-pose/code')
 from hopenet import Hopenet
 from ibug.face_detection import RetinaFacePredictor
 
-
-DESCRIPTION = 'This is an unofficial demo for https://github.com/natanielruiz/deep-head-pose.'
-
-HF_TOKEN = os.getenv('HF_TOKEN')
-
-
-def load_sample_images() -> list[pathlib.Path]:
-    image_dir = pathlib.Path('images')
-    if not image_dir.exists():
-        image_dir.mkdir()
-        dataset_repo = 'hysts/input-images'
-        filenames = ['001.tar']
-        for name in filenames:
-            path = huggingface_hub.hf_hub_download(dataset_repo,
-                                                   name,
-                                                   repo_type='dataset',
-                                                   use_auth_token=HF_TOKEN)
-            with tarfile.open(path) as f:
-                f.extractall(image_dir.as_posix())
-    return sorted(image_dir.rglob('*.jpg'))
+DESCRIPTION = '# [Hopenet](https://github.com/natanielruiz/deep-head-pose)'
 
 
 def load_model(model_name: str, device: torch.device) -> nn.Module:
-    path = huggingface_hub.hf_hub_download('
-                                           f'models/{model_name}.pkl',
-                                           use_auth_token=HF_TOKEN)
+    path = huggingface_hub.hf_hub_download('public-data/Hopenet',
+                                           f'models/{model_name}.pkl')
     state_dict = torch.load(path, map_location='cpu')
     model = Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)
     model.load_state_dict(state_dict)
@@ -160,26 +138,33 @@ model_names = [
 models = {name: load_model(name, device) for name in model_names}
 transform = create_transform()
 
-gr.
+fn = functools.partial(run,
+                       face_detector=face_detector,
+                       models=models,
+                       transform=transform,
+                       device=device)
+
+examples = [['images/pexels-ksenia-chernaya-8535230.jpg', 'hopenet_alpha1']]
+
+with gr.Blocks(css='style.css') as demo:
+    gr.Markdown(DESCRIPTION)
+    with gr.Row():
+        with gr.Column():
+            image = gr.Image(label='Input', type='numpy')
+            model_name = gr.Radio(label='Model',
+                                  choices=model_names,
+                                  type='value',
+                                  value=model_names[0])
+            run_button = gr.Button('Run')
+        with gr.Column():
+            result = gr.Image(label='Output')
+    gr.Examples(examples=examples,
+                inputs=[image, model_name],
+                outputs=result,
+                fn=fn,
+                cache_examples=os.getenv('CACHE_EXAMPLES') == '1')
+    run_button.click(fn=fn,
+                     inputs=[image, model_name],
+                     outputs=result,
+                     api_name='run')
+demo.queue().launch()
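The reworked `load_model` drops `HF_TOKEN` and `use_auth_token` because the checkpoints now live in the public `public-data/Hopenet` repo. A minimal sketch of that download call in isolation; the filename assumes the `hopenet_alpha1` checkpoint named in the examples list:

```python
from huggingface_hub import hf_hub_download

# Fetches the checkpoint (or reuses the local cache) and returns its path.
# The repo is public, so no token is required -- which is why the
# use_auth_token argument could be removed in this commit.
path = hf_hub_download('public-data/Hopenet', 'models/hopenet_alpha1.pkl')
print(path)  # e.g. ~/.cache/huggingface/hub/.../models/hopenet_alpha1.pkl
```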
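The old UI code (truncated above; presumably a `gr.Interface(...)` call) is replaced by a Blocks layout whose click handler is a `functools.partial`. The partial pre-binds the heavy objects so Gradio only ever passes the two UI values. A runnable sketch of the pattern with stand-in dependencies; the real `run` body, which detects faces and draws the predicted pose, is outside this hunk:

```python
import functools

def run(image, model_name, face_detector, models, transform, device):
    """Stand-in for the app's run(); the real body detects faces and draws pose axes."""
    model = models[model_name]         # checkpoint picked via the Radio widget
    return f'ran {model} on {device}'  # placeholder result

# Pre-bind everything except the two Gradio inputs (image, model_name).
fn = functools.partial(run,
                       face_detector='retinaface (stand-in)',
                       models={'hopenet_alpha1': 'hopenet (stand-in)'},
                       transform=None,
                       device='cpu')

# Gradio calls fn(image, model_name); the partial supplies the rest.
print(fn(None, 'hopenet_alpha1'))  # -> ran hopenet (stand-in) on cpu
```

Binding by keyword this way also lets `gr.Examples` reuse the same `fn` for example caching without re-plumbing the dependencies.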
images/pexels-ksenia-chernaya-8535230.jpg
ADDED
(stored with Git LFS)
requirements.txt
CHANGED
@@ -1,6 +1,6 @@
-numpy==1.
-opencv-python-headless==4.
-Pillow==
-scipy==1.
-torch==
-torchvision==0.
+numpy==1.23.5
+opencv-python-headless==4.8.0.74
+Pillow==10.0.0
+scipy==1.11.1
+torch==2.0.1
+torchvision==0.15.2
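The new pins are a mutually compatible set; in particular, torch and torchvision must move in lockstep (torchvision 0.15.2 is built against torch 2.0.1). A quick post-install sanity check:

```python
import torch
import torchvision

# Mismatched torch/torchvision pairs tend to fail at import time,
# so verify both versions after installing the pinned requirements.
print(torch.__version__, torchvision.__version__)  # expect 2.0.1 / 0.15.2
```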
style.css
ADDED
@@ -0,0 +1,7 @@
+h1 {
+  text-align: center;
+}
+
+#duplicate-button {
+  margin: auto;
+}