Spaces: Running on Zero
Naved committed on
Delete src
This view is limited to 50 files because it contains too many changes. See the raw diff for the full commit.
- src/config/__init__.py +0 -0
- src/config/argument_config.py +0 -45
- src/config/base_config.py +0 -29
- src/config/crop_config.py +0 -18
- src/config/inference_config.py +0 -51
- src/config/models.yaml +0 -43
- src/gradio_pipeline.py +0 -112
- src/live_portrait_pipeline.py +0 -209
- src/live_portrait_wrapper.py +0 -307
- src/modules/__init__.py +0 -0
- src/modules/appearance_feature_extractor.py +0 -48
- src/modules/convnextv2.py +0 -149
- src/modules/dense_motion.py +0 -104
- src/modules/motion_extractor.py +0 -35
- src/modules/spade_generator.py +0 -59
- src/modules/stitching_retargeting_network.py +0 -38
- src/modules/util.py +0 -441
- src/modules/warping_network.py +0 -77
- src/template_maker.py +0 -65
- src/utils/__init__.py +0 -0
- src/utils/camera.py +0 -75
- src/utils/crop.py +0 -412
- src/utils/cropper.py +0 -145
- src/utils/dependencies/insightface/__init__.py +0 -20
- src/utils/dependencies/insightface/app/__init__.py +0 -1
- src/utils/dependencies/insightface/app/common.py +0 -49
- src/utils/dependencies/insightface/app/face_analysis.py +0 -110
- src/utils/dependencies/insightface/data/__init__.py +0 -2
- src/utils/dependencies/insightface/data/image.py +0 -27
- src/utils/dependencies/insightface/data/images/Tom_Hanks_54745.png +0 -0
- src/utils/dependencies/insightface/data/images/mask_black.jpg +0 -0
- src/utils/dependencies/insightface/data/images/mask_blue.jpg +0 -0
- src/utils/dependencies/insightface/data/images/mask_green.jpg +0 -0
- src/utils/dependencies/insightface/data/images/mask_white.jpg +0 -0
- src/utils/dependencies/insightface/data/images/t1.jpg +0 -0
- src/utils/dependencies/insightface/data/objects/meanshape_68.pkl +0 -3
- src/utils/dependencies/insightface/data/pickle_object.py +0 -17
- src/utils/dependencies/insightface/data/rec_builder.py +0 -71
- src/utils/dependencies/insightface/model_zoo/__init__.py +0 -6
- src/utils/dependencies/insightface/model_zoo/arcface_onnx.py +0 -92
- src/utils/dependencies/insightface/model_zoo/attribute.py +0 -94
- src/utils/dependencies/insightface/model_zoo/inswapper.py +0 -114
- src/utils/dependencies/insightface/model_zoo/landmark.py +0 -114
- src/utils/dependencies/insightface/model_zoo/model_store.py +0 -103
- src/utils/dependencies/insightface/model_zoo/model_zoo.py +0 -97
- src/utils/dependencies/insightface/model_zoo/retinaface.py +0 -301
- src/utils/dependencies/insightface/model_zoo/scrfd.py +0 -348
- src/utils/dependencies/insightface/utils/__init__.py +0 -6
- src/utils/dependencies/insightface/utils/constant.py +0 -3
- src/utils/dependencies/insightface/utils/download.py +0 -95
src/config/__init__.py
DELETED
File without changes
src/config/argument_config.py
DELETED
@@ -1,45 +0,0 @@
-# coding: utf-8
-
-"""
-config for user
-"""
-
-import os.path as osp
-from dataclasses import dataclass
-import tyro
-from typing_extensions import Annotated
-from typing import Optional
-from .base_config import PrintableConfig, make_abs_path
-
-
-@dataclass(repr=False)  # use repr from PrintableConfig
-class ArgumentConfig(PrintableConfig):
-    ########## input arguments ##########
-    source_image: Annotated[str, tyro.conf.arg(aliases=["-s"])] = make_abs_path('../../assets/examples/source/s6.jpg')  # path to the source portrait
-    driving_info: Annotated[str, tyro.conf.arg(aliases=["-d"])] = make_abs_path('../../assets/examples/driving/d0.mp4')  # path to driving video or template (.pkl format)
-    output_dir: Annotated[str, tyro.conf.arg(aliases=["-o"])] = 'animations/'  # directory to save output video
-    #####################################
-
-    ########## inference arguments ##########
-    device_id: int = 0
-    flag_lip_zero: bool = True  # whether to let the lips close before animation; only takes effect when flag_eye_retargeting and flag_lip_retargeting are False
-    flag_eye_retargeting: bool = False
-    flag_lip_retargeting: bool = False
-    flag_stitching: bool = True  # we recommend setting it to True!
-    flag_relative: bool = True  # whether to use relative motion
-    flag_pasteback: bool = True  # whether to paste-back/stitch the animated face cropping from the face-cropping space to the original image space
-    flag_do_crop: bool = True  # whether to crop the source portrait to the face-cropping space
-    flag_do_rot: bool = True  # whether to conduct the rotation when flag_do_crop is True
-    #########################################
-
-    ########## crop arguments ##########
-    dsize: int = 512
-    scale: float = 2.3
-    vx_ratio: float = 0  # vx ratio
-    vy_ratio: float = -0.125  # vy ratio +up, -down
-    ####################################
-
-    ########## gradio arguments ##########
-    server_port: Annotated[int, tyro.conf.arg(aliases=["-p"])] = 7860
-    share: bool = False
-    server_name: Optional[str] = None  # one can set "0.0.0.0" on local
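For orientation, a minimal sketch of how a tyro-backed dataclass such as the ArgumentConfig above is typically parsed from the command line; the entry-point script is hypothetical and not part of this commit:

# hypothetical entry point; shows the usual tyro parsing pattern for
# a dataclass config like the deleted ArgumentConfig
import tyro
from src.config.argument_config import ArgumentConfig

args = tyro.cli(ArgumentConfig)   # e.g. python run.py -s source.jpg -d driving.mp4 -o out/
print(args)                       # PrintableConfig.__repr__ pretty-prints every field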
src/config/base_config.py
DELETED
@@ -1,29 +0,0 @@
-# coding: utf-8
-
-"""
-pretty printing class
-"""
-
-from __future__ import annotations
-import os.path as osp
-from typing import Tuple
-
-
-def make_abs_path(fn):
-    return osp.join(osp.dirname(osp.realpath(__file__)), fn)
-
-
-class PrintableConfig:  # pylint: disable=too-few-public-methods
-    """Printable Config defining str function"""
-
-    def __repr__(self):
-        lines = [self.__class__.__name__ + ":"]
-        for key, val in vars(self).items():
-            if isinstance(val, Tuple):
-                flattened_val = "["
-                for item in val:
-                    flattened_val += str(item) + "\n"
-                flattened_val = flattened_val.rstrip("\n")
-                val = flattened_val + "]"
-            lines += f"{key}: {str(val)}".split("\n")
-        return "\n    ".join(lines)
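A quick illustration of the output this __repr__ yields; the Demo class and its field values are hypothetical:

# hypothetical demo of PrintableConfig pretty-printing
from dataclasses import dataclass
from src.config.base_config import PrintableConfig

@dataclass(repr=False)  # defer to PrintableConfig.__repr__
class Demo(PrintableConfig):
    dsize: int = 512
    scale: float = 2.3

print(Demo())
# Demo:
#     dsize: 512
#     scale: 2.3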
src/config/crop_config.py
DELETED
@@ -1,18 +0,0 @@
-# coding: utf-8
-
-"""
-parameters used for crop faces
-"""
-
-import os.path as osp
-from dataclasses import dataclass
-from typing import Union, List
-from .base_config import PrintableConfig
-
-
-@dataclass(repr=False)  # use repr from PrintableConfig
-class CropConfig(PrintableConfig):
-    dsize: int = 512  # crop size
-    scale: float = 2.3  # scale factor
-    vx_ratio: float = 0  # vx ratio
-    vy_ratio: float = -0.125  # vy ratio +up, -down
src/config/inference_config.py
DELETED
@@ -1,51 +0,0 @@
-# coding: utf-8
-
-"""
-config dataclass used for inference
-"""
-
-import os.path as osp
-import cv2
-from numpy import ndarray
-from dataclasses import dataclass
-from typing import Literal, Tuple
-from .base_config import PrintableConfig, make_abs_path
-
-
-@dataclass(repr=False)  # use repr from PrintableConfig
-class InferenceConfig(PrintableConfig):
-    models_config: str = make_abs_path('./models.yaml')  # portrait animation config
-    checkpoint_F: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/appearance_feature_extractor.pth')  # path to checkpoint
-    checkpoint_M: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/motion_extractor.pth')  # path to checkpoint
-    checkpoint_G: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/spade_generator.pth')  # path to checkpoint
-    checkpoint_W: str = make_abs_path('../../pretrained_weights/liveportrait/base_models/warping_module.pth')  # path to checkpoint
-
-    checkpoint_S: str = make_abs_path('../../pretrained_weights/liveportrait/retargeting_models/stitching_retargeting_module.pth')  # path to checkpoint
-    flag_use_half_precision: bool = True  # whether to use half precision
-
-    flag_lip_zero: bool = True  # whether to let the lips close before animation; only takes effect when flag_eye_retargeting and flag_lip_retargeting are False
-    lip_zero_threshold: float = 0.03
-
-    flag_eye_retargeting: bool = False
-    flag_lip_retargeting: bool = False
-    flag_stitching: bool = True  # we recommend setting it to True!
-
-    flag_relative: bool = True  # whether to use relative motion
-    anchor_frame: int = 0  # set this value if find_best_frame is True
-
-    input_shape: Tuple[int, int] = (256, 256)  # input shape
-    output_format: Literal['mp4', 'gif'] = 'mp4'  # output video format
-    output_fps: int = 30  # fps for output video
-    crf: int = 15  # crf for output video
-
-    flag_write_result: bool = True  # whether to write output video
-    flag_pasteback: bool = True  # whether to paste-back/stitch the animated face cropping from the face-cropping space to the original image space
-    mask_crop: ndarray = cv2.imread(make_abs_path('../utils/resources/mask_template.png'), cv2.IMREAD_COLOR)
-    flag_write_gif: bool = False
-    size_gif: int = 256
-    ref_max_shape: int = 1280
-    ref_shape_n: int = 2
-
-    device_id: int = 0
-    flag_do_crop: bool = False  # whether to crop the source portrait to the face-cropping space
-    flag_do_rot: bool = True  # whether to conduct the rotation when flag_do_crop is True
src/config/models.yaml
DELETED
@@ -1,43 +0,0 @@
-model_params:
-  appearance_feature_extractor_params: # the F in the paper
-    image_channel: 3
-    block_expansion: 64
-    num_down_blocks: 2
-    max_features: 512
-    reshape_channel: 32
-    reshape_depth: 16
-    num_resblocks: 6
-  motion_extractor_params: # the M in the paper
-    num_kp: 21
-    backbone: convnextv2_tiny
-  warping_module_params: # the W in the paper
-    num_kp: 21
-    block_expansion: 64
-    max_features: 512
-    num_down_blocks: 2
-    reshape_channel: 32
-    estimate_occlusion_map: True
-    dense_motion_params:
-      block_expansion: 32
-      max_features: 1024
-      num_blocks: 5
-      reshape_depth: 16
-      compress: 4
-  spade_generator_params: # the G in the paper
-    upscale: 2 # represents upsample factor 256x256 -> 512x512
-    block_expansion: 64
-    max_features: 512
-    num_down_blocks: 2
-  stitching_retargeting_module_params: # the S in the paper
-    stitching:
-      input_size: 126 # (21*3)*2
-      hidden_sizes: [128, 128, 64]
-      output_size: 65 # (21*3)+2(tx,ty)
-    lip:
-      input_size: 65 # (21*3)+2
-      hidden_sizes: [128, 128, 64]
-      output_size: 63 # (21*3)
-    eye:
-      input_size: 66 # (21*3)+3
-      hidden_sizes: [256, 256, 128, 128, 64]
-      output_size: 63 # (21*3)
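These parameter groups are consumed by the wrapper (deleted later in this diff), which reads the file with yaml and unpacks each group into the matching module constructor; a minimal sketch of that pattern, using the AppearanceFeatureExtractor from src/modules/appearance_feature_extractor.py:

# sketch: load models.yaml and instantiate one module with its parameter group
import yaml
from src.modules.appearance_feature_extractor import AppearanceFeatureExtractor

with open('src/config/models.yaml') as f:
    model_config = yaml.safe_load(f)

params = model_config['model_params']['appearance_feature_extractor_params']
net = AppearanceFeatureExtractor(**params)  # image_channel=3, block_expansion=64, ...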
src/gradio_pipeline.py
DELETED
@@ -1,112 +0,0 @@
-# coding: utf-8
-
-"""
-Pipeline for gradio
-"""
-import gradio as gr
-from .config.argument_config import ArgumentConfig
-from .live_portrait_pipeline import LivePortraitPipeline
-from .utils.io import load_img_online
-from .utils.rprint import rlog as log
-from .utils.crop import prepare_paste_back, paste_back
-# from .utils.camera import get_rotation_matrix
-
-def update_args(args, user_args):
-    """update the args according to user inputs
-    """
-    for k, v in user_args.items():
-        if hasattr(args, k):
-            setattr(args, k, v)
-    return args
-
-class GradioPipeline(LivePortraitPipeline):
-
-    def __init__(self, inference_cfg, crop_cfg, args: ArgumentConfig):
-        super().__init__(inference_cfg, crop_cfg)
-        # self.live_portrait_wrapper = self.live_portrait_wrapper
-        self.args = args
-
-    def execute_video(
-        self,
-        input_image_path,
-        input_video_path,
-        flag_relative_input,
-        flag_do_crop_input,
-        flag_remap_input,
-    ):
-        """ for video-driven portrait animation
-        """
-        if input_image_path is not None and input_video_path is not None:
-            args_user = {
-                'source_image': input_image_path,
-                'driving_info': input_video_path,
-                'flag_relative': flag_relative_input,
-                'flag_do_crop': flag_do_crop_input,
-                'flag_pasteback': flag_remap_input,
-            }
-            # update config from user input
-            self.args = update_args(self.args, args_user)
-            self.live_portrait_wrapper.update_config(self.args.__dict__)
-            self.cropper.update_config(self.args.__dict__)
-            # video driven animation
-            video_path, video_path_concat = self.execute(self.args)
-            # gr.Info("Run successfully!", duration=2)
-            return video_path, video_path_concat,
-        else:
-            raise gr.Error("Please upload the source portrait and driving video 🤗🤗🤗", duration=5)
-
-    def execute_image(self, input_eye_ratio: float, input_lip_ratio: float, input_image, flag_do_crop=True):
-        """ for single image retargeting
-        """
-        # disposable feature
-        f_s_user, x_s_user, source_lmk_user, crop_M_c2o, mask_ori, img_rgb = \
-            self.prepare_retargeting(input_image, flag_do_crop)
-
-        if input_eye_ratio is None or input_lip_ratio is None:
-            raise gr.Error("Invalid ratio input 💥!", duration=5)
-        else:
-            x_s_user = x_s_user.to("cuda")
-            f_s_user = f_s_user.to("cuda")
-            # ∆_eyes,i = R_eyes(x_s; c_s,eyes, c_d,eyes,i)
-            combined_eye_ratio_tensor = self.live_portrait_wrapper.calc_combined_eye_ratio([[input_eye_ratio]], source_lmk_user)
-            eyes_delta = self.live_portrait_wrapper.retarget_eye(x_s_user, combined_eye_ratio_tensor)
-            # ∆_lip,i = R_lip(x_s; c_s,lip, c_d,lip,i)
-            combined_lip_ratio_tensor = self.live_portrait_wrapper.calc_combined_lip_ratio([[input_lip_ratio]], source_lmk_user)
-            lip_delta = self.live_portrait_wrapper.retarget_lip(x_s_user, combined_lip_ratio_tensor)
-            num_kp = x_s_user.shape[1]
-            # default: use x_s
-            x_d_new = x_s_user + eyes_delta.reshape(-1, num_kp, 3) + lip_delta.reshape(-1, num_kp, 3)
-            # D(W(f_s; x_s, x′_d))
-            out = self.live_portrait_wrapper.warp_decode(f_s_user, x_s_user, x_d_new)
-            out = self.live_portrait_wrapper.parse_output(out['out'])[0]
-            out_to_ori_blend = paste_back(out, crop_M_c2o, img_rgb, mask_ori)
-            # gr.Info("Run successfully!", duration=2)
-            return out, out_to_ori_blend
-
-
-    def prepare_retargeting(self, input_image, flag_do_crop=True):
-        """ for single image retargeting
-        """
-        if input_image is not None:
-            # gr.Info("Upload successfully!", duration=2)
-            inference_cfg = self.live_portrait_wrapper.cfg
-            ######## process source portrait ########
-            img_rgb = load_img_online(input_image, mode='rgb', max_dim=1280, n=1)  # n=1 means do not trim the pixels
-            log(f"Load source image from {input_image}.")
-            crop_info = self.cropper.crop_single_image(img_rgb)
-            if flag_do_crop:
-                I_s = self.live_portrait_wrapper.prepare_source(crop_info['img_crop_256x256'])
-            else:
-                I_s = self.live_portrait_wrapper.prepare_source(img_rgb)
-            x_s_info = self.live_portrait_wrapper.get_kp_info(I_s)
-            # R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
-            ############################################
-            f_s_user = self.live_portrait_wrapper.extract_feature_3d(I_s)
-            x_s_user = self.live_portrait_wrapper.transform_keypoint(x_s_info)
-            source_lmk_user = crop_info['lmk_crop']
-            crop_M_c2o = crop_info['M_c2o']
-            mask_ori = prepare_paste_back(inference_cfg.mask_crop, crop_info['M_c2o'], dsize=(img_rgb.shape[1], img_rgb.shape[0]))
-            return f_s_user, x_s_user, source_lmk_user, crop_M_c2o, mask_ori, img_rgb
-        else:
-            # when press the clear button, go here
-            raise gr.Error("Please upload a source portrait as the retargeting input 🤗🤗🤗", duration=5)
src/live_portrait_pipeline.py
DELETED
@@ -1,209 +0,0 @@
-# coding: utf-8
-
-"""
-Pipeline of LivePortrait
-"""
-
-import torch
-torch.backends.cudnn.benchmark = True  # disable CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR warning
-
-import cv2
-import numpy as np
-import pickle
-import os
-import os.path as osp
-from rich.progress import track
-
-from .config.argument_config import ArgumentConfig
-from .config.inference_config import InferenceConfig
-from .config.crop_config import CropConfig
-from .utils.cropper import Cropper
-from .utils.camera import get_rotation_matrix
-from .utils.video import images2video, concat_frames, get_fps, add_audio_to_video, has_audio_stream
-from .utils.crop import _transform_img, prepare_paste_back, paste_back
-from .utils.retargeting_utils import calc_lip_close_ratio
-from .utils.io import load_image_rgb, load_driving_info, resize_to_limit
-from .utils.helper import mkdir, basename, dct2cuda, is_video, is_template
-from .utils.rprint import rlog as log
-from .live_portrait_wrapper import LivePortraitWrapper
-
-
-def make_abs_path(fn):
-    return osp.join(osp.dirname(osp.realpath(__file__)), fn)
-
-
-class LivePortraitPipeline(object):
-
-    def __init__(self, inference_cfg: InferenceConfig, crop_cfg: CropConfig):
-        self.live_portrait_wrapper: LivePortraitWrapper = LivePortraitWrapper(cfg=inference_cfg)
-        self.cropper = Cropper(crop_cfg=crop_cfg)
-
-    def execute(self, args: ArgumentConfig):
-        inference_cfg = self.live_portrait_wrapper.cfg  # for convenience
-        ######## process source portrait ########
-        img_rgb = load_image_rgb(args.source_image)
-        img_rgb = resize_to_limit(img_rgb, inference_cfg.ref_max_shape, inference_cfg.ref_shape_n)
-        log(f"Load source image from {args.source_image}")
-        crop_info = self.cropper.crop_single_image(img_rgb)
-        source_lmk = crop_info['lmk_crop']
-        img_crop, img_crop_256x256 = crop_info['img_crop'], crop_info['img_crop_256x256']
-        if inference_cfg.flag_do_crop:
-            I_s = self.live_portrait_wrapper.prepare_source(img_crop_256x256)
-        else:
-            I_s = self.live_portrait_wrapper.prepare_source(img_rgb)
-        x_s_info = self.live_portrait_wrapper.get_kp_info(I_s)
-        x_c_s = x_s_info['kp']
-        R_s = get_rotation_matrix(x_s_info['pitch'], x_s_info['yaw'], x_s_info['roll'])
-        f_s = self.live_portrait_wrapper.extract_feature_3d(I_s)
-        x_s = self.live_portrait_wrapper.transform_keypoint(x_s_info)
-
-        if inference_cfg.flag_lip_zero:
-            # let lip-open scalar to be 0 at first
-            c_d_lip_before_animation = [0.]
-            combined_lip_ratio_tensor_before_animation = self.live_portrait_wrapper.calc_combined_lip_ratio(c_d_lip_before_animation, source_lmk)
-            if combined_lip_ratio_tensor_before_animation[0][0] < inference_cfg.lip_zero_threshold:
-                inference_cfg.flag_lip_zero = False
-            else:
-                lip_delta_before_animation = self.live_portrait_wrapper.retarget_lip(x_s, combined_lip_ratio_tensor_before_animation)
-        ############################################
-
-        ######## process driving info ########
-        output_fps = 30  # default fps
-        if is_video(args.driving_info):
-            log(f"Load from video file (mp4 mov avi etc...): {args.driving_info}")
-            output_fps = int(get_fps(args.driving_info))
-            log(f'The FPS of {args.driving_info} is: {output_fps}')
-
-            # TODO: track the driving video here -> build the template
-            driving_rgb_lst = load_driving_info(args.driving_info)
-            driving_rgb_lst_256 = [cv2.resize(_, (256, 256)) for _ in driving_rgb_lst]
-            I_d_lst = self.live_portrait_wrapper.prepare_driving_videos(driving_rgb_lst_256)
-            n_frames = I_d_lst.shape[0]
-            if inference_cfg.flag_eye_retargeting or inference_cfg.flag_lip_retargeting:
-                driving_lmk_lst = self.cropper.get_retargeting_lmk_info(driving_rgb_lst)
-                input_eye_ratio_lst, input_lip_ratio_lst = self.live_portrait_wrapper.calc_retargeting_ratio(source_lmk, driving_lmk_lst)
-        elif is_template(args.driving_info):
-            log(f"Load from video templates {args.driving_info}")
-            with open(args.driving_info, 'rb') as f:
-                template_lst, driving_lmk_lst = pickle.load(f)
-            n_frames = template_lst[0]['n_frames']
-            input_eye_ratio_lst, input_lip_ratio_lst = self.live_portrait_wrapper.calc_retargeting_ratio(source_lmk, driving_lmk_lst)
-        else:
-            raise Exception("Unsupported driving types!")
-        #########################################
-
-        ######## prepare for pasteback ########
-        if inference_cfg.flag_pasteback:
-            mask_ori = prepare_paste_back(inference_cfg.mask_crop, crop_info['M_c2o'], dsize=(img_rgb.shape[1], img_rgb.shape[0]))
-            I_p_paste_lst = []
-        #########################################
-
-        I_p_lst = []
-        R_d_0, x_d_0_info = None, None
-        for i in track(range(n_frames), description='Animating...', total=n_frames):
-            if is_video(args.driving_info):
-                # extract kp info by M
-                I_d_i = I_d_lst[i]
-                x_d_i_info = self.live_portrait_wrapper.get_kp_info(I_d_i)
-                R_d_i = get_rotation_matrix(x_d_i_info['pitch'], x_d_i_info['yaw'], x_d_i_info['roll'])
-            else:
-                # from template
-                x_d_i_info = template_lst[i]
-                x_d_i_info = dct2cuda(x_d_i_info, inference_cfg.device_id)
-                R_d_i = x_d_i_info['R_d']
-
-            if i == 0:
-                R_d_0 = R_d_i
-                x_d_0_info = x_d_i_info
-
-            if inference_cfg.flag_relative:
-                R_new = (R_d_i @ R_d_0.permute(0, 2, 1)) @ R_s
-                delta_new = x_s_info['exp'] + (x_d_i_info['exp'] - x_d_0_info['exp'])
-                scale_new = x_s_info['scale'] * (x_d_i_info['scale'] / x_d_0_info['scale'])
-                t_new = x_s_info['t'] + (x_d_i_info['t'] - x_d_0_info['t'])
-            else:
-                R_new = R_d_i
-                delta_new = x_d_i_info['exp']
-                scale_new = x_s_info['scale']
-                t_new = x_d_i_info['t']
-
-            t_new[..., 2].fill_(0)  # zero tz
-            x_d_i_new = scale_new * (x_c_s @ R_new + delta_new) + t_new
-
-            # Algorithm 1:
-            if not inference_cfg.flag_stitching and not inference_cfg.flag_eye_retargeting and not inference_cfg.flag_lip_retargeting:
-                # without stitching or retargeting
-                if inference_cfg.flag_lip_zero:
-                    x_d_i_new += lip_delta_before_animation.reshape(-1, x_s.shape[1], 3)
-                else:
-                    pass
-            elif inference_cfg.flag_stitching and not inference_cfg.flag_eye_retargeting and not inference_cfg.flag_lip_retargeting:
-                # with stitching and without retargeting
-                if inference_cfg.flag_lip_zero:
-                    x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new) + lip_delta_before_animation.reshape(-1, x_s.shape[1], 3)
-                else:
-                    x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new)
-            else:
-                eyes_delta, lip_delta = None, None
-                if inference_cfg.flag_eye_retargeting:
-                    c_d_eyes_i = input_eye_ratio_lst[i]
-                    combined_eye_ratio_tensor = self.live_portrait_wrapper.calc_combined_eye_ratio(c_d_eyes_i, source_lmk)
-                    # ∆_eyes,i = R_eyes(x_s; c_s,eyes, c_d,eyes,i)
-                    eyes_delta = self.live_portrait_wrapper.retarget_eye(x_s, combined_eye_ratio_tensor)
-                if inference_cfg.flag_lip_retargeting:
-                    c_d_lip_i = input_lip_ratio_lst[i]
-                    combined_lip_ratio_tensor = self.live_portrait_wrapper.calc_combined_lip_ratio(c_d_lip_i, source_lmk)
-                    # ∆_lip,i = R_lip(x_s; c_s,lip, c_d,lip,i)
-                    lip_delta = self.live_portrait_wrapper.retarget_lip(x_s, combined_lip_ratio_tensor)
-
-                if inference_cfg.flag_relative:  # use x_s
-                    x_d_i_new = x_s + \
-                        (eyes_delta.reshape(-1, x_s.shape[1], 3) if eyes_delta is not None else 0) + \
-                        (lip_delta.reshape(-1, x_s.shape[1], 3) if lip_delta is not None else 0)
-                else:  # use x_d,i
-                    x_d_i_new = x_d_i_new + \
-                        (eyes_delta.reshape(-1, x_s.shape[1], 3) if eyes_delta is not None else 0) + \
-                        (lip_delta.reshape(-1, x_s.shape[1], 3) if lip_delta is not None else 0)
-
-                if inference_cfg.flag_stitching:
-                    x_d_i_new = self.live_portrait_wrapper.stitching(x_s, x_d_i_new)
-
-            out = self.live_portrait_wrapper.warp_decode(f_s, x_s, x_d_i_new)
-            I_p_i = self.live_portrait_wrapper.parse_output(out['out'])[0]
-            I_p_lst.append(I_p_i)
-
-            if inference_cfg.flag_pasteback:
-                I_p_i_to_ori_blend = paste_back(I_p_i, crop_info['M_c2o'], img_rgb, mask_ori)
-                I_p_paste_lst.append(I_p_i_to_ori_blend)
-
-        mkdir(args.output_dir)
-        wfp_concat = None
-        flag_has_audio = has_audio_stream(args.driving_info)
-
-        if is_video(args.driving_info):
-            frames_concatenated = concat_frames(I_p_lst, driving_rgb_lst, img_crop_256x256)
-            # save (driving frames, source image, driven frames) result
-            wfp_concat = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_concat.mp4')
-            images2video(frames_concatenated, wfp=wfp_concat, fps=output_fps)
-            if flag_has_audio:
-                # final result with concat
-                wfp_concat_with_audio = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_concat_with_audio.mp4')
-                add_audio_to_video(wfp_concat, args.driving_info, wfp_concat_with_audio)
-                os.replace(wfp_concat_with_audio, wfp_concat)
-                log(f"Replace {wfp_concat} with {wfp_concat_with_audio}")
-
-        # save driven result
-        wfp = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}.mp4')
-        if inference_cfg.flag_pasteback:
-            images2video(I_p_paste_lst, wfp=wfp, fps=output_fps)
-        else:
-            images2video(I_p_lst, wfp=wfp, fps=output_fps)
-
-        ######### build final result #########
-        if flag_has_audio:
-            wfp_with_audio = osp.join(args.output_dir, f'{basename(args.source_image)}--{basename(args.driving_info)}_with_audio.mp4')
-            add_audio_to_video(wfp, args.driving_info, wfp_with_audio)
-            os.replace(wfp_with_audio, wfp)
-            log(f"Replace {wfp} with {wfp_with_audio}")
-
-        return wfp, wfp_concat
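Putting the deleted pieces together, a hedged sketch of how this pipeline class was typically driven; the driver script itself is hypothetical and not part of this commit:

# hypothetical driver wiring up the deleted configs and pipeline
from src.config.argument_config import ArgumentConfig
from src.config.inference_config import InferenceConfig
from src.config.crop_config import CropConfig
from src.live_portrait_pipeline import LivePortraitPipeline

args = ArgumentConfig(source_image='source.jpg', driving_info='driving.mp4', output_dir='animations/')
pipeline = LivePortraitPipeline(inference_cfg=InferenceConfig(), crop_cfg=CropConfig())
wfp, wfp_concat = pipeline.execute(args)  # paths of the rendered result videos
print(wfp, wfp_concat)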
src/live_portrait_wrapper.py
DELETED
@@ -1,307 +0,0 @@
-# coding: utf-8
-
-"""
-Wrapper for LivePortrait core functions
-"""
-
-import os.path as osp
-import numpy as np
-import cv2
-import torch
-import yaml
-
-from .utils.timer import Timer
-from .utils.helper import load_model, concat_feat
-from .utils.camera import headpose_pred_to_degree, get_rotation_matrix
-from .utils.retargeting_utils import calc_eye_close_ratio, calc_lip_close_ratio
-from .config.inference_config import InferenceConfig
-from .utils.rprint import rlog as log
-
-
-class LivePortraitWrapper(object):
-
-    def __init__(self, cfg: InferenceConfig):
-
-        model_config = yaml.load(open(cfg.models_config, 'r'), Loader=yaml.SafeLoader)
-
-        # init F
-        self.appearance_feature_extractor = load_model(cfg.checkpoint_F, model_config, cfg.device_id, 'appearance_feature_extractor')
-        log(f'Load appearance_feature_extractor done.')
-        # init M
-        self.motion_extractor = load_model(cfg.checkpoint_M, model_config, cfg.device_id, 'motion_extractor')
-        log(f'Load motion_extractor done.')
-        # init W
-        self.warping_module = load_model(cfg.checkpoint_W, model_config, cfg.device_id, 'warping_module')
-        log(f'Load warping_module done.')
-        # init G
-        self.spade_generator = load_model(cfg.checkpoint_G, model_config, cfg.device_id, 'spade_generator')
-        log(f'Load spade_generator done.')
-        # init S and R
-        if cfg.checkpoint_S is not None and osp.exists(cfg.checkpoint_S):
-            self.stitching_retargeting_module = load_model(cfg.checkpoint_S, model_config, cfg.device_id, 'stitching_retargeting_module')
-            log(f'Load stitching_retargeting_module done.')
-        else:
-            self.stitching_retargeting_module = None
-
-        self.cfg = cfg
-        self.device_id = cfg.device_id
-        self.timer = Timer()
-
-    def update_config(self, user_args):
-        for k, v in user_args.items():
-            if hasattr(self.cfg, k):
-                setattr(self.cfg, k, v)
-
-    def prepare_source(self, img: np.ndarray) -> torch.Tensor:
-        """ construct the input as standard
-        img: HxWx3, uint8, 256x256
-        """
-        h, w = img.shape[:2]
-        if h != self.cfg.input_shape[0] or w != self.cfg.input_shape[1]:
-            x = cv2.resize(img, (self.cfg.input_shape[0], self.cfg.input_shape[1]))
-        else:
-            x = img.copy()
-
-        if x.ndim == 3:
-            x = x[np.newaxis].astype(np.float32) / 255.  # HxWx3 -> 1xHxWx3, normalized to 0~1
-        elif x.ndim == 4:
-            x = x.astype(np.float32) / 255.  # BxHxWx3, normalized to 0~1
-        else:
-            raise ValueError(f'img ndim should be 3 or 4: {x.ndim}')
-        x = np.clip(x, 0, 1)  # clip to 0~1
-        x = torch.from_numpy(x).permute(0, 3, 1, 2)  # 1xHxWx3 -> 1x3xHxW
-        x = x.cuda(self.device_id)
-        return x
-
-    def prepare_driving_videos(self, imgs) -> torch.Tensor:
-        """ construct the input as standard
-        imgs: NxBxHxWx3, uint8
-        """
-        if isinstance(imgs, list):
-            _imgs = np.array(imgs)[..., np.newaxis]  # TxHxWx3x1
-        elif isinstance(imgs, np.ndarray):
-            _imgs = imgs
-        else:
-            raise ValueError(f'imgs type error: {type(imgs)}')
-
-        y = _imgs.astype(np.float32) / 255.
-        y = np.clip(y, 0, 1)  # clip to 0~1
-        y = torch.from_numpy(y).permute(0, 4, 3, 1, 2)  # TxHxWx3x1 -> Tx1x3xHxW
-        y = y.cuda(self.device_id)
-
-        return y
-
-    def extract_feature_3d(self, x: torch.Tensor) -> torch.Tensor:
-        """ get the appearance feature of the image by F
-        x: Bx3xHxW, normalized to 0~1
-        """
-        with torch.no_grad():
-            with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=self.cfg.flag_use_half_precision):
-                feature_3d = self.appearance_feature_extractor(x)
-
-        return feature_3d.float()
-
-    def get_kp_info(self, x: torch.Tensor, **kwargs) -> dict:
-        """ get the implicit keypoint information
-        x: Bx3xHxW, normalized to 0~1
-        flag_refine_info: whether to transform the pose to degrees and reshape the tensor dimensions
-        return: a dict containing keys: 'pitch', 'yaw', 'roll', 't', 'exp', 'scale', 'kp'
-        """
-        with torch.no_grad():
-            with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=self.cfg.flag_use_half_precision):
-                kp_info = self.motion_extractor(x)
-
-            if self.cfg.flag_use_half_precision:
-                # float the dict
-                for k, v in kp_info.items():
-                    if isinstance(v, torch.Tensor):
-                        kp_info[k] = v.float()
-
-        flag_refine_info: bool = kwargs.get('flag_refine_info', True)
-        if flag_refine_info:
-            bs = kp_info['kp'].shape[0]
-            kp_info['pitch'] = headpose_pred_to_degree(kp_info['pitch'])[:, None]  # Bx1
-            kp_info['yaw'] = headpose_pred_to_degree(kp_info['yaw'])[:, None]  # Bx1
-            kp_info['roll'] = headpose_pred_to_degree(kp_info['roll'])[:, None]  # Bx1
-            kp_info['kp'] = kp_info['kp'].reshape(bs, -1, 3)  # BxNx3
-            kp_info['exp'] = kp_info['exp'].reshape(bs, -1, 3)  # BxNx3
-
-        return kp_info
-
-    def get_pose_dct(self, kp_info: dict) -> dict:
-        pose_dct = dict(
-            pitch=headpose_pred_to_degree(kp_info['pitch']).item(),
-            yaw=headpose_pred_to_degree(kp_info['yaw']).item(),
-            roll=headpose_pred_to_degree(kp_info['roll']).item(),
-        )
-        return pose_dct
-
-    def get_fs_and_kp_info(self, source_prepared, driving_first_frame):
-
-        # get the canonical keypoints of source image by M
-        source_kp_info = self.get_kp_info(source_prepared, flag_refine_info=True)
-        source_rotation = get_rotation_matrix(source_kp_info['pitch'], source_kp_info['yaw'], source_kp_info['roll'])
-
-        # get the canonical keypoints of first driving frame by M
-        driving_first_frame_kp_info = self.get_kp_info(driving_first_frame, flag_refine_info=True)
-        driving_first_frame_rotation = get_rotation_matrix(
-            driving_first_frame_kp_info['pitch'],
-            driving_first_frame_kp_info['yaw'],
-            driving_first_frame_kp_info['roll']
-        )
-
-        # get feature volume by F
-        source_feature_3d = self.extract_feature_3d(source_prepared)
-
-        return source_kp_info, source_rotation, source_feature_3d, driving_first_frame_kp_info, driving_first_frame_rotation
-
-    def transform_keypoint(self, kp_info: dict):
-        """
-        transform the implicit keypoints with the pose, shift, and expression deformation
-        kp: BxNx3
-        """
-        kp = kp_info['kp']  # (bs, k, 3)
-        pitch, yaw, roll = kp_info['pitch'], kp_info['yaw'], kp_info['roll']
-
-        t, exp = kp_info['t'], kp_info['exp']
-        scale = kp_info['scale']
-
-        pitch = headpose_pred_to_degree(pitch)
-        yaw = headpose_pred_to_degree(yaw)
-        roll = headpose_pred_to_degree(roll)
-
-        bs = kp.shape[0]
-        if kp.ndim == 2:
-            num_kp = kp.shape[1] // 3  # Bx(num_kpx3)
-        else:
-            num_kp = kp.shape[1]  # Bxnum_kpx3
-
-        rot_mat = get_rotation_matrix(pitch, yaw, roll)  # (bs, 3, 3)
-
-        # Eqn.2: s * (R * x_c,s + exp) + t
-        kp_transformed = kp.view(bs, num_kp, 3) @ rot_mat + exp.view(bs, num_kp, 3)
-        kp_transformed *= scale[..., None]  # (bs, k, 3) * (bs, 1, 1) = (bs, k, 3)
-        kp_transformed[:, :, 0:2] += t[:, None, 0:2]  # remove z, only apply tx ty
-
-        return kp_transformed
-
-    def retarget_eye(self, kp_source: torch.Tensor, eye_close_ratio: torch.Tensor) -> torch.Tensor:
-        """
-        kp_source: BxNx3
-        eye_close_ratio: Bx3
-        Return: Bx(3*num_kp+2)
-        """
-        feat_eye = concat_feat(kp_source, eye_close_ratio)
-
-        with torch.no_grad():
-            delta = self.stitching_retargeting_module['eye'](feat_eye)
-
-        return delta
-
-    def retarget_lip(self, kp_source: torch.Tensor, lip_close_ratio: torch.Tensor) -> torch.Tensor:
-        """
-        kp_source: BxNx3
-        lip_close_ratio: Bx2
-        """
-        feat_lip = concat_feat(kp_source, lip_close_ratio)
-
-        with torch.no_grad():
-            delta = self.stitching_retargeting_module['lip'](feat_lip)
-
-        return delta
-
-    def stitch(self, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
-        """
-        kp_source: BxNx3
-        kp_driving: BxNx3
-        Return: Bx(3*num_kp+2)
-        """
-        feat_stiching = concat_feat(kp_source, kp_driving)
-
-        with torch.no_grad():
-            delta = self.stitching_retargeting_module['stitching'](feat_stiching)
-
-        return delta
-
-    def stitching(self, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
-        """ conduct the stitching
-        kp_source: Bxnum_kpx3
-        kp_driving: Bxnum_kpx3
-        """
-
-        if self.stitching_retargeting_module is not None:
-
-            bs, num_kp = kp_source.shape[:2]
-
-            kp_driving_new = kp_driving.clone()
-            delta = self.stitch(kp_source, kp_driving_new)
-
-            delta_exp = delta[..., :3*num_kp].reshape(bs, num_kp, 3)  # 1x20x3
-            delta_tx_ty = delta[..., 3*num_kp:3*num_kp+2].reshape(bs, 1, 2)  # 1x1x2
-
-            kp_driving_new += delta_exp
-            kp_driving_new[..., :2] += delta_tx_ty
-
-            return kp_driving_new
-
-        return kp_driving
-
-    def warp_decode(self, feature_3d: torch.Tensor, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
-        """ get the image after the warping of the implicit keypoints
-        feature_3d: Bx32x16x64x64, feature volume
-        kp_source: BxNx3
-        kp_driving: BxNx3
-        """
-        # The line 18 in Algorithm 1: D(W(f_s; x_s, x′_d,i))
-        with torch.no_grad():
-            with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=self.cfg.flag_use_half_precision):
-                # get decoder input
-                ret_dct = self.warping_module(feature_3d, kp_source=kp_source, kp_driving=kp_driving)
-                # decode
-                ret_dct['out'] = self.spade_generator(feature=ret_dct['out'])
-
-            # float the dict
-            if self.cfg.flag_use_half_precision:
-                for k, v in ret_dct.items():
-                    if isinstance(v, torch.Tensor):
-                        ret_dct[k] = v.float()
-
-        return ret_dct
-
-    def parse_output(self, out: torch.Tensor) -> np.ndarray:
-        """ construct the output as standard
-        return: 1xHxWx3, uint8
-        """
-        out = np.transpose(out.data.cpu().numpy(), [0, 2, 3, 1])  # 1x3xHxW -> 1xHxWx3
-        out = np.clip(out, 0, 1)  # clip to 0~1
-        out = np.clip(out * 255, 0, 255).astype(np.uint8)  # 0~1 -> 0~255
-
-        return out
-
-    def calc_retargeting_ratio(self, source_lmk, driving_lmk_lst):
-        input_eye_ratio_lst = []
-        input_lip_ratio_lst = []
-        for lmk in driving_lmk_lst:
-            # for eyes retargeting
-            input_eye_ratio_lst.append(calc_eye_close_ratio(lmk[None]))
-            # for lip retargeting
-            input_lip_ratio_lst.append(calc_lip_close_ratio(lmk[None]))
-        return input_eye_ratio_lst, input_lip_ratio_lst
-
-    def calc_combined_eye_ratio(self, input_eye_ratio, source_lmk):
-        eye_close_ratio = calc_eye_close_ratio(source_lmk[None])
-        eye_close_ratio_tensor = torch.from_numpy(eye_close_ratio).float().cuda(self.device_id)
-        input_eye_ratio_tensor = torch.Tensor([input_eye_ratio[0][0]]).reshape(1, 1).cuda(self.device_id)
-        # [c_s,eyes, c_d,eyes,i]
-        combined_eye_ratio_tensor = torch.cat([eye_close_ratio_tensor, input_eye_ratio_tensor], dim=1)
-        return combined_eye_ratio_tensor
-
-    def calc_combined_lip_ratio(self, input_lip_ratio, source_lmk):
-        lip_close_ratio = calc_lip_close_ratio(source_lmk[None])
-        lip_close_ratio_tensor = torch.from_numpy(lip_close_ratio).float().cuda(self.device_id)
-        # [c_s,lip, c_d,lip,i]
-        input_lip_ratio_tensor = torch.Tensor([input_lip_ratio[0]]).cuda(self.device_id)
-        if input_lip_ratio_tensor.shape != [1, 1]:
-            input_lip_ratio_tensor = input_lip_ratio_tensor.reshape(1, 1)
-        combined_lip_ratio_tensor = torch.cat([lip_close_ratio_tensor, input_lip_ratio_tensor], dim=1)
-        return combined_lip_ratio_tensor
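The transform_keypoint method above implements Eqn. 2 of the paper, x_d = s * (x_c R + exp) + t, with tz dropped; a shape-check sketch with random tensors (values are illustrative only):

# sketch: shape check for Eqn.2 as implemented in transform_keypoint
import torch

bs, num_kp = 1, 21
kp = torch.randn(bs, num_kp, 3)      # canonical keypoints x_c,s
rot = torch.eye(3).expand(bs, 3, 3)  # rotation R built from pitch/yaw/roll
exp = torch.randn(bs, num_kp, 3)     # expression deformation
scale = torch.rand(bs, 1)            # per-image scale s
t = torch.randn(bs, 3)               # translation; only tx, ty are applied

out = kp @ rot + exp                 # (bs, num_kp, 3)
out = out * scale[..., None]         # broadcast against (bs, 1, 1)
out[:, :, 0:2] += t[:, None, 0:2]    # zero tz, matching the wrapper
assert out.shape == (bs, num_kp, 3)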
src/modules/__init__.py
DELETED
File without changes
src/modules/appearance_feature_extractor.py
DELETED
@@ -1,48 +0,0 @@
-# coding: utf-8
-
-"""
-Appearance extractor (F) defined in the paper, which maps the source image s to a 3D appearance feature volume.
-"""
-
-import torch
-from torch import nn
-from .util import SameBlock2d, DownBlock2d, ResBlock3d
-
-
-class AppearanceFeatureExtractor(nn.Module):
-
-    def __init__(self, image_channel, block_expansion, num_down_blocks, max_features, reshape_channel, reshape_depth, num_resblocks):
-        super(AppearanceFeatureExtractor, self).__init__()
-        self.image_channel = image_channel
-        self.block_expansion = block_expansion
-        self.num_down_blocks = num_down_blocks
-        self.max_features = max_features
-        self.reshape_channel = reshape_channel
-        self.reshape_depth = reshape_depth
-
-        self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(3, 3), padding=(1, 1))
-
-        down_blocks = []
-        for i in range(num_down_blocks):
-            in_features = min(max_features, block_expansion * (2 ** i))
-            out_features = min(max_features, block_expansion * (2 ** (i + 1)))
-            down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
-        self.down_blocks = nn.ModuleList(down_blocks)
-
-        self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)
-
-        self.resblocks_3d = torch.nn.Sequential()
-        for i in range(num_resblocks):
-            self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))
-
-    def forward(self, source_image):
-        out = self.first(source_image)  # Bx3x256x256 -> Bx64x256x256
-
-        for i in range(len(self.down_blocks)):
-            out = self.down_blocks[i](out)
-        out = self.second(out)
-        bs, c, h, w = out.shape  # -> Bx512x64x64
-
-        f_s = out.view(bs, self.reshape_channel, self.reshape_depth, h, w)  # -> Bx32x16x64x64
-        f_s = self.resblocks_3d(f_s)  # -> Bx32x16x64x64
-        return f_s
src/modules/convnextv2.py
DELETED
@@ -1,149 +0,0 @@
-# coding: utf-8
-
-"""
-This module adapts ConvNeXtV2 for the extraction of implicit keypoints, poses, and expression deformation.
-"""
-
-import torch
-import torch.nn as nn
-# from timm.models.layers import trunc_normal_, DropPath
-from .util import LayerNorm, DropPath, trunc_normal_, GRN
-
-__all__ = ['convnextv2_tiny']
-
-
-class Block(nn.Module):
-    """ ConvNeXtV2 Block.
-
-    Args:
-        dim (int): Number of input channels.
-        drop_path (float): Stochastic depth rate. Default: 0.0
-    """
-
-    def __init__(self, dim, drop_path=0.):
-        super().__init__()
-        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)  # depthwise conv
-        self.norm = LayerNorm(dim, eps=1e-6)
-        self.pwconv1 = nn.Linear(dim, 4 * dim)  # pointwise/1x1 convs, implemented with linear layers
-        self.act = nn.GELU()
-        self.grn = GRN(4 * dim)
-        self.pwconv2 = nn.Linear(4 * dim, dim)
-        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
-
-    def forward(self, x):
-        input = x
-        x = self.dwconv(x)
-        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
-        x = self.norm(x)
-        x = self.pwconv1(x)
-        x = self.act(x)
-        x = self.grn(x)
-        x = self.pwconv2(x)
-        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)
-
-        x = input + self.drop_path(x)
-        return x
-
-
-class ConvNeXtV2(nn.Module):
-    """ ConvNeXt V2
-
-    Args:
-        in_chans (int): Number of input image channels. Default: 3
-        num_classes (int): Number of classes for classification head. Default: 1000
-        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
-        dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
-        drop_path_rate (float): Stochastic depth rate. Default: 0.
-        head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
-    """
-
-    def __init__(
-        self,
-        in_chans=3,
-        depths=[3, 3, 9, 3],
-        dims=[96, 192, 384, 768],
-        drop_path_rate=0.,
-        **kwargs
-    ):
-        super().__init__()
-        self.depths = depths
-        self.downsample_layers = nn.ModuleList()  # stem and 3 intermediate downsampling conv layers
-        stem = nn.Sequential(
-            nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
-            LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
-        )
-        self.downsample_layers.append(stem)
-        for i in range(3):
-            downsample_layer = nn.Sequential(
-                LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
-                nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
-            )
-            self.downsample_layers.append(downsample_layer)
-
-        self.stages = nn.ModuleList()  # 4 feature resolution stages, each consisting of multiple residual blocks
-        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
-        cur = 0
-        for i in range(4):
-            stage = nn.Sequential(
-                *[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
-            )
-            self.stages.append(stage)
-            cur += depths[i]
-
-        self.norm = nn.LayerNorm(dims[-1], eps=1e-6)  # final norm layer
-
-        # NOTE: the output semantic items
-        num_bins = kwargs.get('num_bins', 66)
-        num_kp = kwargs.get('num_kp', 24)  # the number of implicit keypoints
-        self.fc_kp = nn.Linear(dims[-1], 3 * num_kp)  # implicit keypoints
-
-        # print('dims[-1]: ', dims[-1])
-        self.fc_scale = nn.Linear(dims[-1], 1)  # scale
-        self.fc_pitch = nn.Linear(dims[-1], num_bins)  # pitch bins
-        self.fc_yaw = nn.Linear(dims[-1], num_bins)  # yaw bins
-        self.fc_roll = nn.Linear(dims[-1], num_bins)  # roll bins
-        self.fc_t = nn.Linear(dims[-1], 3)  # translation
-        self.fc_exp = nn.Linear(dims[-1], 3 * num_kp)  # expression / delta
-
-    def _init_weights(self, m):
-        if isinstance(m, (nn.Conv2d, nn.Linear)):
-            trunc_normal_(m.weight, std=.02)
-            nn.init.constant_(m.bias, 0)
-
-    def forward_features(self, x):
-        for i in range(4):
-            x = self.downsample_layers[i](x)
-            x = self.stages[i](x)
-        return self.norm(x.mean([-2, -1]))  # global average pooling, (N, C, H, W) -> (N, C)
-
-    def forward(self, x):
-        x = self.forward_features(x)
-
-        # implicit keypoints
-        kp = self.fc_kp(x)
-
-        # pose and expression deformation
-        pitch = self.fc_pitch(x)
-        yaw = self.fc_yaw(x)
-        roll = self.fc_roll(x)
-        t = self.fc_t(x)
-        exp = self.fc_exp(x)
-        scale = self.fc_scale(x)
-
-        ret_dct = {
-            'pitch': pitch,
-            'yaw': yaw,
-            'roll': roll,
-            't': t,
-            'exp': exp,
-            'scale': scale,
-
-            'kp': kp,  # canonical keypoint
-        }
-
-        return ret_dct
-
-
-def convnextv2_tiny(**kwargs):
-    model = ConvNeXtV2(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
-    return model
src/modules/dense_motion.py
DELETED
@@ -1,104 +0,0 @@
|
|
1 |
-
# coding: utf-8
|
2 |
-
|
3 |
-
"""
|
4 |
-
The module that predicting a dense motion from sparse motion representation given by kp_source and kp_driving
|
5 |
-
"""
|
6 |
-
|
7 |
-
from torch import nn
|
8 |
-
import torch.nn.functional as F
|
9 |
-
import torch
|
10 |
-
from .util import Hourglass, make_coordinate_grid, kp2gaussian
|
11 |
-
|
12 |
-
|
13 |
-
class DenseMotionNetwork(nn.Module):
|
14 |
-
def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress, estimate_occlusion_map=True):
|
15 |
-
super(DenseMotionNetwork, self).__init__()
|
16 |
-
-        self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks)  # ~60+G
-
-        self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3)  # 65G! NOTE: computation cost is large
-        self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1)  # 0.8G
-        self.norm = nn.BatchNorm3d(compress, affine=True)
-        self.num_kp = num_kp
-        self.flag_estimate_occlusion_map = estimate_occlusion_map
-
-        if self.flag_estimate_occlusion_map:
-            self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3)
-        else:
-            self.occlusion = None
-
-    def create_sparse_motions(self, feature, kp_driving, kp_source):
-        bs, _, d, h, w = feature.shape  # (bs, 4, 16, 64, 64)
-        identity_grid = make_coordinate_grid((d, h, w), ref=kp_source)  # (16, 64, 64, 3)
-        identity_grid = identity_grid.view(1, 1, d, h, w, 3)  # (1, 1, d=16, h=64, w=64, 3)
-        coordinate_grid = identity_grid - kp_driving.view(bs, self.num_kp, 1, 1, 1, 3)
-
-        k = coordinate_grid.shape[1]
-
-        # NOTE: a first-order flow term is missing here
-        driving_to_source = coordinate_grid + kp_source.view(bs, self.num_kp, 1, 1, 1, 3)  # (bs, num_kp, d, h, w, 3)
-
-        # adding background feature
-        identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1)
-        sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1)  # (bs, 1+num_kp, d, h, w, 3)
-        return sparse_motions
-
-    def create_deformed_feature(self, feature, sparse_motions):
-        bs, _, d, h, w = feature.shape
-        feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1)  # (bs, num_kp+1, 1, c, d, h, w)
-        feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w)  # (bs*(num_kp+1), c, d, h, w)
-        sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1))  # (bs*(num_kp+1), d, h, w, 3)
-        sparse_deformed = F.grid_sample(feature_repeat, sparse_motions, align_corners=False)
-        sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w))  # (bs, num_kp+1, c, d, h, w)
-
-        return sparse_deformed
-
-    def create_heatmap_representations(self, feature, kp_driving, kp_source):
-        spatial_size = feature.shape[3:]  # (d=16, h=64, w=64)
-        gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01)  # (bs, num_kp, d, h, w)
-        gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01)  # (bs, num_kp, d, h, w)
-        heatmap = gaussian_driving - gaussian_source  # (bs, num_kp, d, h, w)
-
-        # adding background feature
-        zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type()).to(heatmap.device)
-        heatmap = torch.cat([zeros, heatmap], dim=1)
-        heatmap = heatmap.unsqueeze(2)  # (bs, 1+num_kp, 1, d, h, w)
-        return heatmap
-
-    def forward(self, feature, kp_driving, kp_source):
-        bs, _, d, h, w = feature.shape  # (bs, 32, 16, 64, 64)
-
-        feature = self.compress(feature)  # (bs, 4, 16, 64, 64)
-        feature = self.norm(feature)  # (bs, 4, 16, 64, 64)
-        feature = F.relu(feature)  # (bs, 4, 16, 64, 64)
-
-        out_dict = dict()
-
-        # 1. deform 3d feature
-        sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source)  # (bs, 1+num_kp, d, h, w, 3)
-        deformed_feature = self.create_deformed_feature(feature, sparse_motion)  # (bs, 1+num_kp, c=4, d=16, h=64, w=64)
-
-        # 2. (bs, 1+num_kp, d, h, w)
-        heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source)  # (bs, 1+num_kp, 1, d, h, w)
-
-        input = torch.cat([heatmap, deformed_feature], dim=2)  # (bs, 1+num_kp, c=5, d=16, h=64, w=64)
-        input = input.view(bs, -1, d, h, w)  # (bs, (1+num_kp)*c=105, d=16, h=64, w=64)
-
-        prediction = self.hourglass(input)
-
-        mask = self.mask(prediction)
-        mask = F.softmax(mask, dim=1)  # (bs, 1+num_kp, d=16, h=64, w=64)
-        out_dict['mask'] = mask
-        mask = mask.unsqueeze(2)  # (bs, num_kp+1, 1, d, h, w)
-        sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4)  # (bs, num_kp+1, 3, d, h, w)
-        deformation = (sparse_motion * mask).sum(dim=1)  # (bs, 3, d, h, w); the mask takes effect here
-        deformation = deformation.permute(0, 2, 3, 4, 1)  # (bs, d, h, w, 3)
-
-        out_dict['deformation'] = deformation
-
-        if self.flag_estimate_occlusion_map:
-            bs, _, d, h, w = prediction.shape
-            prediction_reshape = prediction.view(bs, -1, h, w)
-            occlusion_map = torch.sigmoid(self.occlusion(prediction_reshape))  # Bx1x64x64
-            out_dict['occlusion_map'] = occlusion_map
-
-        return out_dict
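Note: the forward pass above reduces the stack of per-keypoint candidate flows to one dense deformation by a softmax-weighted sum. A minimal, self-contained sketch of just that combination step, with toy shapes standing in for the real num_kp/d/h/w values:

import torch
import torch.nn.functional as F

bs, num_kp, d, h, w = 2, 5, 4, 8, 8
# (bs, num_kp+1, d, h, w, 3): one candidate flow per keypoint plus the identity grid
sparse_motion = torch.randn(bs, num_kp + 1, d, h, w, 3)
# raw mask logits, as the hourglass + mask conv would produce them
mask_logits = torch.randn(bs, num_kp + 1, d, h, w)

mask = F.softmax(mask_logits, dim=1)               # weights sum to 1 over the keypoint axis
mask = mask.unsqueeze(2)                           # (bs, num_kp+1, 1, d, h, w)
sm = sparse_motion.permute(0, 1, 5, 2, 3, 4)       # (bs, num_kp+1, 3, d, h, w)
deformation = (sm * mask).sum(dim=1)               # (bs, 3, d, h, w)
deformation = deformation.permute(0, 2, 3, 4, 1)   # (bs, d, h, w, 3), grid_sample layout
print(deformation.shape)                           # torch.Size([2, 4, 8, 8, 3])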
src/modules/motion_extractor.py
DELETED
@@ -1,35 +0,0 @@
-# coding: utf-8
-
-"""
-Motion extractor (M), which directly predicts the canonical keypoints, head pose and expression deformation of the input image
-"""
-
-from torch import nn
-import torch
-
-from .convnextv2 import convnextv2_tiny
-from .util import filter_state_dict
-
-model_dict = {
-    'convnextv2_tiny': convnextv2_tiny,
-}
-
-
-class MotionExtractor(nn.Module):
-    def __init__(self, **kwargs):
-        super(MotionExtractor, self).__init__()
-
-        # the default backbone is convnextv2_tiny
-        backbone = kwargs.get('backbone', 'convnextv2_tiny')
-        self.detector = model_dict.get(backbone)(**kwargs)
-
-    def load_pretrained(self, init_path: str):
-        if init_path not in (None, ''):
-            state_dict = torch.load(init_path, map_location=lambda storage, loc: storage)['model']
-            state_dict = filter_state_dict(state_dict, remove_name='head')
-            ret = self.detector.load_state_dict(state_dict, strict=False)
-            print(f'Load pretrained model from {init_path}, ret: {ret}')
-
-    def forward(self, x):
-        out = self.detector(x)
-        return out
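Note: a hypothetical usage sketch, runnable only against the pre-deletion tree; it assumes the ConvNeXtV2 defaults in src/modules/convnextv2.py are sufficient (in the real pipeline the constructor kwargs come from src/config/models.yaml):

import torch
from src.modules.motion_extractor import MotionExtractor  # pre-deletion tree

me = MotionExtractor()        # backbone defaults to 'convnextv2_tiny'
me.load_pretrained('')        # empty path: the guard above skips loading
me.eval()
with torch.no_grad():
    out = me(torch.randn(1, 3, 256, 256))  # a 256x256 RGB crop, as used elsewhere in src/
# `out` is whatever the ConvNeXtV2 head returns: per the docstring, canonical
# keypoint, head pose and expression terms.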
src/modules/spade_generator.py
DELETED
@@ -1,59 +0,0 @@
-# coding: utf-8
-
-"""
-SPADE decoder (G) defined in the paper, which takes the warped feature as input to generate the animated image.
-"""
-
-import torch
-from torch import nn
-import torch.nn.functional as F
-from .util import SPADEResnetBlock
-
-
-class SPADEDecoder(nn.Module):
-    def __init__(self, upscale=1, max_features=256, block_expansion=64, out_channels=64, num_down_blocks=2):
-        super().__init__()
-        self.upscale = upscale
-        # channels of the deepest feature map: min(max_features, block_expansion * 2 ** num_down_blocks)
-        input_channels = min(max_features, block_expansion * (2 ** num_down_blocks))
-        norm_G = 'spadespectralinstance'
-        label_num_channels = input_channels  # 256
-
-        self.fc = nn.Conv2d(input_channels, 2 * input_channels, 3, padding=1)
-        self.G_middle_0 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
-        self.G_middle_1 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
-        self.G_middle_2 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
-        self.G_middle_3 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
-        self.G_middle_4 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
-        self.G_middle_5 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
-        self.up_0 = SPADEResnetBlock(2 * input_channels, input_channels, norm_G, label_num_channels)
-        self.up_1 = SPADEResnetBlock(input_channels, out_channels, norm_G, label_num_channels)
-        self.up = nn.Upsample(scale_factor=2)
-
-        if self.upscale is None or self.upscale <= 1:
-            self.conv_img = nn.Conv2d(out_channels, 3, 3, padding=1)
-        else:
-            self.conv_img = nn.Sequential(
-                nn.Conv2d(out_channels, 3 * (2 * 2), kernel_size=3, padding=1),
-                nn.PixelShuffle(upscale_factor=2)
-            )
-
-    def forward(self, feature):
-        seg = feature  # Bx256x64x64
-        x = self.fc(feature)  # Bx512x64x64
-        x = self.G_middle_0(x, seg)
-        x = self.G_middle_1(x, seg)
-        x = self.G_middle_2(x, seg)
-        x = self.G_middle_3(x, seg)
-        x = self.G_middle_4(x, seg)
-        x = self.G_middle_5(x, seg)
-
-        x = self.up(x)  # Bx512x64x64 -> Bx512x128x128
-        x = self.up_0(x, seg)  # Bx512x128x128 -> Bx256x128x128
-        x = self.up(x)  # Bx256x128x128 -> Bx256x256x256
-        x = self.up_1(x, seg)  # Bx256x256x256 -> Bx64x256x256
-
-        x = self.conv_img(F.leaky_relu(x, 2e-1))  # Bx64x256x256 -> Bx3xHxW
-        x = torch.sigmoid(x)  # Bx3xHxW
-
-        return x
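Note: a shape walkthrough for SPADEDecoder, assuming the default block_expansion=64 and num_down_blocks=2 (so input_channels=256) and the pre-deletion tree for SPADEResnetBlock:

import torch
from src.modules.spade_generator import SPADEDecoder  # pre-deletion tree

dec = SPADEDecoder(upscale=2)          # pixel-shuffle branch: 64x64 feature -> 512x512 image
feat = torch.randn(1, 256, 64, 64)     # warped feature, Bx256x64x64
with torch.no_grad():
    img = dec(feat)
print(img.shape)                       # expected: torch.Size([1, 3, 512, 512]), values in [0, 1]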
src/modules/stitching_retargeting_network.py
DELETED
@@ -1,38 +0,0 @@
-# coding: utf-8
-
-"""
-Stitching module(S) and two retargeting modules(R) defined in the paper.
-
-- The stitching module pastes the animated portrait back into the original image space without pixel misalignment, such as in
-the stitching region.
-
-- The eyes retargeting module is designed to address the issue of incomplete eye closure during cross-id reenactment, especially
-when a person with small eyes drives a person with larger eyes.
-
-- The lip retargeting module is designed similarly to the eye retargeting module, and can also normalize the input by ensuring that
-the lips are in a closed state, which facilitates better animation driving.
-"""
-from torch import nn
-
-
-class StitchingRetargetingNetwork(nn.Module):
-    def __init__(self, input_size, hidden_sizes, output_size):
-        super(StitchingRetargetingNetwork, self).__init__()
-        layers = []
-        for i in range(len(hidden_sizes)):
-            if i == 0:
-                layers.append(nn.Linear(input_size, hidden_sizes[i]))
-            else:
-                layers.append(nn.Linear(hidden_sizes[i - 1], hidden_sizes[i]))
-            layers.append(nn.ReLU(inplace=True))
-        layers.append(nn.Linear(hidden_sizes[-1], output_size))
-        self.mlp = nn.Sequential(*layers)
-
-    def initialize_weights_to_zero(self):
-        for m in self.modules():
-            if isinstance(m, nn.Linear):
-                nn.init.zeros_(m.weight)
-                nn.init.zeros_(m.bias)
-
-    def forward(self, x):
-        return self.mlp(x)
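Note: the retargeting networks above are plain MLPs; the interesting property is the zero initialization, which makes a fresh module output exactly zero and therefore act as a no-op offset. A sketch with hypothetical layer sizes (not values taken from this repo):

import torch
from src.modules.stitching_retargeting_network import StitchingRetargetingNetwork  # pre-deletion tree

net = StitchingRetargetingNetwork(input_size=126, hidden_sizes=[128, 128, 64], output_size=65)
net.initialize_weights_to_zero()
x = torch.randn(4, 126)
assert torch.all(net(x) == 0)   # zero weights + zero biases => zero output through ReLU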
src/modules/util.py
DELETED
@@ -1,441 +0,0 @@
-# coding: utf-8
-
-"""
-This file defines various neural network modules and utility functions, including convolutional and residual blocks,
-normalizations, and functions for spatial transformation and tensor manipulation.
-"""
-
-from torch import nn
-import torch.nn.functional as F
-import torch
-import torch.nn.utils.spectral_norm as spectral_norm
-import math
-import warnings
-
-
-def kp2gaussian(kp, spatial_size, kp_variance):
-    """
-    Transform a keypoint into a gaussian-like representation
-    """
-    mean = kp
-
-    coordinate_grid = make_coordinate_grid(spatial_size, mean)
-    number_of_leading_dimensions = len(mean.shape) - 1
-    shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape
-    coordinate_grid = coordinate_grid.view(*shape)
-    repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 1)
-    coordinate_grid = coordinate_grid.repeat(*repeats)
-
-    # Preprocess kp shape
-    shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 3)
-    mean = mean.view(*shape)
-
-    mean_sub = (coordinate_grid - mean)
-
-    out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)
-
-    return out
-
-
-def make_coordinate_grid(spatial_size, ref, **kwargs):
-    d, h, w = spatial_size
-    x = torch.arange(w).type(ref.dtype).to(ref.device)
-    y = torch.arange(h).type(ref.dtype).to(ref.device)
-    z = torch.arange(d).type(ref.dtype).to(ref.device)
-
-    # NOTE: must be right-down-in
-    x = (2 * (x / (w - 1)) - 1)  # the x axis faces to the right
-    y = (2 * (y / (h - 1)) - 1)  # the y axis faces to the bottom
-    z = (2 * (z / (d - 1)) - 1)  # the z axis faces to the inner
-
-    yy = y.view(1, -1, 1).repeat(d, 1, w)
-    xx = x.view(1, 1, -1).repeat(d, h, 1)
-    zz = z.view(-1, 1, 1).repeat(1, h, w)
-
-    meshed = torch.cat([xx.unsqueeze_(3), yy.unsqueeze_(3), zz.unsqueeze_(3)], 3)
-
-    return meshed
-
-
-class ConvT2d(nn.Module):
-    """
-    Upsampling block for use in decoder.
-    """
-
-    def __init__(self, in_features, out_features, kernel_size=3, stride=2, padding=1, output_padding=1):
-        super(ConvT2d, self).__init__()
-
-        self.convT = nn.ConvTranspose2d(in_features, out_features, kernel_size=kernel_size, stride=stride,
-                                        padding=padding, output_padding=output_padding)
-        self.norm = nn.InstanceNorm2d(out_features)
-
-    def forward(self, x):
-        out = self.convT(x)
-        out = self.norm(out)
-        out = F.leaky_relu(out)
-        return out
-
-
-class ResBlock3d(nn.Module):
-    """
-    Res block, preserves spatial resolution.
-    """
-
-    def __init__(self, in_features, kernel_size, padding):
-        super(ResBlock3d, self).__init__()
-        self.conv1 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=padding)
-        self.conv2 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=padding)
-        self.norm1 = nn.BatchNorm3d(in_features, affine=True)
-        self.norm2 = nn.BatchNorm3d(in_features, affine=True)
-
-    def forward(self, x):
-        out = self.norm1(x)
-        out = F.relu(out)
-        out = self.conv1(out)
-        out = self.norm2(out)
-        out = F.relu(out)
-        out = self.conv2(out)
-        out += x
-        return out
-
-
-class UpBlock3d(nn.Module):
-    """
-    Upsampling block for use in decoder.
-    """
-
-    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
-        super(UpBlock3d, self).__init__()
-
-        self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
-                              padding=padding, groups=groups)
-        self.norm = nn.BatchNorm3d(out_features, affine=True)
-
-    def forward(self, x):
-        out = F.interpolate(x, scale_factor=(1, 2, 2))
-        out = self.conv(out)
-        out = self.norm(out)
-        out = F.relu(out)
-        return out
-
-
-class DownBlock2d(nn.Module):
-    """
-    Downsampling block for use in encoder.
-    """
-
-    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
-        super(DownBlock2d, self).__init__()
-        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, padding=padding, groups=groups)
-        self.norm = nn.BatchNorm2d(out_features, affine=True)
-        self.pool = nn.AvgPool2d(kernel_size=(2, 2))
-
-    def forward(self, x):
-        out = self.conv(x)
-        out = self.norm(out)
-        out = F.relu(out)
-        out = self.pool(out)
-        return out
-
-
-class DownBlock3d(nn.Module):
-    """
-    Downsampling block for use in encoder.
-    """
-
-    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
-        super(DownBlock3d, self).__init__()
-        '''
-        self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
-                              padding=padding, groups=groups, stride=(1, 2, 2))
-        '''
-        self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
-                              padding=padding, groups=groups)
-        self.norm = nn.BatchNorm3d(out_features, affine=True)
-        self.pool = nn.AvgPool3d(kernel_size=(1, 2, 2))
-
-    def forward(self, x):
-        out = self.conv(x)
-        out = self.norm(out)
-        out = F.relu(out)
-        out = self.pool(out)
-        return out
-
-
-class SameBlock2d(nn.Module):
-    """
-    Simple block, preserves spatial resolution.
-    """
-
-    def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1, lrelu=False):
-        super(SameBlock2d, self).__init__()
-        self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, padding=padding, groups=groups)
-        self.norm = nn.BatchNorm2d(out_features, affine=True)
-        if lrelu:
-            self.ac = nn.LeakyReLU()
-        else:
-            self.ac = nn.ReLU()
-
-    def forward(self, x):
-        out = self.conv(x)
-        out = self.norm(out)
-        out = self.ac(out)
-        return out
-
-
-class Encoder(nn.Module):
-    """
-    Hourglass Encoder
-    """
-
-    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
-        super(Encoder, self).__init__()
-
-        down_blocks = []
-        for i in range(num_blocks):
-            down_blocks.append(DownBlock3d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)), min(max_features, block_expansion * (2 ** (i + 1))), kernel_size=3, padding=1))
-        self.down_blocks = nn.ModuleList(down_blocks)
-
-    def forward(self, x):
-        outs = [x]
-        for down_block in self.down_blocks:
-            outs.append(down_block(outs[-1]))
-        return outs
-
-
-class Decoder(nn.Module):
-    """
-    Hourglass Decoder
-    """
-
-    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
-        super(Decoder, self).__init__()
-
-        up_blocks = []
-
-        for i in range(num_blocks)[::-1]:
-            in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))
-            out_filters = min(max_features, block_expansion * (2 ** i))
-            up_blocks.append(UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))
-
-        self.up_blocks = nn.ModuleList(up_blocks)
-        self.out_filters = block_expansion + in_features
-
-        self.conv = nn.Conv3d(in_channels=self.out_filters, out_channels=self.out_filters, kernel_size=3, padding=1)
-        self.norm = nn.BatchNorm3d(self.out_filters, affine=True)
-
-    def forward(self, x):
-        out = x.pop()
-        for up_block in self.up_blocks:
-            out = up_block(out)
-            skip = x.pop()
-            out = torch.cat([out, skip], dim=1)
-        out = self.conv(out)
-        out = self.norm(out)
-        out = F.relu(out)
-        return out
-
-
-class Hourglass(nn.Module):
-    """
-    Hourglass architecture.
-    """
-
-    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
-        super(Hourglass, self).__init__()
-        self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
-        self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
-        self.out_filters = self.decoder.out_filters
-
-    def forward(self, x):
-        return self.decoder(self.encoder(x))
-
-
-class SPADE(nn.Module):
-    def __init__(self, norm_nc, label_nc):
-        super().__init__()
-
-        self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
-        nhidden = 128
-
-        self.mlp_shared = nn.Sequential(
-            nn.Conv2d(label_nc, nhidden, kernel_size=3, padding=1),
-            nn.ReLU())
-        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
-        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
-
-    def forward(self, x, segmap):
-        normalized = self.param_free_norm(x)
-        segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
-        actv = self.mlp_shared(segmap)
-        gamma = self.mlp_gamma(actv)
-        beta = self.mlp_beta(actv)
-        out = normalized * (1 + gamma) + beta
-        return out
-
-
-class SPADEResnetBlock(nn.Module):
-    def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1):
-        super().__init__()
-        # Attributes
-        self.learned_shortcut = (fin != fout)
-        fmiddle = min(fin, fout)
-        self.use_se = use_se
-        # create conv layers
-        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)
-        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)
-        if self.learned_shortcut:
-            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
-        # apply spectral norm if specified
-        if 'spectral' in norm_G:
-            self.conv_0 = spectral_norm(self.conv_0)
-            self.conv_1 = spectral_norm(self.conv_1)
-            if self.learned_shortcut:
-                self.conv_s = spectral_norm(self.conv_s)
-        # define normalization layers
-        self.norm_0 = SPADE(fin, label_nc)
-        self.norm_1 = SPADE(fmiddle, label_nc)
-        if self.learned_shortcut:
-            self.norm_s = SPADE(fin, label_nc)
-
-    def forward(self, x, seg1):
-        x_s = self.shortcut(x, seg1)
-        dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))
-        dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))
-        out = x_s + dx
-        return out
-
-    def shortcut(self, x, seg1):
-        if self.learned_shortcut:
-            x_s = self.conv_s(self.norm_s(x, seg1))
-        else:
-            x_s = x
-        return x_s
-
-    def actvn(self, x):
-        return F.leaky_relu(x, 2e-1)
-
-
-def filter_state_dict(state_dict, remove_name='fc'):
-    new_state_dict = {}
-    for key in state_dict:
-        if remove_name in key:
-            continue
-        new_state_dict[key] = state_dict[key]
-    return new_state_dict
-
-
-class GRN(nn.Module):
-    """ GRN (Global Response Normalization) layer
-    """
-
-    def __init__(self, dim):
-        super().__init__()
-        self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
-        self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))
-
-    def forward(self, x):
-        Gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True)
-        Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
-        return self.gamma * (x * Nx) + self.beta + x
-
-
-class LayerNorm(nn.Module):
-    r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
-    The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
-    shape (batch_size, height, width, channels) while channels_first corresponds to inputs
-    with shape (batch_size, channels, height, width).
-    """
-
-    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
-        super().__init__()
-        self.weight = nn.Parameter(torch.ones(normalized_shape))
-        self.bias = nn.Parameter(torch.zeros(normalized_shape))
-        self.eps = eps
-        self.data_format = data_format
-        if self.data_format not in ["channels_last", "channels_first"]:
-            raise NotImplementedError
-        self.normalized_shape = (normalized_shape, )
-
-    def forward(self, x):
-        if self.data_format == "channels_last":
-            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
-        elif self.data_format == "channels_first":
-            u = x.mean(1, keepdim=True)
-            s = (x - u).pow(2).mean(1, keepdim=True)
-            x = (x - u) / torch.sqrt(s + self.eps)
-            x = self.weight[:, None, None] * x + self.bias[:, None, None]
-            return x
-
-
-def _no_grad_trunc_normal_(tensor, mean, std, a, b):
-    # Cut & paste from PyTorch official master until it's in a few official releases - RW
-    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
-    def norm_cdf(x):
-        # Computes standard normal cumulative distribution function
-        return (1. + math.erf(x / math.sqrt(2.))) / 2.
-
-    if (mean < a - 2 * std) or (mean > b + 2 * std):
-        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
-                      "The distribution of values may be incorrect.",
-                      stacklevel=2)
-
-    with torch.no_grad():
-        # Values are generated by using a truncated uniform distribution and
-        # then using the inverse CDF for the normal distribution.
-        # Get upper and lower cdf values
-        l = norm_cdf((a - mean) / std)
-        u = norm_cdf((b - mean) / std)
-
-        # Uniformly fill tensor with values from [l, u], then translate to
-        # [2l-1, 2u-1].
-        tensor.uniform_(2 * l - 1, 2 * u - 1)
-
-        # Use inverse cdf transform for normal distribution to get truncated
-        # standard normal
-        tensor.erfinv_()
-
-        # Transform to proper mean, std
-        tensor.mul_(std * math.sqrt(2.))
-        tensor.add_(mean)
-
-        # Clamp to ensure it's in the proper range
-        tensor.clamp_(min=a, max=b)
-        return tensor
-
-
-def drop_path(x, drop_prob=0., training=False, scale_by_keep=True):
-    """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
-
-    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
-    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
-    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
-    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
-    'survival rate' as the argument.
-
-    """
-    if drop_prob == 0. or not training:
-        return x
-    keep_prob = 1 - drop_prob
-    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
-    random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
-    if keep_prob > 0.0 and scale_by_keep:
-        random_tensor.div_(keep_prob)
-    return x * random_tensor
-
-
-class DropPath(nn.Module):
-    """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
-    """
-
-    def __init__(self, drop_prob=None, scale_by_keep=True):
-        super(DropPath, self).__init__()
-        self.drop_prob = drop_prob
-        self.scale_by_keep = scale_by_keep
-
-    def forward(self, x):
-        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
-
-
-def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
-    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
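Note: the keypoint-to-heatmap path above works in a [-1, 1]^3 right-down-in coordinate system: make_coordinate_grid builds that grid and kp2gaussian drops a soft blob at each keypoint. A small shape check against the pre-deletion tree:

import torch
from src.modules.util import kp2gaussian, make_coordinate_grid  # pre-deletion tree

kp = torch.zeros(1, 3, 3)                        # 3 keypoints, all at the volume center
grid = make_coordinate_grid((4, 8, 8), ref=kp)   # (4, 8, 8, 3), x-y-z coordinates in [-1, 1]
heat = kp2gaussian(kp, spatial_size=(4, 8, 8), kp_variance=0.01)
print(grid.shape, heat.shape)                    # torch.Size([4, 8, 8, 3]) torch.Size([1, 3, 4, 8, 8])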
src/modules/warping_network.py
DELETED
@@ -1,77 +0,0 @@
-# coding: utf-8
-
-"""
-Warping field estimator (W) defined in the paper, which generates a warping field using the implicit
-keypoint representations x_s and x_d, and employs this flow field to warp the source feature volume f_s.
-"""
-
-from torch import nn
-import torch.nn.functional as F
-from .util import SameBlock2d
-from .dense_motion import DenseMotionNetwork
-
-
-class WarpingNetwork(nn.Module):
-    def __init__(
-        self,
-        num_kp,
-        block_expansion,
-        max_features,
-        num_down_blocks,
-        reshape_channel,
-        estimate_occlusion_map=False,
-        dense_motion_params=None,
-        **kwargs
-    ):
-        super(WarpingNetwork, self).__init__()
-
-        self.upscale = kwargs.get('upscale', 1)
-        self.flag_use_occlusion_map = kwargs.get('flag_use_occlusion_map', True)
-
-        if dense_motion_params is not None:
-            self.dense_motion_network = DenseMotionNetwork(
-                num_kp=num_kp,
-                feature_channel=reshape_channel,
-                estimate_occlusion_map=estimate_occlusion_map,
-                **dense_motion_params
-            )
-        else:
-            self.dense_motion_network = None
-
-        self.third = SameBlock2d(max_features, block_expansion * (2 ** num_down_blocks), kernel_size=(3, 3), padding=(1, 1), lrelu=True)
-        self.fourth = nn.Conv2d(in_channels=block_expansion * (2 ** num_down_blocks), out_channels=block_expansion * (2 ** num_down_blocks), kernel_size=1, stride=1)
-
-        self.estimate_occlusion_map = estimate_occlusion_map
-
-    def deform_input(self, inp, deformation):
-        return F.grid_sample(inp, deformation, align_corners=False)
-
-    def forward(self, feature_3d, kp_driving, kp_source):
-        if self.dense_motion_network is not None:
-            # Feature warper: transform the feature representation according to the deformation and occlusion map
-            dense_motion = self.dense_motion_network(
-                feature=feature_3d, kp_driving=kp_driving, kp_source=kp_source
-            )
-            if 'occlusion_map' in dense_motion:
-                occlusion_map = dense_motion['occlusion_map']  # Bx1x64x64
-            else:
-                occlusion_map = None
-
-            deformation = dense_motion['deformation']  # Bx16x64x64x3
-            out = self.deform_input(feature_3d, deformation)  # Bx32x16x64x64
-
-            bs, c, d, h, w = out.shape  # Bx32x16x64x64
-            out = out.view(bs, c * d, h, w)  # -> Bx512x64x64
-            out = self.third(out)  # -> Bx256x64x64
-            out = self.fourth(out)  # -> Bx256x64x64
-
-            if self.flag_use_occlusion_map and (occlusion_map is not None):
-                out = out * occlusion_map
-
-        ret_dct = {
-            'occlusion_map': occlusion_map,
-            'deformation': deformation,
-            'out': out,
-        }
-
-        return ret_dct
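Note: deform_input above is a 5-D grid_sample where `deformation` holds absolute sampling coordinates in [-1, 1], not offsets. A self-contained identity-warp check (align_corners=True here so the linspace grid is an exact identity; the network itself uses align_corners=False):

import torch
import torch.nn.functional as F

feature_3d = torch.randn(1, 32, 16, 64, 64)   # Bx32x16x64x64 source volume
zz, yy, xx = torch.meshgrid(
    torch.linspace(-1, 1, 16), torch.linspace(-1, 1, 64), torch.linspace(-1, 1, 64),
    indexing='ij')
identity = torch.stack([xx, yy, zz], dim=-1).unsqueeze(0)   # (1, 16, 64, 64, 3), x-y-z order
warped = F.grid_sample(feature_3d, identity, align_corners=True)
print(torch.allclose(warped, feature_3d, atol=1e-5))        # True: identity deformation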
src/template_maker.py
DELETED
@@ -1,65 +0,0 @@
-# coding: utf-8
-
-"""
-Make video template
-"""
-
-import os
-import cv2
-import numpy as np
-import pickle
-from rich.progress import track
-from .utils.cropper import Cropper
-
-from .utils.io import load_driving_info
-from .utils.camera import get_rotation_matrix
-from .utils.helper import mkdir, basename
-from .utils.rprint import rlog as log
-from .config.crop_config import CropConfig
-from .config.inference_config import InferenceConfig
-from .live_portrait_wrapper import LivePortraitWrapper
-
-
-class TemplateMaker:
-
-    def __init__(self, inference_cfg: InferenceConfig, crop_cfg: CropConfig):
-        self.live_portrait_wrapper: LivePortraitWrapper = LivePortraitWrapper(cfg=inference_cfg)
-        self.cropper = Cropper(crop_cfg=crop_cfg)
-
-    def make_motion_template(self, video_fp: str, output_path: str, **kwargs):
-        """ make video template (.pkl format)
-        video_fp: driving video file path
-        output_path: where to save the pickle file
-        """
-        driving_rgb_lst = load_driving_info(video_fp)
-        driving_rgb_lst = [cv2.resize(_, (256, 256)) for _ in driving_rgb_lst]
-        driving_lmk_lst = self.cropper.get_retargeting_lmk_info(driving_rgb_lst)
-        I_d_lst = self.live_portrait_wrapper.prepare_driving_videos(driving_rgb_lst)
-
-        n_frames = I_d_lst.shape[0]
-
-        templates = []
-
-        for i in track(range(n_frames), description='Making templates...', total=n_frames):
-            I_d_i = I_d_lst[i]
-            x_d_i_info = self.live_portrait_wrapper.get_kp_info(I_d_i)
-            R_d_i = get_rotation_matrix(x_d_i_info['pitch'], x_d_i_info['yaw'], x_d_i_info['roll'])
-            # collect s_d, R_d, δ_d and t_d for inference
-            template_dct = {
-                'n_frames': n_frames,
-                'frames_index': i,
-            }
-            template_dct['scale'] = x_d_i_info['scale'].cpu().numpy().astype(np.float32)
-            template_dct['R_d'] = R_d_i.cpu().numpy().astype(np.float32)
-            template_dct['exp'] = x_d_i_info['exp'].cpu().numpy().astype(np.float32)
-            template_dct['t'] = x_d_i_info['t'].cpu().numpy().astype(np.float32)
-
-            templates.append(template_dct)
-
-        mkdir(output_path)
-        # Save the dictionary as a pickle file
-        pickle_fp = os.path.join(output_path, f'{basename(video_fp)}.pkl')
-        with open(pickle_fp, 'wb') as f:
-            pickle.dump([templates, driving_lmk_lst], f)
-        log(f"Template saved at {pickle_fp}")
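Note: reading back a template produced by make_motion_template above; the path is a placeholder:

import pickle

with open('templates/d0.pkl', 'rb') as f:        # hypothetical TemplateMaker output
    templates, driving_lmk_lst = pickle.load(f)  # layout matches the pickle.dump above
first = templates[0]
print(first['n_frames'], first['R_d'].shape)     # e.g. N, (1, 3, 3) per frame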
src/utils/__init__.py
DELETED
File without changes
src/utils/camera.py
DELETED
@@ -1,75 +0,0 @@
-# coding: utf-8
-
-"""
-functions for processing and transforming 3D facial keypoints
-"""
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-PI = np.pi
-
-
-def headpose_pred_to_degree(pred):
-    """
-    pred: (bs, 66) or (bs, 1) or others
-    """
-    if pred.ndim > 1 and pred.shape[1] == 66:
-        # NOTE: the mean offset is modified to 97.5
-        device = pred.device
-        idx_tensor = [idx for idx in range(0, 66)]
-        idx_tensor = torch.FloatTensor(idx_tensor).to(device)
-        pred = F.softmax(pred, dim=1)
-        degree = torch.sum(pred*idx_tensor, axis=1) * 3 - 97.5
-
-        return degree
-
-    return pred
-
-
-def get_rotation_matrix(pitch_, yaw_, roll_):
-    """ the input is in degrees
-    """
-    # calculate the rotation matrix: vps @ rot
-
-    # transform to radians
-    pitch = pitch_ / 180 * PI
-    yaw = yaw_ / 180 * PI
-    roll = roll_ / 180 * PI
-
-    device = pitch.device
-
-    if pitch.ndim == 1:
-        pitch = pitch.unsqueeze(1)
-    if yaw.ndim == 1:
-        yaw = yaw.unsqueeze(1)
-    if roll.ndim == 1:
-        roll = roll.unsqueeze(1)
-
-    # calculate the euler matrices
-    bs = pitch.shape[0]
-    ones = torch.ones([bs, 1]).to(device)
-    zeros = torch.zeros([bs, 1]).to(device)
-    x, y, z = pitch, yaw, roll
-
-    rot_x = torch.cat([
-        ones, zeros, zeros,
-        zeros, torch.cos(x), -torch.sin(x),
-        zeros, torch.sin(x), torch.cos(x)
-    ], dim=1).reshape([bs, 3, 3])
-
-    rot_y = torch.cat([
-        torch.cos(y), zeros, torch.sin(y),
-        zeros, ones, zeros,
-        -torch.sin(y), zeros, torch.cos(y)
-    ], dim=1).reshape([bs, 3, 3])
-
-    rot_z = torch.cat([
-        torch.cos(z), -torch.sin(z), zeros,
-        torch.sin(z), torch.cos(z), zeros,
-        zeros, zeros, ones
-    ], dim=1).reshape([bs, 3, 3])
-
-    rot = rot_z @ rot_y @ rot_x
-    return rot.permute(0, 2, 1)  # transpose
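Note: quick numeric checks for get_rotation_matrix above (degrees in, transposed composition out): zero angles give the identity, and any output is orthonormal. Runs against the pre-deletion tree:

import torch
from src.utils.camera import get_rotation_matrix  # pre-deletion tree

zero = torch.zeros(1)
R = get_rotation_matrix(zero, zero, zero)                       # (1, 3, 3)
print(torch.allclose(R, torch.eye(3).unsqueeze(0)))             # True: identity at zero angles
R2 = get_rotation_matrix(torch.tensor([30.]), torch.tensor([10.]), torch.tensor([-5.]))
print(torch.allclose(R2 @ R2.transpose(1, 2), torch.eye(3).unsqueeze(0), atol=1e-5))  # True: R @ R.T == I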
src/utils/crop.py
DELETED
@@ -1,412 +0,0 @@
-# coding: utf-8
-
-"""
-cropping function and the related preprocess functions for cropping
-"""
-
-import numpy as np
-import os.path as osp
-from math import sin, cos, acos, degrees
-import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)  # NOTE: enforce single thread
-from .rprint import rprint as print
-
-DTYPE = np.float32
-CV2_INTERP = cv2.INTER_LINEAR
-
-
-def make_abs_path(fn):
-    return osp.join(osp.dirname(osp.realpath(__file__)), fn)
-
-
-def _transform_img(img, M, dsize, flags=CV2_INTERP, borderMode=None):
-    """ apply a similarity or affine transformation to the image; no border operation is performed!
-    img:
-    M: 2x3 matrix or 3x3 matrix
-    dsize: target shape (width, height)
-    """
-    if isinstance(dsize, tuple) or isinstance(dsize, list):
-        _dsize = tuple(dsize)
-    else:
-        _dsize = (dsize, dsize)
-
-    if borderMode is not None:
-        return cv2.warpAffine(img, M[:2, :], dsize=_dsize, flags=flags, borderMode=borderMode, borderValue=(0, 0, 0))
-    else:
-        return cv2.warpAffine(img, M[:2, :], dsize=_dsize, flags=flags)
-
-
-def _transform_pts(pts, M):
-    """ apply a similarity or affine transformation to the points
-    pts: Nx2 ndarray
-    M: 2x3 matrix or 3x3 matrix
-    return: Nx2
-    """
-    return pts @ M[:2, :2].T + M[:2, 2]
-
-
-def parse_pt2_from_pt101(pt101, use_lip=True):
-    """
-    parse the 2 points from the 101 points, which cancels the roll
-    """
-    # the former version used the eye centers, but that is not robust; now use interpolation
-    pt_left_eye = np.mean(pt101[[39, 42, 45, 48]], axis=0)  # left eye center
-    pt_right_eye = np.mean(pt101[[51, 54, 57, 60]], axis=0)  # right eye center
-
-    if use_lip:
-        # use lip
-        pt_center_eye = (pt_left_eye + pt_right_eye) / 2
-        pt_center_lip = (pt101[75] + pt101[81]) / 2
-        pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
-    else:
-        pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
-    return pt2
-
-
-def parse_pt2_from_pt106(pt106, use_lip=True):
-    """
-    parse the 2 points from the 106 points, which cancels the roll
-    """
-    pt_left_eye = np.mean(pt106[[33, 35, 40, 39]], axis=0)  # left eye center
-    pt_right_eye = np.mean(pt106[[87, 89, 94, 93]], axis=0)  # right eye center
-
-    if use_lip:
-        # use lip
-        pt_center_eye = (pt_left_eye + pt_right_eye) / 2
-        pt_center_lip = (pt106[52] + pt106[61]) / 2
-        pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
-    else:
-        pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
-    return pt2
-
-
-def parse_pt2_from_pt203(pt203, use_lip=True):
-    """
-    parse the 2 points from the 203 points, which cancels the roll
-    """
-    pt_left_eye = np.mean(pt203[[0, 6, 12, 18]], axis=0)  # left eye center
-    pt_right_eye = np.mean(pt203[[24, 30, 36, 42]], axis=0)  # right eye center
-    if use_lip:
-        # use lip
-        pt_center_eye = (pt_left_eye + pt_right_eye) / 2
-        pt_center_lip = (pt203[48] + pt203[66]) / 2
-        pt2 = np.stack([pt_center_eye, pt_center_lip], axis=0)
-    else:
-        pt2 = np.stack([pt_left_eye, pt_right_eye], axis=0)
-    return pt2
-
-
-def parse_pt2_from_pt68(pt68, use_lip=True):
-    """
-    parse the 2 points from the 68 points, which cancels the roll
-    """
-    lm_idx = np.array([31, 37, 40, 43, 46, 49, 55], dtype=np.int32) - 1
-    if use_lip:
-        pt5 = np.stack([
-            np.mean(pt68[lm_idx[[1, 2]], :], 0),  # left eye
-            np.mean(pt68[lm_idx[[3, 4]], :], 0),  # right eye
-            pt68[lm_idx[0], :],  # nose
-            pt68[lm_idx[5], :],  # lip
-            pt68[lm_idx[6], :]   # lip
-        ], axis=0)
-
-        pt2 = np.stack([
-            (pt5[0] + pt5[1]) / 2,
-            (pt5[3] + pt5[4]) / 2
-        ], axis=0)
-    else:
-        pt2 = np.stack([
-            np.mean(pt68[lm_idx[[1, 2]], :], 0),  # left eye
-            np.mean(pt68[lm_idx[[3, 4]], :], 0),  # right eye
-        ], axis=0)
-
-    return pt2
-
-
-def parse_pt2_from_pt5(pt5, use_lip=True):
-    """
-    parse the 2 points from the 5 points, which cancels the roll
-    """
-    if use_lip:
-        pt2 = np.stack([
-            (pt5[0] + pt5[1]) / 2,
-            (pt5[3] + pt5[4]) / 2
-        ], axis=0)
-    else:
-        pt2 = np.stack([
-            pt5[0],
-            pt5[1]
-        ], axis=0)
-    return pt2
-
-
-def parse_pt2_from_pt_x(pts, use_lip=True):
-    if pts.shape[0] == 101:
-        pt2 = parse_pt2_from_pt101(pts, use_lip=use_lip)
-    elif pts.shape[0] == 106:
-        pt2 = parse_pt2_from_pt106(pts, use_lip=use_lip)
-    elif pts.shape[0] == 68:
-        pt2 = parse_pt2_from_pt68(pts, use_lip=use_lip)
-    elif pts.shape[0] == 5:
-        pt2 = parse_pt2_from_pt5(pts, use_lip=use_lip)
-    elif pts.shape[0] == 203:
-        pt2 = parse_pt2_from_pt203(pts, use_lip=use_lip)
-    elif pts.shape[0] > 101:
-        # take the first 101 points
-        pt2 = parse_pt2_from_pt101(pts[:101], use_lip=use_lip)
-    else:
-        raise Exception(f'Unknown shape: {pts.shape}')
-
-    if not use_lip:
-        # NOTE: to be compatible with the code below, pt2 needs to be rotated 90 degrees clockwise manually
-        v = pt2[1] - pt2[0]
-        pt2[1, 0] = pt2[0, 0] - v[1]
-        pt2[1, 1] = pt2[0, 1] + v[0]
-
-    return pt2
-
-
-def parse_rect_from_landmark(
-    pts,
-    scale=1.5,
-    need_square=True,
-    vx_ratio=0,
-    vy_ratio=0,
-    use_deg_flag=False,
-    **kwargs
-):
-    """parse center, size and angle from 101/68/5/x landmarks
-    vx_ratio: the offset ratio along the pupil x-axis, multiplied by size
-    vy_ratio: the offset ratio along the pupil y-axis, multiplied by size, which is used to contain more forehead area
-
-    judged by pts.shape
-    """
-    pt2 = parse_pt2_from_pt_x(pts, use_lip=kwargs.get('use_lip', True))
-
-    uy = pt2[1] - pt2[0]
-    l = np.linalg.norm(uy)
-    if l <= 1e-3:
-        uy = np.array([0, 1], dtype=DTYPE)
-    else:
-        uy /= l
-    ux = np.array((uy[1], -uy[0]), dtype=DTYPE)
-
-    # the rotation degree of the x-axis: clockwise is positive, counterclockwise is negative (image coordinate system)
-    # print(uy)
-    # print(ux)
-    angle = acos(ux[0])
-    if ux[1] < 0:
-        angle = -angle
-
-    # rotation matrix
-    M = np.array([ux, uy])
-
-    # calculate the size which contains the angled bbox, and the center
-    center0 = np.mean(pts, axis=0)
-    rpts = (pts - center0) @ M.T  # (M @ P.T).T = P @ M.T
-    lt_pt = np.min(rpts, axis=0)
-    rb_pt = np.max(rpts, axis=0)
-    center1 = (lt_pt + rb_pt) / 2
-
-    size = rb_pt - lt_pt
-    if need_square:
-        m = max(size[0], size[1])
-        size[0] = m
-        size[1] = m
-
-    size *= scale  # scale size
-    center = center0 + ux * center1[0] + uy * center1[1]  # counterclockwise rotation, equivalent to M.T @ center1.T
-    center = center + ux * (vx_ratio * size) + uy * \
-        (vy_ratio * size)  # considering the offset in the vx and vy directions
-
-    if use_deg_flag:
-        angle = degrees(angle)
-
-    return center, size, angle
-
-
-def parse_bbox_from_landmark(pts, **kwargs):
-    center, size, angle = parse_rect_from_landmark(pts, **kwargs)
-    cx, cy = center
-    w, h = size
-
-    # calculate the vertex positions before rotation
-    bbox = np.array([
-        [cx-w/2, cy-h/2],  # left, top
-        [cx+w/2, cy-h/2],
-        [cx+w/2, cy+h/2],  # right, bottom
-        [cx-w/2, cy+h/2]
-    ], dtype=DTYPE)
-
-    # construct rotation matrix
-    bbox_rot = bbox.copy()
-    R = np.array([
-        [np.cos(angle), -np.sin(angle)],
-        [np.sin(angle), np.cos(angle)]
-    ], dtype=DTYPE)
-
-    # calculate the relative position of each vertex from the rotation center, then rotate these positions, and finally add the coordinates of the rotation center
-    bbox_rot = (bbox_rot - center) @ R.T + center
-
-    return {
-        'center': center,  # 2x1
-        'size': size,  # (w, h)
-        'angle': angle,  # rad, counterclockwise
-        'bbox': bbox,  # 4x2
-        'bbox_rot': bbox_rot,  # 4x2
-    }
-
-
-def crop_image_by_bbox(img, bbox, lmk=None, dsize=512, angle=None, flag_rot=False, **kwargs):
-    left, top, right, bot = bbox
-    if int(right - left) != int(bot - top):
-        print(f'right-left {right-left} != bot-top {bot-top}')
-    size = right - left
-
-    src_center = np.array([(left + right) / 2, (top + bot) / 2], dtype=DTYPE)
-    tgt_center = np.array([dsize / 2, dsize / 2], dtype=DTYPE)
-
-    s = dsize / size  # scale
-    if flag_rot and angle is not None:
-        costheta, sintheta = cos(angle), sin(angle)
-        cx, cy = src_center[0], src_center[1]  # ori center
-        tcx, tcy = tgt_center[0], tgt_center[1]  # target center
-        # need to infer
-        M_o2c = np.array(
-            [[s * costheta, s * sintheta, tcx - s * (costheta * cx + sintheta * cy)],
-             [-s * sintheta, s * costheta, tcy - s * (-sintheta * cx + costheta * cy)]],
-            dtype=DTYPE
-        )
-    else:
-        M_o2c = np.array(
-            [[s, 0, tgt_center[0] - s * src_center[0]],
-             [0, s, tgt_center[1] - s * src_center[1]]],
-            dtype=DTYPE
-        )
-
-    if flag_rot and angle is None:
-        print('angle is None, but flag_rot is True', style="bold yellow")
-
-    img_crop = _transform_img(img, M_o2c, dsize=dsize, borderMode=kwargs.get('borderMode', None))
-
-    lmk_crop = _transform_pts(lmk, M_o2c) if lmk is not None else None
-
-    M_o2c = np.vstack([M_o2c, np.array([0, 0, 1], dtype=DTYPE)])
-    M_c2o = np.linalg.inv(M_o2c)
-
-    # cv2.imwrite('crop.jpg', img_crop)
-
-    return {
-        'img_crop': img_crop,
-        'lmk_crop': lmk_crop,
-        'M_o2c': M_o2c,
-        'M_c2o': M_c2o,
-    }
-
-
-def _estimate_similar_transform_from_pts(
-    pts,
-    dsize,
-    scale=1.5,
-    vx_ratio=0,
-    vy_ratio=-0.1,
-    flag_do_rot=True,
-    **kwargs
-):
-    """ calculate the affine matrix of the cropped image from sparse points: from the original image to the cropped image; the inverse maps the cropped image back to the original image
-    pts: landmarks, 101 or 68 points or other points, Nx2
-    scale: the larger the scale factor, the smaller the face ratio
-    vx_ratio: x shift
-    vy_ratio: y shift; the smaller the y shift, the lower the face region
-    flag_do_rot: if True, conduct roll correction
-    """
-    center, size, angle = parse_rect_from_landmark(
-        pts, scale=scale, vx_ratio=vx_ratio, vy_ratio=vy_ratio,
-        use_lip=kwargs.get('use_lip', True)
-    )
-
-    s = dsize / size[0]  # scale
-    tgt_center = np.array([dsize / 2, dsize / 2], dtype=DTYPE)  # center of dsize
-
-    if flag_do_rot:
-        costheta, sintheta = cos(angle), sin(angle)
-        cx, cy = center[0], center[1]  # ori center
-        tcx, tcy = tgt_center[0], tgt_center[1]  # target center
-        # need to infer
-        M_INV = np.array(
-            [[s * costheta, s * sintheta, tcx - s * (costheta * cx + sintheta * cy)],
-             [-s * sintheta, s * costheta, tcy - s * (-sintheta * cx + costheta * cy)]],
-            dtype=DTYPE
-        )
-    else:
-        M_INV = np.array(
-            [[s, 0, tgt_center[0] - s * center[0]],
-             [0, s, tgt_center[1] - s * center[1]]],
-            dtype=DTYPE
-        )
-
-    M_INV_H = np.vstack([M_INV, np.array([0, 0, 1])])
-    M = np.linalg.inv(M_INV_H)
-
-    # M_INV is from the original image to the cropped image, M is from the cropped image to the original image
-    return M_INV, M[:2, ...]
-
-
-def crop_image(img, pts: np.ndarray, **kwargs):
-    dsize = kwargs.get('dsize', 224)
-    scale = kwargs.get('scale', 1.5)  # 1.5 | 1.6
-    vy_ratio = kwargs.get('vy_ratio', -0.1)  # -0.0625 | -0.1
-
-    M_INV, _ = _estimate_similar_transform_from_pts(
-        pts,
-        dsize=dsize,
-        scale=scale,
-        vy_ratio=vy_ratio,
-        flag_do_rot=kwargs.get('flag_do_rot', True),
-    )
-
-    if img is None:
-        M_INV_H = np.vstack([M_INV, np.array([0, 0, 1], dtype=DTYPE)])
-        M = np.linalg.inv(M_INV_H)
-        ret_dct = {
-            'M': M[:2, ...],  # NOTE: this is the inverse of M_INV, i.e. from the cropped image to the original image
-            'M_o2c': M[:2, ...],  # NOTE: despite the key name, this also holds the crop-to-original matrix
-            'img_crop': None,
-            'pt_crop': None,
-        }
-        return ret_dct
-
-    img_crop = _transform_img(img, M_INV, dsize)  # origin to crop
-    pt_crop = _transform_pts(pts, M_INV)
-
-    M_o2c = np.vstack([M_INV, np.array([0, 0, 1], dtype=DTYPE)])
-    M_c2o = np.linalg.inv(M_o2c)
-
-    ret_dct = {
-        'M_o2c': M_o2c,  # from the original image to the cropped image, 3x3
-        'M_c2o': M_c2o,  # from the cropped image to the original image, 3x3
-        'img_crop': img_crop,  # the cropped image
-        'pt_crop': pt_crop,  # the landmarks of the cropped image
-    }
-
-    return ret_dct
-
-
-def average_bbox_lst(bbox_lst):
-    if len(bbox_lst) == 0:
-        return None
-    bbox_arr = np.array(bbox_lst)
-    return np.mean(bbox_arr, axis=0).tolist()
-
-
-def prepare_paste_back(mask_crop, crop_M_c2o, dsize):
-    """prepare mask for later image paste back
-    """
-    if mask_crop is None:
-        mask_crop = cv2.imread(make_abs_path('./resources/mask_template.png'), cv2.IMREAD_COLOR)
-    mask_ori = _transform_img(mask_crop, crop_M_c2o, dsize)
-    mask_ori = mask_ori.astype(np.float32) / 255.
-    return mask_ori
-
-
-def paste_back(image_to_processed, crop_M_c2o, rgb_ori, mask_ori):
-    """paste back the image
-    """
-    dsize = (rgb_ori.shape[1], rgb_ori.shape[0])
-    result = _transform_img(image_to_processed, crop_M_c2o, dsize=dsize)
-    result = np.clip(mask_ori * result + (1 - mask_ori) * rgb_ori, 0, 255).astype(np.uint8)
-    return result
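Note: the similarity estimate above is invertible by construction: M_INV maps original-image points into the crop, and the returned M maps them back. A synthetic 5-point round trip (pre-deletion tree):

import numpy as np
from src.utils.crop import _estimate_similar_transform_from_pts, _transform_pts  # pre-deletion tree

# synthetic 5-point layout: two eyes, nose, two mouth corners
pts = np.array([[120, 140], [180, 138], [150, 170], [130, 200], [172, 198]], dtype=np.float32)
M_INV, M = _estimate_similar_transform_from_pts(pts, dsize=224)
pt_crop = _transform_pts(pts, M_INV)          # original -> 224x224 crop
pt_back = _transform_pts(pt_crop, M)          # crop -> original
print(np.allclose(pt_back, pts, atol=1e-3))   # True: exact round trip up to float error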
src/utils/cropper.py
DELETED
@@ -1,145 +0,0 @@
-# coding: utf-8
-
-import gradio as gr
-import numpy as np
-import os.path as osp
-from typing import List, Union, Tuple
-from dataclasses import dataclass, field
-import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
-
-from .landmark_runner import LandmarkRunner
-from .face_analysis_diy import FaceAnalysisDIY
-from .helper import prefix
-from .crop import crop_image, crop_image_by_bbox, parse_bbox_from_landmark, average_bbox_lst
-from .timer import Timer
-from .rprint import rlog as log
-from .io import load_image_rgb
-from .video import VideoWriter, get_fps, change_video_fps
-
-
-def make_abs_path(fn):
-    return osp.join(osp.dirname(osp.realpath(__file__)), fn)
-
-
-@dataclass
-class Trajectory:
-    start: int = -1  # start frame, inclusive
-    end: int = -1  # end frame, inclusive
-    lmk_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list)  # lmk list
-    bbox_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list)  # bbox list
-    frame_rgb_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list)  # frame list
-    frame_rgb_crop_lst: Union[Tuple, List, np.ndarray] = field(default_factory=list)  # frame crop list
-
-
-class Cropper(object):
-    def __init__(self, **kwargs) -> None:
-        device_id = kwargs.get('device_id', 0)
-        self.landmark_runner = LandmarkRunner(
-            ckpt_path=make_abs_path('../../pretrained_weights/liveportrait/landmark.onnx'),
-            onnx_provider='cpu',
-            device_id=device_id
-        )
-        self.landmark_runner.warmup()
-
-        self.face_analysis_wrapper = FaceAnalysisDIY(
-            name='buffalo_l',
-            root=make_abs_path('../../pretrained_weights/insightface'),
-            providers=["CPUExecutionProvider"]
-        )
-        self.face_analysis_wrapper.prepare(ctx_id=device_id, det_size=(512, 512))
-        self.face_analysis_wrapper.warmup()
-
-        self.crop_cfg = kwargs.get('crop_cfg', None)
-
-    def update_config(self, user_args):
-        for k, v in user_args.items():
-            if hasattr(self.crop_cfg, k):
-                setattr(self.crop_cfg, k, v)
-
-    def crop_single_image(self, obj, **kwargs):
-        direction = kwargs.get('direction', 'large-small')
-
-        # crop and align a single image
-        if isinstance(obj, str):
-            img_rgb = load_image_rgb(obj)
-        elif isinstance(obj, np.ndarray):
-            img_rgb = obj
-
-        src_face = self.face_analysis_wrapper.get(
-            img_rgb,
-            flag_do_landmark_2d_106=True,
-            direction=direction
-        )
-
-        if len(src_face) == 0:
-            log('No face detected in the source image.')
-            raise gr.Error("No face detected in the source image 💥!", duration=5)
-            raise Exception("No face detected in the source image!")
-        elif len(src_face) > 1:
-            log(f'More than one face detected in the image, only pick one face by rule {direction}.')
-
-        src_face = src_face[0]
-        pts = src_face.landmark_2d_106
-
-        # crop the face
-        ret_dct = crop_image(
-            img_rgb,  # ndarray
-            pts,  # 106x2 or Nx2
-            dsize=kwargs.get('dsize', 512),
-            scale=kwargs.get('scale', 2.3),
-            vy_ratio=kwargs.get('vy_ratio', -0.15),
-        )
-        # update a 256x256 version for network input or else
-        ret_dct['img_crop_256x256'] = cv2.resize(ret_dct['img_crop'], (256, 256), interpolation=cv2.INTER_AREA)
-        ret_dct['pt_crop_256x256'] = ret_dct['pt_crop'] * 256 / kwargs.get('dsize', 512)
-
-        recon_ret = self.landmark_runner.run(img_rgb, pts)
-        lmk = recon_ret['pts']
-        ret_dct['lmk_crop'] = lmk
-
-        return ret_dct
-
-    def get_retargeting_lmk_info(self, driving_rgb_lst):
-        # TODO: implement a tracking-based version
-        driving_lmk_lst = []
-        for driving_image in driving_rgb_lst:
-            ret_dct = self.crop_single_image(driving_image)
-            driving_lmk_lst.append(ret_dct['lmk_crop'])
-        return driving_lmk_lst
-
-    def make_video_clip(self, driving_rgb_lst, output_path, output_fps=30, **kwargs):
-        trajectory = Trajectory()
-        direction = kwargs.get('direction', 'large-small')
-        for idx, driving_image in enumerate(driving_rgb_lst):
-            if idx == 0 or trajectory.start == -1:
-                src_face = self.face_analysis_wrapper.get(
-                    driving_image,
-                    flag_do_landmark_2d_106=True,
-                    direction=direction
-                )
-                if len(src_face) == 0:
-                    # No face detected in the driving_image
-                    continue
-                elif len(src_face) > 1:
-                    log(f'More than one face detected in the driving frame_{idx}, only pick one face by rule {direction}.')
-                src_face = src_face[0]
-                pts = src_face.landmark_2d_106
-                lmk_203 = self.landmark_runner(driving_image, pts)['pts']
-                trajectory.start, trajectory.end = idx, idx
-            else:
-                lmk_203 = self.face_recon_wrapper(driving_image, trajectory.lmk_lst[-1])['pts']
-                trajectory.end = idx
-
-            trajectory.lmk_lst.append(lmk_203)
-            ret_bbox = parse_bbox_from_landmark(lmk_203, scale=self.crop_cfg.globalscale, vy_ratio=self.crop_cfg.vy_ratio)['bbox']
-            bbox = [ret_bbox[0, 0], ret_bbox[0, 1], ret_bbox[2, 0], ret_bbox[2, 1]]  # 4,
-            trajectory.bbox_lst.append(bbox)  # bbox
-            trajectory.frame_rgb_lst.append(driving_image)
-
-        global_bbox = average_bbox_lst(trajectory.bbox_lst)
-        for idx, (frame_rgb, lmk) in enumerate(zip(trajectory.frame_rgb_lst, trajectory.lmk_lst)):
-            ret_dct = crop_image_by_bbox(
-                frame_rgb, global_bbox, lmk=lmk,
-                dsize=self.video_crop_cfg.dsize, flag_rot=self.video_crop_cfg.flag_rot, borderValue=self.video_crop_cfg.borderValue
-            )
-            frame_rgb_crop = ret_dct['img_crop']
src/utils/dependencies/insightface/__init__.py
DELETED
@@ -1,20 +0,0 @@
-# coding: utf-8
-# pylint: disable=wrong-import-position
-"""InsightFace: A Face Analysis Toolkit."""
-from __future__ import absolute_import
-
-try:
-    #import mxnet as mx
-    import onnxruntime
-except ImportError:
-    raise ImportError(
-        "Unable to import dependency onnxruntime. "
-    )
-
-__version__ = '0.7.3'
-
-from . import model_zoo
-from . import utils
-from . import app
-from . import data
20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/utils/dependencies/insightface/app/__init__.py
DELETED
@@ -1 +0,0 @@
-from .face_analysis import *
src/utils/dependencies/insightface/app/common.py
DELETED
@@ -1,49 +0,0 @@
-import numpy as np
-from numpy.linalg import norm as l2norm
-#from easydict import EasyDict
-
-class Face(dict):
-
-    def __init__(self, d=None, **kwargs):
-        if d is None:
-            d = {}
-        if kwargs:
-            d.update(**kwargs)
-        for k, v in d.items():
-            setattr(self, k, v)
-        # Class attributes
-        #for k in self.__class__.__dict__.keys():
-        #    if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
-        #        setattr(self, k, getattr(self, k))
-
-    def __setattr__(self, name, value):
-        if isinstance(value, (list, tuple)):
-            value = [self.__class__(x)
-                     if isinstance(x, dict) else x for x in value]
-        elif isinstance(value, dict) and not isinstance(value, self.__class__):
-            value = self.__class__(value)
-        super(Face, self).__setattr__(name, value)
-        super(Face, self).__setitem__(name, value)
-
-    __setitem__ = __setattr__
-
-    def __getattr__(self, name):
-        return None
-
-    @property
-    def embedding_norm(self):
-        if self.embedding is None:
-            return None
-        return l2norm(self.embedding)
-
-    @property
-    def normed_embedding(self):
-        if self.embedding is None:
-            return None
-        return self.embedding / self.embedding_norm
-
-    @property
-    def sex(self):
-        if self.gender is None:
-            return None
-        return 'M' if self.gender==1 else 'F'
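
The `Face` class above is a plain `dict` whose keys double as attributes: `__setattr__` writes both views, and `__getattr__` returns `None` for anything unset instead of raising. A minimal standalone sketch of the same pattern (an illustration, not the deleted class itself):

    class AttrDict(dict):
        # attribute assignment writes through to the dict key as well
        def __setattr__(self, name, value):
            super().__setattr__(name, value)
            super().__setitem__(name, value)
        __setitem__ = __setattr__

        # unset attributes resolve to None rather than raising AttributeError
        def __getattr__(self, name):
            return None

    f = AttrDict()
    f.bbox = [0, 0, 112, 112]
    print(f['bbox'])    # [0, 0, 112, 112]
    print(f.embedding)  # None
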
src/utils/dependencies/insightface/app/face_analysis.py
DELETED
@@ -1,110 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Organization  : insightface.ai
-# @Author        : Jia Guo
-# @Time          : 2021-05-04
-# @Function      :
-
-
-from __future__ import division
-
-import glob
-import os.path as osp
-
-import numpy as np
-import onnxruntime
-from numpy.linalg import norm
-
-from ..model_zoo import model_zoo
-from ..utils import ensure_available
-from .common import Face
-
-
-DEFAULT_MP_NAME = 'buffalo_l'
-__all__ = ['FaceAnalysis']
-
-class FaceAnalysis:
-    def __init__(self, name=DEFAULT_MP_NAME, root='~/.insightface', allowed_modules=None, **kwargs):
-        onnxruntime.set_default_logger_severity(3)
-        self.models = {}
-        self.model_dir = ensure_available('models', name, root=root)
-        onnx_files = glob.glob(osp.join(self.model_dir, '*.onnx'))
-        onnx_files = sorted(onnx_files)
-        for onnx_file in onnx_files:
-            model = model_zoo.get_model(onnx_file, **kwargs)
-            if model is None:
-                print('model not recognized:', onnx_file)
-            elif allowed_modules is not None and model.taskname not in allowed_modules:
-                print('model ignore:', onnx_file, model.taskname)
-                del model
-            elif model.taskname not in self.models and (allowed_modules is None or model.taskname in allowed_modules):
-                # print('find model:', onnx_file, model.taskname, model.input_shape, model.input_mean, model.input_std)
-                self.models[model.taskname] = model
-            else:
-                print('duplicated model task type, ignore:', onnx_file, model.taskname)
-                del model
-        assert 'detection' in self.models
-        self.det_model = self.models['detection']
-
-
-    def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
-        self.det_thresh = det_thresh
-        assert det_size is not None
-        # print('set det-size:', det_size)
-        self.det_size = det_size
-        for taskname, model in self.models.items():
-            if taskname=='detection':
-                model.prepare(ctx_id, input_size=det_size, det_thresh=det_thresh)
-            else:
-                model.prepare(ctx_id)
-
-    def get(self, img, max_num=0):
-        bboxes, kpss = self.det_model.detect(img,
-                                             max_num=max_num,
-                                             metric='default')
-        if bboxes.shape[0] == 0:
-            return []
-        ret = []
-        for i in range(bboxes.shape[0]):
-            bbox = bboxes[i, 0:4]
-            det_score = bboxes[i, 4]
-            kps = None
-            if kpss is not None:
-                kps = kpss[i]
-            face = Face(bbox=bbox, kps=kps, det_score=det_score)
-            for taskname, model in self.models.items():
-                if taskname=='detection':
-                    continue
-                model.get(img, face)
-            ret.append(face)
-        return ret
-
-    def draw_on(self, img, faces):
-        import cv2
-        dimg = img.copy()
-        for i in range(len(faces)):
-            face = faces[i]
-            box = face.bbox.astype(np.int)
-            color = (0, 0, 255)
-            cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)
-            if face.kps is not None:
-                kps = face.kps.astype(np.int)
-                #print(landmark.shape)
-                for l in range(kps.shape[0]):
-                    color = (0, 0, 255)
-                    if l == 0 or l == 3:
-                        color = (0, 255, 0)
-                    cv2.circle(dimg, (kps[l][0], kps[l][1]), 1, color,
-                               2)
-            if face.gender is not None and face.age is not None:
-                cv2.putText(dimg,'%s,%d'%(face.sex,face.age), (box[0]-1, box[1]-4),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,255,0),1)
-
-            #for key, value in face.items():
-            #    if key.startswith('landmark_3d'):
-            #        print(key, value.shape)
-            #        print(value[0:10,:])
-            #        lmk = np.round(value).astype(np.int)
-            #        for l in range(lmk.shape[0]):
-            #            color = (255, 0, 0)
-            #            cv2.circle(dimg, (lmk[l][0], lmk[l][1]), 1, color,
-            #                       2)
-        return dimg
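
Typical use of `FaceAnalysis` follows the detect-then-enrich flow above: detection yields boxes and keypoints, then every other loaded model annotates the resulting `Face` objects. A sketch, assuming the pip-installed insightface package (which ships this same class) and a hypothetical input image `portrait.jpg`:

    import cv2
    from insightface.app import FaceAnalysis

    app = FaceAnalysis(name='buffalo_l', providers=['CPUExecutionProvider'])
    app.prepare(ctx_id=-1, det_size=(640, 640))  # ctx_id < 0 keeps everything on CPU

    img = cv2.imread('portrait.jpg')             # hypothetical path
    faces = app.get(img)                         # Face objects with bbox, kps, embedding, ...
    cv2.imwrite('out.jpg', app.draw_on(img, faces))
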
src/utils/dependencies/insightface/data/__init__.py
DELETED
@@ -1,2 +0,0 @@
-from .image import get_image
-from .pickle_object import get_object
src/utils/dependencies/insightface/data/image.py
DELETED
@@ -1,27 +0,0 @@
-import cv2
-import os
-import os.path as osp
-from pathlib import Path
-
-class ImageCache:
-    data = {}
-
-def get_image(name, to_rgb=False):
-    key = (name, to_rgb)
-    if key in ImageCache.data:
-        return ImageCache.data[key]
-    images_dir = osp.join(Path(__file__).parent.absolute(), 'images')
-    ext_names = ['.jpg', '.png', '.jpeg']
-    image_file = None
-    for ext_name in ext_names:
-        _image_file = osp.join(images_dir, "%s%s"%(name, ext_name))
-        if osp.exists(_image_file):
-            image_file = _image_file
-            break
-    assert image_file is not None, '%s not found'%name
-    img = cv2.imread(image_file)
-    if to_rgb:
-        img = img[:,:,::-1]
-    ImageCache.data[key] = img
-    return img
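
`get_image` resolves a bare name against the bundled images directory, tries `.jpg`/`.png`/`.jpeg` in turn, and memoizes the decoded array per `(name, to_rgb)` key. A sketch, assuming the pip-installed insightface package, which carries the same helper and sample images:

    from insightface.data import get_image

    img_bgr = get_image('t1')               # loads images/t1.jpg, BGR as cv2 returns it
    img_rgb = get_image('t1', to_rgb=True)  # channel-reversed copy
    print(img_bgr.shape)
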
src/utils/dependencies/insightface/data/images/Tom_Hanks_54745.png
DELETED
Binary file (12.1 kB)

src/utils/dependencies/insightface/data/images/mask_black.jpg
DELETED
Binary file (21.3 kB)

src/utils/dependencies/insightface/data/images/mask_blue.jpg
DELETED
Binary file (44.7 kB)

src/utils/dependencies/insightface/data/images/mask_green.jpg
DELETED
Binary file (6.12 kB)

src/utils/dependencies/insightface/data/images/mask_white.jpg
DELETED
Binary file (78.9 kB)

src/utils/dependencies/insightface/data/images/t1.jpg
DELETED
Binary file (129 kB)
src/utils/dependencies/insightface/data/objects/meanshape_68.pkl
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:39ffecf84ba73f0d0d7e49380833ba88713c9fcdec51df4f7ac45a48b8f4cc51
-size 974
src/utils/dependencies/insightface/data/pickle_object.py
DELETED
@@ -1,17 +0,0 @@
-import cv2
-import os
-import os.path as osp
-from pathlib import Path
-import pickle
-
-def get_object(name):
-    objects_dir = osp.join(Path(__file__).parent.absolute(), 'objects')
-    if not name.endswith('.pkl'):
-        name = name+".pkl"
-    filepath = osp.join(objects_dir, name)
-    if not osp.exists(filepath):
-        return None
-    with open(filepath, 'rb') as f:
-        obj = pickle.load(f)
-    return obj
src/utils/dependencies/insightface/data/rec_builder.py
DELETED
@@ -1,71 +0,0 @@
-import pickle
-import numpy as np
-import os
-import os.path as osp
-import sys
-import mxnet as mx
-
-
-class RecBuilder():
-    def __init__(self, path, image_size=(112, 112)):
-        self.path = path
-        self.image_size = image_size
-        self.widx = 0
-        self.wlabel = 0
-        self.max_label = -1
-        assert not osp.exists(path), '%s exists' % path
-        os.makedirs(path)
-        self.writer = mx.recordio.MXIndexedRecordIO(os.path.join(path, 'train.idx'),
-                                                    os.path.join(path, 'train.rec'),
-                                                    'w')
-        self.meta = []
-
-    def add(self, imgs):
-        #!!! img should be BGR!!!!
-        #assert label >= 0
-        #assert label > self.last_label
-        assert len(imgs) > 0
-        label = self.wlabel
-        for img in imgs:
-            idx = self.widx
-            image_meta = {'image_index': idx, 'image_classes': [label]}
-            header = mx.recordio.IRHeader(0, label, idx, 0)
-            if isinstance(img, np.ndarray):
-                s = mx.recordio.pack_img(header,img,quality=95,img_fmt='.jpg')
-            else:
-                s = mx.recordio.pack(header, img)
-            self.writer.write_idx(idx, s)
-            self.meta.append(image_meta)
-            self.widx += 1
-        self.max_label = label
-        self.wlabel += 1
-
-
-    def add_image(self, img, label):
-        #!!! img should be BGR!!!!
-        #assert label >= 0
-        #assert label > self.last_label
-        idx = self.widx
-        header = mx.recordio.IRHeader(0, label, idx, 0)
-        if isinstance(label, list):
-            idlabel = label[0]
-        else:
-            idlabel = label
-        image_meta = {'image_index': idx, 'image_classes': [idlabel]}
-        if isinstance(img, np.ndarray):
-            s = mx.recordio.pack_img(header,img,quality=95,img_fmt='.jpg')
-        else:
-            s = mx.recordio.pack(header, img)
-        self.writer.write_idx(idx, s)
-        self.meta.append(image_meta)
-        self.widx += 1
-        self.max_label = max(self.max_label, idlabel)
-
-    def close(self):
-        with open(osp.join(self.path, 'train.meta'), 'wb') as pfile:
-            pickle.dump(self.meta, pfile, protocol=pickle.HIGHEST_PROTOCOL)
-        print('stat:', self.widx, self.wlabel)
-        with open(os.path.join(self.path, 'property'), 'w') as f:
-            f.write("%d,%d,%d\n" % (self.max_label+1, self.image_size[0], self.image_size[1]))
-            f.write("%d\n" % (self.widx))
src/utils/dependencies/insightface/model_zoo/__init__.py
DELETED
@@ -1,6 +0,0 @@
-from .model_zoo import get_model
-from .arcface_onnx import ArcFaceONNX
-from .retinaface import RetinaFace
-from .scrfd import SCRFD
-from .landmark import Landmark
-from .attribute import Attribute
src/utils/dependencies/insightface/model_zoo/arcface_onnx.py
DELETED
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Organization  : insightface.ai
-# @Author        : Jia Guo
-# @Time          : 2021-05-04
-# @Function      :
-
-from __future__ import division
-import numpy as np
-import cv2
-import onnx
-import onnxruntime
-from ..utils import face_align
-
-__all__ = [
-    'ArcFaceONNX',
-]
-
-
-class ArcFaceONNX:
-    def __init__(self, model_file=None, session=None):
-        assert model_file is not None
-        self.model_file = model_file
-        self.session = session
-        self.taskname = 'recognition'
-        find_sub = False
-        find_mul = False
-        model = onnx.load(self.model_file)
-        graph = model.graph
-        for nid, node in enumerate(graph.node[:8]):
-            #print(nid, node.name)
-            if node.name.startswith('Sub') or node.name.startswith('_minus'):
-                find_sub = True
-            if node.name.startswith('Mul') or node.name.startswith('_mul'):
-                find_mul = True
-        if find_sub and find_mul:
-            #mxnet arcface model
-            input_mean = 0.0
-            input_std = 1.0
-        else:
-            input_mean = 127.5
-            input_std = 127.5
-        self.input_mean = input_mean
-        self.input_std = input_std
-        #print('input mean and std:', self.input_mean, self.input_std)
-        if self.session is None:
-            self.session = onnxruntime.InferenceSession(self.model_file, None)
-        input_cfg = self.session.get_inputs()[0]
-        input_shape = input_cfg.shape
-        input_name = input_cfg.name
-        self.input_size = tuple(input_shape[2:4][::-1])
-        self.input_shape = input_shape
-        outputs = self.session.get_outputs()
-        output_names = []
-        for out in outputs:
-            output_names.append(out.name)
-        self.input_name = input_name
-        self.output_names = output_names
-        assert len(self.output_names)==1
-        self.output_shape = outputs[0].shape
-
-    def prepare(self, ctx_id, **kwargs):
-        if ctx_id<0:
-            self.session.set_providers(['CPUExecutionProvider'])
-
-    def get(self, img, face):
-        aimg = face_align.norm_crop(img, landmark=face.kps, image_size=self.input_size[0])
-        face.embedding = self.get_feat(aimg).flatten()
-        return face.embedding
-
-    def compute_sim(self, feat1, feat2):
-        from numpy.linalg import norm
-        feat1 = feat1.ravel()
-        feat2 = feat2.ravel()
-        sim = np.dot(feat1, feat2) / (norm(feat1) * norm(feat2))
-        return sim
-
-    def get_feat(self, imgs):
-        if not isinstance(imgs, list):
-            imgs = [imgs]
-        input_size = self.input_size
-
-        blob = cv2.dnn.blobFromImages(imgs, 1.0 / self.input_std, input_size,
-                                      (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
-        return net_out
-
-    def forward(self, batch_data):
-        blob = (batch_data - self.input_mean) / self.input_std
-        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
-        return net_out
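
`compute_sim` above is plain cosine similarity between two embeddings, so values land in [-1, 1] and larger means more likely the same identity. A standalone replica on toy vectors:

    import numpy as np
    from numpy.linalg import norm

    feat1 = np.array([1.0, 0.0, 1.0])  # toy embeddings
    feat2 = np.array([1.0, 1.0, 0.0])
    sim = np.dot(feat1, feat2) / (norm(feat1) * norm(feat2))
    print(sim)  # 0.5, up to float rounding
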
src/utils/dependencies/insightface/model_zoo/attribute.py
DELETED
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Organization  : insightface.ai
-# @Author        : Jia Guo
-# @Time          : 2021-06-19
-# @Function      :
-
-from __future__ import division
-import numpy as np
-import cv2
-import onnx
-import onnxruntime
-from ..utils import face_align
-
-__all__ = [
-    'Attribute',
-]
-
-
-class Attribute:
-    def __init__(self, model_file=None, session=None):
-        assert model_file is not None
-        self.model_file = model_file
-        self.session = session
-        find_sub = False
-        find_mul = False
-        model = onnx.load(self.model_file)
-        graph = model.graph
-        for nid, node in enumerate(graph.node[:8]):
-            #print(nid, node.name)
-            if node.name.startswith('Sub') or node.name.startswith('_minus'):
-                find_sub = True
-            if node.name.startswith('Mul') or node.name.startswith('_mul'):
-                find_mul = True
-            if nid<3 and node.name=='bn_data':
-                find_sub = True
-                find_mul = True
-        if find_sub and find_mul:
-            #mxnet arcface model
-            input_mean = 0.0
-            input_std = 1.0
-        else:
-            input_mean = 127.5
-            input_std = 128.0
-        self.input_mean = input_mean
-        self.input_std = input_std
-        #print('input mean and std:', model_file, self.input_mean, self.input_std)
-        if self.session is None:
-            self.session = onnxruntime.InferenceSession(self.model_file, None)
-        input_cfg = self.session.get_inputs()[0]
-        input_shape = input_cfg.shape
-        input_name = input_cfg.name
-        self.input_size = tuple(input_shape[2:4][::-1])
-        self.input_shape = input_shape
-        outputs = self.session.get_outputs()
-        output_names = []
-        for out in outputs:
-            output_names.append(out.name)
-        self.input_name = input_name
-        self.output_names = output_names
-        assert len(self.output_names)==1
-        output_shape = outputs[0].shape
-        #print('init output_shape:', output_shape)
-        if output_shape[1]==3:
-            self.taskname = 'genderage'
-        else:
-            self.taskname = 'attribute_%d'%output_shape[1]
-
-    def prepare(self, ctx_id, **kwargs):
-        if ctx_id<0:
-            self.session.set_providers(['CPUExecutionProvider'])
-
-    def get(self, img, face):
-        bbox = face.bbox
-        w, h = (bbox[2] - bbox[0]), (bbox[3] - bbox[1])
-        center = (bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2
-        rotate = 0
-        _scale = self.input_size[0] / (max(w, h)*1.5)
-        #print('param:', img.shape, bbox, center, self.input_size, _scale, rotate)
-        aimg, M = face_align.transform(img, center, self.input_size[0], _scale, rotate)
-        input_size = tuple(aimg.shape[0:2][::-1])
-        #assert input_size==self.input_size
-        blob = cv2.dnn.blobFromImage(aimg, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-        pred = self.session.run(self.output_names, {self.input_name : blob})[0][0]
-        if self.taskname=='genderage':
-            assert len(pred)==3
-            gender = np.argmax(pred[:2])
-            age = int(np.round(pred[2]*100))
-            face['gender'] = gender
-            face['age'] = age
-            return gender, age
-        else:
-            return pred
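
The genderage head above emits three numbers: two gender logits and an age value scaled to [0, 1]. A standalone sketch of the decode, with a toy prediction:

    import numpy as np

    pred = np.array([0.1, 0.9, 0.31])   # toy output: [female, male, age/100]
    gender = int(np.argmax(pred[:2]))   # 1, which Face.sex renders as 'M'
    age = int(np.round(pred[2] * 100))  # 31
    print(gender, age)
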
src/utils/dependencies/insightface/model_zoo/inswapper.py
DELETED
@@ -1,114 +0,0 @@
-import time
-import numpy as np
-import onnxruntime
-import cv2
-import onnx
-from onnx import numpy_helper
-from ..utils import face_align
-
-
-
-
-class INSwapper():
-    def __init__(self, model_file=None, session=None):
-        self.model_file = model_file
-        self.session = session
-        model = onnx.load(self.model_file)
-        graph = model.graph
-        self.emap = numpy_helper.to_array(graph.initializer[-1])
-        self.input_mean = 0.0
-        self.input_std = 255.0
-        #print('input mean and std:', model_file, self.input_mean, self.input_std)
-        if self.session is None:
-            self.session = onnxruntime.InferenceSession(self.model_file, None)
-        inputs = self.session.get_inputs()
-        self.input_names = []
-        for inp in inputs:
-            self.input_names.append(inp.name)
-        outputs = self.session.get_outputs()
-        output_names = []
-        for out in outputs:
-            output_names.append(out.name)
-        self.output_names = output_names
-        assert len(self.output_names)==1
-        output_shape = outputs[0].shape
-        input_cfg = inputs[0]
-        input_shape = input_cfg.shape
-        self.input_shape = input_shape
-        # print('inswapper-shape:', self.input_shape)
-        self.input_size = tuple(input_shape[2:4][::-1])
-
-    def forward(self, img, latent):
-        img = (img - self.input_mean) / self.input_std
-        pred = self.session.run(self.output_names, {self.input_names[0]: img, self.input_names[1]: latent})[0]
-        return pred
-
-    def get(self, img, target_face, source_face, paste_back=True):
-        face_mask = np.zeros((img.shape[0], img.shape[1]), np.uint8)
-        cv2.fillPoly(face_mask, np.array([target_face.landmark_2d_106[[1,9,10,11,12,13,14,15,16,2,3,4,5,6,7,8,0,24,23,22,21,20,19,18,32,31,30,29,28,27,26,25,17,101,105,104,103,51,49,48,43]].astype('int64')]), 1)
-        aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0])
-        blob = cv2.dnn.blobFromImage(aimg, 1.0 / self.input_std, self.input_size,
-                                     (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-        latent = source_face.normed_embedding.reshape((1,-1))
-        latent = np.dot(latent, self.emap)
-        latent /= np.linalg.norm(latent)
-        pred = self.session.run(self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent})[0]
-        #print(latent.shape, latent.dtype, pred.shape)
-        img_fake = pred.transpose((0,2,3,1))[0]
-        bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:,:,::-1]
-        if not paste_back:
-            return bgr_fake, M
-        else:
-            target_img = img
-            fake_diff = bgr_fake.astype(np.float32) - aimg.astype(np.float32)
-            fake_diff = np.abs(fake_diff).mean(axis=2)
-            fake_diff[:2,:] = 0
-            fake_diff[-2:,:] = 0
-            fake_diff[:,:2] = 0
-            fake_diff[:,-2:] = 0
-            IM = cv2.invertAffineTransform(M)
-            img_white = np.full((aimg.shape[0],aimg.shape[1]), 255, dtype=np.float32)
-            bgr_fake = cv2.warpAffine(bgr_fake, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
-            img_white = cv2.warpAffine(img_white, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
-            fake_diff = cv2.warpAffine(fake_diff, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
-            img_white[img_white>20] = 255
-            fthresh = 10
-            fake_diff[fake_diff<fthresh] = 0
-            fake_diff[fake_diff>=fthresh] = 255
-            img_mask = img_white
-            mask_h_inds, mask_w_inds = np.where(img_mask==255)
-            mask_h = np.max(mask_h_inds) - np.min(mask_h_inds)
-            mask_w = np.max(mask_w_inds) - np.min(mask_w_inds)
-            mask_size = int(np.sqrt(mask_h*mask_w))
-            k = max(mask_size//10, 10)
-            #k = max(mask_size//20, 6)
-            #k = 6
-            kernel = np.ones((k,k),np.uint8)
-            img_mask = cv2.erode(img_mask,kernel,iterations = 1)
-            kernel = np.ones((2,2),np.uint8)
-            fake_diff = cv2.dilate(fake_diff,kernel,iterations = 1)
-
-            face_mask = cv2.erode(face_mask,np.ones((11,11),np.uint8),iterations = 1)
-            fake_diff[face_mask==1] = 255
-
-            k = max(mask_size//20, 5)
-            #k = 3
-            #k = 3
-            kernel_size = (k, k)
-            blur_size = tuple(2*i+1 for i in kernel_size)
-            img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
-            k = 5
-            kernel_size = (k, k)
-            blur_size = tuple(2*i+1 for i in kernel_size)
-            fake_diff = cv2.blur(fake_diff, (11,11), 0)
-            ##fake_diff = cv2.GaussianBlur(fake_diff, blur_size, 0)
-            # print('blur_size: ', blur_size)
-            # fake_diff = cv2.blur(fake_diff, (21, 21), 0) # blur_size
-            img_mask /= 255
-            fake_diff /= 255
-            # img_mask = fake_diff
-            img_mask = img_mask*fake_diff
-            img_mask = np.reshape(img_mask, [img_mask.shape[0],img_mask.shape[1],1])
-            fake_merged = img_mask * bgr_fake + (1-img_mask) * target_img.astype(np.float32)
-            fake_merged = fake_merged.astype(np.uint8)
-            return fake_merged
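
The paste-back branch above reduces, after all the warping and mask erosion, to a per-pixel alpha composite: `mask * fake + (1 - mask) * target`. A standalone sketch of that final step on a tiny image:

    import numpy as np

    target = np.full((4, 4, 3), 200, np.float32)  # toy background
    fake = np.full((4, 4, 3), 50, np.float32)     # toy swapped face
    mask = np.zeros((4, 4, 1), np.float32)
    mask[1:3, 1:3] = 1.0                          # blend only the inner region
    merged = (mask * fake + (1 - mask) * target).astype(np.uint8)
    print(merged[..., 0])                         # 50 inside the mask, 200 outside
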
src/utils/dependencies/insightface/model_zoo/landmark.py
DELETED
@@ -1,114 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Organization  : insightface.ai
-# @Author        : Jia Guo
-# @Time          : 2021-05-04
-# @Function      :
-
-from __future__ import division
-import numpy as np
-import cv2
-import onnx
-import onnxruntime
-from ..utils import face_align
-from ..utils import transform
-from ..data import get_object
-
-__all__ = [
-    'Landmark',
-]
-
-
-class Landmark:
-    def __init__(self, model_file=None, session=None):
-        assert model_file is not None
-        self.model_file = model_file
-        self.session = session
-        find_sub = False
-        find_mul = False
-        model = onnx.load(self.model_file)
-        graph = model.graph
-        for nid, node in enumerate(graph.node[:8]):
-            #print(nid, node.name)
-            if node.name.startswith('Sub') or node.name.startswith('_minus'):
-                find_sub = True
-            if node.name.startswith('Mul') or node.name.startswith('_mul'):
-                find_mul = True
-            if nid<3 and node.name=='bn_data':
-                find_sub = True
-                find_mul = True
-        if find_sub and find_mul:
-            #mxnet arcface model
-            input_mean = 0.0
-            input_std = 1.0
-        else:
-            input_mean = 127.5
-            input_std = 128.0
-        self.input_mean = input_mean
-        self.input_std = input_std
-        #print('input mean and std:', model_file, self.input_mean, self.input_std)
-        if self.session is None:
-            self.session = onnxruntime.InferenceSession(self.model_file, None)
-        input_cfg = self.session.get_inputs()[0]
-        input_shape = input_cfg.shape
-        input_name = input_cfg.name
-        self.input_size = tuple(input_shape[2:4][::-1])
-        self.input_shape = input_shape
-        outputs = self.session.get_outputs()
-        output_names = []
-        for out in outputs:
-            output_names.append(out.name)
-        self.input_name = input_name
-        self.output_names = output_names
-        assert len(self.output_names)==1
-        output_shape = outputs[0].shape
-        self.require_pose = False
-        #print('init output_shape:', output_shape)
-        if output_shape[1]==3309:
-            self.lmk_dim = 3
-            self.lmk_num = 68
-            self.mean_lmk = get_object('meanshape_68.pkl')
-            self.require_pose = True
-        else:
-            self.lmk_dim = 2
-            self.lmk_num = output_shape[1]//self.lmk_dim
-        self.taskname = 'landmark_%dd_%d'%(self.lmk_dim, self.lmk_num)
-
-    def prepare(self, ctx_id, **kwargs):
-        if ctx_id<0:
-            self.session.set_providers(['CPUExecutionProvider'])
-
-    def get(self, img, face):
-        bbox = face.bbox
-        w, h = (bbox[2] - bbox[0]), (bbox[3] - bbox[1])
-        center = (bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2
-        rotate = 0
-        _scale = self.input_size[0] / (max(w, h)*1.5)
-        #print('param:', img.shape, bbox, center, self.input_size, _scale, rotate)
-        aimg, M = face_align.transform(img, center, self.input_size[0], _scale, rotate)
-        input_size = tuple(aimg.shape[0:2][::-1])
-        #assert input_size==self.input_size
-        blob = cv2.dnn.blobFromImage(aimg, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-        pred = self.session.run(self.output_names, {self.input_name : blob})[0][0]
-        if pred.shape[0] >= 3000:
-            pred = pred.reshape((-1, 3))
-        else:
-            pred = pred.reshape((-1, 2))
-        if self.lmk_num < pred.shape[0]:
-            pred = pred[self.lmk_num*-1:,:]
-        pred[:, 0:2] += 1
-        pred[:, 0:2] *= (self.input_size[0] // 2)
-        if pred.shape[1] == 3:
-            pred[:, 2] *= (self.input_size[0] // 2)
-
-        IM = cv2.invertAffineTransform(M)
-        pred = face_align.trans_points(pred, IM)
-        face[self.taskname] = pred
-        if self.require_pose:
-            P = transform.estimate_affine_matrix_3d23d(self.mean_lmk, pred)
-            s, R, t = transform.P2sRt(P)
-            rx, ry, rz = transform.matrix2angle(R)
-            pose = np.array( [rx, ry, rz], dtype=np.float32 )
-            face['pose'] = pose #pitch, yaw, roll
-        return pred
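
The decode in `get` above maps raw landmark coordinates from [-1, 1] into crop-space pixels: shift by +1, then scale by half the input size. A standalone sketch:

    import numpy as np

    input_size = 192  # the Landmark route fires on 192x192 inputs
    pred = np.array([[-1.0, -1.0], [0.0, 0.0], [1.0, 1.0]])  # toy raw landmarks
    pred[:, 0:2] += 1
    pred[:, 0:2] *= (input_size // 2)
    print(pred)  # corners map to (0, 0) and (192, 192), the center to (96, 96)
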
src/utils/dependencies/insightface/model_zoo/model_store.py
DELETED
@@ -1,103 +0,0 @@
-"""
-This code file mainly comes from https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/model_store.py
-"""
-from __future__ import print_function
-
-__all__ = ['get_model_file']
-import os
-import zipfile
-import glob
-
-from ..utils import download, check_sha1
-
-_model_sha1 = {
-    name: checksum
-    for checksum, name in [
-        ('95be21b58e29e9c1237f229dae534bd854009ce0', 'arcface_r100_v1'),
-        ('', 'arcface_mfn_v1'),
-        ('39fd1e087a2a2ed70a154ac01fecaa86c315d01b', 'retinaface_r50_v1'),
-        ('2c9de8116d1f448fd1d4661f90308faae34c990a', 'retinaface_mnet025_v1'),
-        ('0db1d07921d005e6c9a5b38e059452fc5645e5a4', 'retinaface_mnet025_v2'),
-        ('7dd8111652b7aac2490c5dcddeb268e53ac643e6', 'genderage_v1'),
-    ]
-}
-
-base_repo_url = 'https://insightface.ai/files/'
-_url_format = '{repo_url}models/{file_name}.zip'
-
-
-def short_hash(name):
-    if name not in _model_sha1:
-        raise ValueError(
-            'Pretrained model for {name} is not available.'.format(name=name))
-    return _model_sha1[name][:8]
-
-
-def find_params_file(dir_path):
-    if not os.path.exists(dir_path):
-        return None
-    paths = glob.glob("%s/*.params" % dir_path)
-    if len(paths) == 0:
-        return None
-    paths = sorted(paths)
-    return paths[-1]
-
-
-def get_model_file(name, root=os.path.join('~', '.insightface', 'models')):
-    r"""Return the location of a pretrained model on the local file system.
-
-    This function will download from the online model zoo when the model cannot be found or has a mismatch.
-    The root directory will be created if it doesn't exist.
-
-    Parameters
-    ----------
-    name : str
-        Name of the model.
-    root : str, default '~/.insightface/models'
-        Location for keeping the model parameters.
-
-    Returns
-    -------
-    file_path
-        Path to the requested pretrained model file.
-    """
-
-    file_name = name
-    root = os.path.expanduser(root)
-    dir_path = os.path.join(root, name)
-    file_path = find_params_file(dir_path)
-    #file_path = os.path.join(root, file_name + '.params')
-    sha1_hash = _model_sha1[name]
-    if file_path is not None:
-        if check_sha1(file_path, sha1_hash):
-            return file_path
-        else:
-            print(
-                'Mismatch in the content of model file detected. Downloading again.'
-            )
-    else:
-        print('Model file is not found. Downloading.')
-
-    if not os.path.exists(root):
-        os.makedirs(root)
-    if not os.path.exists(dir_path):
-        os.makedirs(dir_path)
-
-    zip_file_path = os.path.join(root, file_name + '.zip')
-    repo_url = base_repo_url
-    if repo_url[-1] != '/':
-        repo_url = repo_url + '/'
-    download(_url_format.format(repo_url=repo_url, file_name=file_name),
-             path=zip_file_path,
-             overwrite=True)
-    with zipfile.ZipFile(zip_file_path) as zf:
-        zf.extractall(dir_path)
-    os.remove(zip_file_path)
-    file_path = find_params_file(dir_path)
-
-    if check_sha1(file_path, sha1_hash):
-        return file_path
-    else:
-        raise ValueError(
-            'Downloaded file has different hash. Please try again.')
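
`check_sha1` is imported from `..utils`, which this commit does not show; presumably it hashes the whole file and compares hex digests. A minimal sketch under that assumption (the function name below is illustrative, not the library's):

    import hashlib

    def check_sha1_sketch(filename, sha1_hash):
        # stream the file in 1 MiB chunks so large .params files stay cheap
        sha1 = hashlib.sha1()
        with open(filename, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                sha1.update(chunk)
        return sha1.hexdigest() == sha1_hash
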
src/utils/dependencies/insightface/model_zoo/model_zoo.py
DELETED
@@ -1,97 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Organization  : insightface.ai
-# @Author        : Jia Guo
-# @Time          : 2021-05-04
-# @Function      :
-
-import os
-import os.path as osp
-import glob
-import onnxruntime
-from .arcface_onnx import *
-from .retinaface import *
-#from .scrfd import *
-from .landmark import *
-from .attribute import Attribute
-from .inswapper import INSwapper
-from ..utils import download_onnx
-
-__all__ = ['get_model']
-
-
-class PickableInferenceSession(onnxruntime.InferenceSession):
-    # This is a wrapper to make the current InferenceSession class pickable.
-    def __init__(self, model_path, **kwargs):
-        super().__init__(model_path, **kwargs)
-        self.model_path = model_path
-
-    def __getstate__(self):
-        return {'model_path': self.model_path}
-
-    def __setstate__(self, values):
-        model_path = values['model_path']
-        self.__init__(model_path)
-
-class ModelRouter:
-    def __init__(self, onnx_file):
-        self.onnx_file = onnx_file
-
-    def get_model(self, **kwargs):
-        session = PickableInferenceSession(self.onnx_file, **kwargs)
-        # print(f'Applied providers: {session._providers}, with options: {session._provider_options}')
-        inputs = session.get_inputs()
-        input_cfg = inputs[0]
-        input_shape = input_cfg.shape
-        outputs = session.get_outputs()
-
-        if len(outputs)>=5:
-            return RetinaFace(model_file=self.onnx_file, session=session)
-        elif input_shape[2]==192 and input_shape[3]==192:
-            return Landmark(model_file=self.onnx_file, session=session)
-        elif input_shape[2]==96 and input_shape[3]==96:
-            return Attribute(model_file=self.onnx_file, session=session)
-        elif len(inputs)==2 and input_shape[2]==128 and input_shape[3]==128:
-            return INSwapper(model_file=self.onnx_file, session=session)
-        elif input_shape[2]==input_shape[3] and input_shape[2]>=112 and input_shape[2]%16==0:
-            return ArcFaceONNX(model_file=self.onnx_file, session=session)
-        else:
-            #raise RuntimeError('error on model routing')
-            return None
-
-def find_onnx_file(dir_path):
-    if not os.path.exists(dir_path):
-        return None
-    paths = glob.glob("%s/*.onnx" % dir_path)
-    if len(paths) == 0:
-        return None
-    paths = sorted(paths)
-    return paths[-1]
-
-def get_default_providers():
-    return ['CUDAExecutionProvider', 'CPUExecutionProvider']
-
-def get_default_provider_options():
-    return None
-
-def get_model(name, **kwargs):
-    root = kwargs.get('root', '~/.insightface')
-    root = os.path.expanduser(root)
-    model_root = osp.join(root, 'models')
-    allow_download = kwargs.get('download', False)
-    download_zip = kwargs.get('download_zip', False)
-    if not name.endswith('.onnx'):
-        model_dir = os.path.join(model_root, name)
-        model_file = find_onnx_file(model_dir)
-        if model_file is None:
-            return None
-    else:
-        model_file = name
-    if not osp.exists(model_file) and allow_download:
-        model_file = download_onnx('models', model_file, root=root, download_zip=download_zip)
-    assert osp.exists(model_file), 'model_file %s should exist'%model_file
-    assert osp.isfile(model_file), 'model_file %s should be a file'%model_file
-    router = ModelRouter(model_file)
-    providers = kwargs.get('providers', get_default_providers())
-    provider_options = kwargs.get('provider_options', get_default_provider_options())
-    model = router.get_model(providers=providers, provider_options=provider_options)
-    return model
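
`ModelRouter.get_model` above dispatches purely on the ONNX graph's I/O signature: output count first, then input spatial size. A standalone restatement of those rules as a small function (names are illustrative):

    def route_sketch(num_outputs, num_inputs, input_shape):
        h, w = input_shape[2], input_shape[3]
        if num_outputs >= 5:
            return 'RetinaFace'          # detector heads produce many outputs
        if (h, w) == (192, 192):
            return 'Landmark'
        if (h, w) == (96, 96):
            return 'Attribute'
        if num_inputs == 2 and (h, w) == (128, 128):
            return 'INSwapper'           # image plus identity latent
        if h == w and h >= 112 and h % 16 == 0:
            return 'ArcFaceONNX'
        return None

    print(route_sketch(1, 1, ['None', 3, 112, 112]))  # ArcFaceONNX
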
src/utils/dependencies/insightface/model_zoo/retinaface.py
DELETED
@@ -1,301 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Organization  : insightface.ai
-# @Author        : Jia Guo
-# @Time          : 2021-09-18
-# @Function      :
-
-from __future__ import division
-import datetime
-import numpy as np
-import onnx
-import onnxruntime
-import os
-import os.path as osp
-import cv2
-import sys
-
-def softmax(z):
-    assert len(z.shape) == 2
-    s = np.max(z, axis=1)
-    s = s[:, np.newaxis]  # necessary step to do broadcasting
-    e_x = np.exp(z - s)
-    div = np.sum(e_x, axis=1)
-    div = div[:, np.newaxis]  # ditto
-    return e_x / div
-
-def distance2bbox(points, distance, max_shape=None):
-    """Decode distance prediction to bounding box.
-
-    Args:
-        points (Tensor): Shape (n, 2), [x, y].
-        distance (Tensor): Distance from the given point to 4
-            boundaries (left, top, right, bottom).
-        max_shape (tuple): Shape of the image.
-
-    Returns:
-        Tensor: Decoded bboxes.
-    """
-    x1 = points[:, 0] - distance[:, 0]
-    y1 = points[:, 1] - distance[:, 1]
-    x2 = points[:, 0] + distance[:, 2]
-    y2 = points[:, 1] + distance[:, 3]
-    if max_shape is not None:
-        x1 = x1.clamp(min=0, max=max_shape[1])
-        y1 = y1.clamp(min=0, max=max_shape[0])
-        x2 = x2.clamp(min=0, max=max_shape[1])
-        y2 = y2.clamp(min=0, max=max_shape[0])
-    return np.stack([x1, y1, x2, y2], axis=-1)
-
-def distance2kps(points, distance, max_shape=None):
-    """Decode distance prediction to keypoints.
-
-    Args:
-        points (Tensor): Shape (n, 2), [x, y].
-        distance (Tensor): Distance from the given point to each
-            keypoint coordinate.
-        max_shape (tuple): Shape of the image.
-
-    Returns:
-        Tensor: Decoded keypoints.
-    """
-    preds = []
-    for i in range(0, distance.shape[1], 2):
-        px = points[:, i%2] + distance[:, i]
-        py = points[:, i%2+1] + distance[:, i+1]
-        if max_shape is not None:
-            px = px.clamp(min=0, max=max_shape[1])
-            py = py.clamp(min=0, max=max_shape[0])
-        preds.append(px)
-        preds.append(py)
-    return np.stack(preds, axis=-1)
-
-class RetinaFace:
-    def __init__(self, model_file=None, session=None):
-        import onnxruntime
-        self.model_file = model_file
-        self.session = session
-        self.taskname = 'detection'
-        if self.session is None:
-            assert self.model_file is not None
-            assert osp.exists(self.model_file)
-            self.session = onnxruntime.InferenceSession(self.model_file, None)
-        self.center_cache = {}
-        self.nms_thresh = 0.4
-        self.det_thresh = 0.5
-        self._init_vars()
-
-    def _init_vars(self):
-        input_cfg = self.session.get_inputs()[0]
-        input_shape = input_cfg.shape
-        #print(input_shape)
-        if isinstance(input_shape[2], str):
-            self.input_size = None
-        else:
-            self.input_size = tuple(input_shape[2:4][::-1])
-        #print('image_size:', self.image_size)
-        input_name = input_cfg.name
-        self.input_shape = input_shape
-        outputs = self.session.get_outputs()
-        output_names = []
-        for o in outputs:
-            output_names.append(o.name)
-        self.input_name = input_name
-        self.output_names = output_names
-        self.input_mean = 127.5
-        self.input_std = 128.0
-        #print(self.output_names)
-        #assert len(outputs)==10 or len(outputs)==15
-        self.use_kps = False
-        self._anchor_ratio = 1.0
-        self._num_anchors = 1
-        if len(outputs)==6:
-            self.fmc = 3
-            self._feat_stride_fpn = [8, 16, 32]
-            self._num_anchors = 2
-        elif len(outputs)==9:
-            self.fmc = 3
-            self._feat_stride_fpn = [8, 16, 32]
-            self._num_anchors = 2
-            self.use_kps = True
-        elif len(outputs)==10:
-            self.fmc = 5
-            self._feat_stride_fpn = [8, 16, 32, 64, 128]
-            self._num_anchors = 1
-        elif len(outputs)==15:
-            self.fmc = 5
-            self._feat_stride_fpn = [8, 16, 32, 64, 128]
-            self._num_anchors = 1
-            self.use_kps = True
-
-    def prepare(self, ctx_id, **kwargs):
-        if ctx_id<0:
-            self.session.set_providers(['CPUExecutionProvider'])
-        nms_thresh = kwargs.get('nms_thresh', None)
-        if nms_thresh is not None:
-            self.nms_thresh = nms_thresh
-        det_thresh = kwargs.get('det_thresh', None)
-        if det_thresh is not None:
-            self.det_thresh = det_thresh
-        input_size = kwargs.get('input_size', None)
-        if input_size is not None:
-            if self.input_size is not None:
-                print('warning: det_size is already set in detection model, ignore')
-            else:
-                self.input_size = input_size
-
-    def forward(self, img, threshold):
-        scores_list = []
-        bboxes_list = []
-        kpss_list = []
-        input_size = tuple(img.shape[0:2][::-1])
-        blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-        net_outs = self.session.run(self.output_names, {self.input_name : blob})
-
-        input_height = blob.shape[2]
-        input_width = blob.shape[3]
-        fmc = self.fmc
-        for idx, stride in enumerate(self._feat_stride_fpn):
-            scores = net_outs[idx]
-            bbox_preds = net_outs[idx+fmc]
-            bbox_preds = bbox_preds * stride
-            if self.use_kps:
-                kps_preds = net_outs[idx+fmc*2] * stride
-            height = input_height // stride
-            width = input_width // stride
-            K = height * width
-            key = (height, width, stride)
-            if key in self.center_cache:
-                anchor_centers = self.center_cache[key]
-            else:
-                #solution-1, c style:
-                #anchor_centers = np.zeros( (height, width, 2), dtype=np.float32 )
-                #for i in range(height):
-                #    anchor_centers[i, :, 1] = i
-                #for i in range(width):
-                #    anchor_centers[:, i, 0] = i
-
-                #solution-2:
-                #ax = np.arange(width, dtype=np.float32)
-                #ay = np.arange(height, dtype=np.float32)
-                #xv, yv = np.meshgrid(np.arange(width), np.arange(height))
-                #anchor_centers = np.stack([xv, yv], axis=-1).astype(np.float32)
-
-                #solution-3:
-                anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
-                #print(anchor_centers.shape)
-
-                anchor_centers = (anchor_centers * stride).reshape( (-1, 2) )
-                if self._num_anchors>1:
-                    anchor_centers = np.stack([anchor_centers]*self._num_anchors, axis=1).reshape( (-1,2) )
-                if len(self.center_cache)<100:
-                    self.center_cache[key] = anchor_centers
-
-            pos_inds = np.where(scores>=threshold)[0]
-            bboxes = distance2bbox(anchor_centers, bbox_preds)
-            pos_scores = scores[pos_inds]
-            pos_bboxes = bboxes[pos_inds]
-            scores_list.append(pos_scores)
-            bboxes_list.append(pos_bboxes)
-            if self.use_kps:
-                kpss = distance2kps(anchor_centers, kps_preds)
-                #kpss = kps_preds
-                kpss = kpss.reshape( (kpss.shape[0], -1, 2) )
-                pos_kpss = kpss[pos_inds]
-                kpss_list.append(pos_kpss)
-        return scores_list, bboxes_list, kpss_list
-
-    def detect(self, img, input_size = None, max_num=0, metric='default'):
-        assert input_size is not None or self.input_size is not None
-        input_size = self.input_size if input_size is None else input_size
-
-        im_ratio = float(img.shape[0]) / img.shape[1]
-        model_ratio = float(input_size[1]) / input_size[0]
-        if im_ratio>model_ratio:
-            new_height = input_size[1]
-            new_width = int(new_height / im_ratio)
-        else:
-            new_width = input_size[0]
-            new_height = int(new_width * im_ratio)
-        det_scale = float(new_height) / img.shape[0]
-        resized_img = cv2.resize(img, (new_width, new_height))
-        det_img = np.zeros( (input_size[1], input_size[0], 3), dtype=np.uint8 )
-        det_img[:new_height, :new_width, :] = resized_img
-
-        scores_list, bboxes_list, kpss_list = self.forward(det_img, self.det_thresh)
-
-        scores = np.vstack(scores_list)
-        scores_ravel = scores.ravel()
-        order = scores_ravel.argsort()[::-1]
-        bboxes = np.vstack(bboxes_list) / det_scale
-        if self.use_kps:
-            kpss = np.vstack(kpss_list) / det_scale
-        pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
-        pre_det = pre_det[order, :]
-        keep = self.nms(pre_det)
-        det = pre_det[keep, :]
-        if self.use_kps:
-            kpss = kpss[order,:,:]
-            kpss = kpss[keep,:,:]
-        else:
-            kpss = None
-        if max_num > 0 and det.shape[0] > max_num:
-            area = (det[:, 2] - det[:, 0]) * (det[:, 3] -
-                                              det[:, 1])
-            img_center = img.shape[0] // 2, img.shape[1] // 2
-            offsets = np.vstack([
-                (det[:, 0] + det[:, 2]) / 2 - img_center[1],
-                (det[:, 1] + det[:, 3]) / 2 - img_center[0]
-            ])
-            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
-            if metric=='max':
-                values = area
-            else:
-                values = area - offset_dist_squared * 2.0  # some extra weight on the centering
-            bindex = np.argsort(
-                values)[::-1]  # some extra weight on the centering
-            bindex = bindex[0:max_num]
-            det = det[bindex, :]
-            if kpss is not None:
-                kpss = kpss[bindex, :]
-        return det, kpss
-
-    def nms(self, dets):
-        thresh = self.nms_thresh
-        x1 = dets[:, 0]
-        y1 = dets[:, 1]
-        x2 = dets[:, 2]
-        y2 = dets[:, 3]
-        scores = dets[:, 4]
-
-        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
-        order = scores.argsort()[::-1]
-
-        keep = []
-        while order.size > 0:
-            i = order[0]
-            keep.append(i)
-            xx1 = np.maximum(x1[i], x1[order[1:]])
-            yy1 = np.maximum(y1[i], y1[order[1:]])
-            xx2 = np.minimum(x2[i], x2[order[1:]])
-            yy2 = np.minimum(y2[i], y2[order[1:]])
-
-            w = np.maximum(0.0, xx2 - xx1 + 1)
-            h = np.maximum(0.0, yy2 - yy1 + 1)
-            inter = w * h
-            ovr = inter / (areas[i] + areas[order[1:]] - inter)
-
-            inds = np.where(ovr <= thresh)[0]
-            order = order[inds + 1]
-
-        return keep
-
-def get_retinaface(name, download=False, root='~/.insightface/models', **kwargs):
-    if not download:
-        assert os.path.exists(name)
-        return RetinaFace(name)
-    else:
-        from .model_store import get_model_file
-        _file = get_model_file("retinaface_%s" % name, root=root)
-        return RetinaFace(_file)
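
The `nms` method above is standard greedy IoU suppression: keep the highest-scoring box, drop everything overlapping it beyond the threshold, repeat. A standalone run on toy detections `[x1, y1, x2, y2, score]`:

    import numpy as np

    def greedy_nms(dets, thresh=0.4):  # same logic as RetinaFace.nms above
        x1, y1, x2, y2, scores = (dets[:, i] for i in range(5))
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(int(i))
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])
            inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            order = order[np.where(ovr <= thresh)[0] + 1]
        return keep

    dets = np.array([[0, 0, 10, 10, 0.9], [1, 1, 11, 11, 0.8], [50, 50, 60, 60, 0.7]])
    print(greedy_nms(dets))  # [0, 2]: the near-duplicate of box 0 is suppressed
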
src/utils/dependencies/insightface/model_zoo/scrfd.py
DELETED
@@ -1,348 +0,0 @@
-# -*- coding: utf-8 -*-
-# @Organization  : insightface.ai
-# @Author        : Jia Guo
-# @Time          : 2021-05-04
-# @Function      :
-
-from __future__ import division
-import datetime
-import numpy as np
-import onnx
-import onnxruntime
-import os
-import os.path as osp
-import cv2
-import sys
-
-def softmax(z):
-    assert len(z.shape) == 2
-    s = np.max(z, axis=1)
-    s = s[:, np.newaxis]  # necessary step to do broadcasting
-    e_x = np.exp(z - s)
-    div = np.sum(e_x, axis=1)
-    div = div[:, np.newaxis]  # ditto
-    return e_x / div
-
-def distance2bbox(points, distance, max_shape=None):
-    """Decode distance prediction to bounding box.
-
-    Args:
-        points (Tensor): Shape (n, 2), [x, y].
-        distance (Tensor): Distance from the given point to 4
-            boundaries (left, top, right, bottom).
-        max_shape (tuple): Shape of the image.
-
-    Returns:
-        Tensor: Decoded bboxes.
-    """
-    x1 = points[:, 0] - distance[:, 0]
-    y1 = points[:, 1] - distance[:, 1]
-    x2 = points[:, 0] + distance[:, 2]
-    y2 = points[:, 1] + distance[:, 3]
-    if max_shape is not None:
-        x1 = x1.clamp(min=0, max=max_shape[1])
-        y1 = y1.clamp(min=0, max=max_shape[0])
-        x2 = x2.clamp(min=0, max=max_shape[1])
-        y2 = y2.clamp(min=0, max=max_shape[0])
-    return np.stack([x1, y1, x2, y2], axis=-1)
-
-def distance2kps(points, distance, max_shape=None):
-    """Decode distance prediction to keypoints.
-
-    Args:
-        points (Tensor): Shape (n, 2), [x, y].
-        distance (Tensor): Distance from the given point to each
-            keypoint coordinate.
-        max_shape (tuple): Shape of the image.
-
-    Returns:
-        Tensor: Decoded keypoints.
-    """
-    preds = []
-    for i in range(0, distance.shape[1], 2):
-        px = points[:, i % 2] + distance[:, i]
-        py = points[:, i % 2 + 1] + distance[:, i + 1]
-        if max_shape is not None:
-            px = px.clamp(min=0, max=max_shape[1])
-            py = py.clamp(min=0, max=max_shape[0])
-        preds.append(px)
-        preds.append(py)
-    return np.stack(preds, axis=-1)
-
-class SCRFD:
-    def __init__(self, model_file=None, session=None):
-        self.model_file = model_file
-        self.session = session
-        self.taskname = 'detection'
-        self.batched = False
-        if self.session is None:
-            assert self.model_file is not None
-            assert osp.exists(self.model_file)
-            self.session = onnxruntime.InferenceSession(self.model_file, None)
-        self.center_cache = {}
-        self.nms_thresh = 0.4
-        self.det_thresh = 0.5
-        self._init_vars()
-
-    def _init_vars(self):
-        input_cfg = self.session.get_inputs()[0]
-        input_shape = input_cfg.shape
-        if isinstance(input_shape[2], str):
-            self.input_size = None
-        else:
-            self.input_size = tuple(input_shape[2:4][::-1])
-        input_name = input_cfg.name
-        self.input_shape = input_shape
-        outputs = self.session.get_outputs()
-        if len(outputs[0].shape) == 3:
-            self.batched = True
-        output_names = []
-        for o in outputs:
-            output_names.append(o.name)
-        self.input_name = input_name
-        self.output_names = output_names
-        self.input_mean = 127.5
-        self.input_std = 128.0
-        self.use_kps = False
-        self._anchor_ratio = 1.0
-        self._num_anchors = 1
-        if len(outputs) == 6:
-            self.fmc = 3
-            self._feat_stride_fpn = [8, 16, 32]
-            self._num_anchors = 2
-        elif len(outputs) == 9:
-            self.fmc = 3
-            self._feat_stride_fpn = [8, 16, 32]
-            self._num_anchors = 2
-            self.use_kps = True
-        elif len(outputs) == 10:
-            self.fmc = 5
-            self._feat_stride_fpn = [8, 16, 32, 64, 128]
-            self._num_anchors = 1
-        elif len(outputs) == 15:
-            self.fmc = 5
-            self._feat_stride_fpn = [8, 16, 32, 64, 128]
-            self._num_anchors = 1
-            self.use_kps = True
-
-    def prepare(self, ctx_id, **kwargs):
-        if ctx_id < 0:
-            self.session.set_providers(['CPUExecutionProvider'])
-        nms_thresh = kwargs.get('nms_thresh', None)
-        if nms_thresh is not None:
-            self.nms_thresh = nms_thresh
-        det_thresh = kwargs.get('det_thresh', None)
-        if det_thresh is not None:
-            self.det_thresh = det_thresh
-        input_size = kwargs.get('input_size', None)
-        if input_size is not None:
-            if self.input_size is not None:
-                print('warning: det_size is already set in scrfd model, ignore')
-            else:
-                self.input_size = input_size
-
-    def forward(self, img, threshold):
-        scores_list = []
-        bboxes_list = []
-        kpss_list = []
-        input_size = tuple(img.shape[0:2][::-1])
-        blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-        net_outs = self.session.run(self.output_names, {self.input_name: blob})
-
-        input_height = blob.shape[2]
-        input_width = blob.shape[3]
-        fmc = self.fmc
-        for idx, stride in enumerate(self._feat_stride_fpn):
-            # If the model supports a batch dim, take the first output
-            if self.batched:
-                scores = net_outs[idx][0]
-                bbox_preds = net_outs[idx + fmc][0]
-                bbox_preds = bbox_preds * stride
-                if self.use_kps:
-                    kps_preds = net_outs[idx + fmc * 2][0] * stride
-            # If the model doesn't support batching, take the output as is
-            else:
-                scores = net_outs[idx]
-                bbox_preds = net_outs[idx + fmc]
-                bbox_preds = bbox_preds * stride
-                if self.use_kps:
-                    kps_preds = net_outs[idx + fmc * 2] * stride
-
-            height = input_height // stride
-            width = input_width // stride
-            K = height * width
-            key = (height, width, stride)
-            if key in self.center_cache:
-                anchor_centers = self.center_cache[key]
-            else:
-                anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
-                anchor_centers = (anchor_centers * stride).reshape((-1, 2))
-                if self._num_anchors > 1:
-                    anchor_centers = np.stack([anchor_centers] * self._num_anchors, axis=1).reshape((-1, 2))
-                if len(self.center_cache) < 100:
-                    self.center_cache[key] = anchor_centers
-
-            pos_inds = np.where(scores >= threshold)[0]
-            bboxes = distance2bbox(anchor_centers, bbox_preds)
-            pos_scores = scores[pos_inds]
-            pos_bboxes = bboxes[pos_inds]
-            scores_list.append(pos_scores)
-            bboxes_list.append(pos_bboxes)
-            if self.use_kps:
-                kpss = distance2kps(anchor_centers, kps_preds)
-                kpss = kpss.reshape((kpss.shape[0], -1, 2))
-                pos_kpss = kpss[pos_inds]
-                kpss_list.append(pos_kpss)
-        return scores_list, bboxes_list, kpss_list
-
-    def detect(self, img, input_size=None, max_num=0, metric='default'):
-        assert input_size is not None or self.input_size is not None
-        input_size = self.input_size if input_size is None else input_size
-
-        im_ratio = float(img.shape[0]) / img.shape[1]
-        model_ratio = float(input_size[1]) / input_size[0]
-        if im_ratio > model_ratio:
-            new_height = input_size[1]
-            new_width = int(new_height / im_ratio)
-        else:
-            new_width = input_size[0]
-            new_height = int(new_width * im_ratio)
-        det_scale = float(new_height) / img.shape[0]
-        resized_img = cv2.resize(img, (new_width, new_height))
-        det_img = np.zeros((input_size[1], input_size[0], 3), dtype=np.uint8)
-        det_img[:new_height, :new_width, :] = resized_img
-
-        scores_list, bboxes_list, kpss_list = self.forward(det_img, self.det_thresh)
-
-        scores = np.vstack(scores_list)
-        scores_ravel = scores.ravel()
-        order = scores_ravel.argsort()[::-1]
-        bboxes = np.vstack(bboxes_list) / det_scale
-        if self.use_kps:
-            kpss = np.vstack(kpss_list) / det_scale
-        pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
-        pre_det = pre_det[order, :]
-        keep = self.nms(pre_det)
-        det = pre_det[keep, :]
-        if self.use_kps:
-            kpss = kpss[order, :, :]
-            kpss = kpss[keep, :, :]
-        else:
-            kpss = None
-        if max_num > 0 and det.shape[0] > max_num:
-            area = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
-            img_center = img.shape[0] // 2, img.shape[1] // 2
-            offsets = np.vstack([
-                (det[:, 0] + det[:, 2]) / 2 - img_center[1],
-                (det[:, 1] + det[:, 3]) / 2 - img_center[0]
-            ])
-            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
-            if metric == 'max':
-                values = area
-            else:
-                values = area - offset_dist_squared * 2.0  # some extra weight on the centering
-            bindex = np.argsort(values)[::-1]
-            bindex = bindex[0:max_num]
-            det = det[bindex, :]
-            if kpss is not None:
-                kpss = kpss[bindex, :]
-        return det, kpss
-
-    def nms(self, dets):
-        thresh = self.nms_thresh
-        x1 = dets[:, 0]
-        y1 = dets[:, 1]
-        x2 = dets[:, 2]
-        y2 = dets[:, 3]
-        scores = dets[:, 4]
-
-        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
-        order = scores.argsort()[::-1]
-
-        keep = []
-        while order.size > 0:
-            i = order[0]
-            keep.append(i)
-            xx1 = np.maximum(x1[i], x1[order[1:]])
-            yy1 = np.maximum(y1[i], y1[order[1:]])
-            xx2 = np.minimum(x2[i], x2[order[1:]])
-            yy2 = np.minimum(y2[i], y2[order[1:]])
-
-            w = np.maximum(0.0, xx2 - xx1 + 1)
-            h = np.maximum(0.0, yy2 - yy1 + 1)
-            inter = w * h
-            ovr = inter / (areas[i] + areas[order[1:]] - inter)
-
-            inds = np.where(ovr <= thresh)[0]
-            order = order[inds + 1]
-
-        return keep
-
-def get_scrfd(name, download=False, root='~/.insightface/models', **kwargs):
-    if not download:
-        assert os.path.exists(name)
-        return SCRFD(name)
-    else:
-        from .model_store import get_model_file
-        _file = get_model_file("scrfd_%s" % name, root=root)
-        return SCRFD(_file)
-
-
-def scrfd_2p5gkps(**kwargs):
-    return get_scrfd("2p5gkps", download=True, **kwargs)
-
-
-if __name__ == '__main__':
-    import glob
-    detector = SCRFD(model_file='./det.onnx')
-    detector.prepare(-1)
-    img_paths = ['tests/data/t1.jpg']
-    for img_path in img_paths:
-        img = cv2.imread(img_path)
-
-        for _ in range(1):
-            ta = datetime.datetime.now()
-            bboxes, kpss = detector.detect(img, input_size=(640, 640))  # fixed: original passed 0.5 positionally into input_size
-            tb = datetime.datetime.now()
-            print('all cost:', (tb - ta).total_seconds() * 1000)
-        print(img_path, bboxes.shape)
-        if kpss is not None:
-            print(kpss.shape)
-        for i in range(bboxes.shape[0]):
-            bbox = bboxes[i]
-            x1, y1, x2, y2, score = bbox.astype(int)  # fixed: np.int was removed in NumPy >= 1.24
-            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
-            if kpss is not None:
-                kps = kpss[i]
-                for kp in kps:
-                    kp = kp.astype(int)
-                    cv2.circle(img, tuple(kp), 1, (0, 0, 255), 2)
-        filename = img_path.split('/')[-1]
-        print('output:', filename)
-        cv2.imwrite('./outputs/%s' % filename, img)
-
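The `distance2bbox`/`distance2kps` helpers in the deleted file turn per-anchor distance regressions into absolute image coordinates (in `forward`, the raw predictions are first multiplied by the feature-map stride). A small worked example of the bbox case, with invented numbers:

```python
import numpy as np

def distance2bbox(points, distance):
    # An anchor center (x, y) plus distances (left, top, right, bottom)
    # becomes a box (x1, y1, x2, y2), as in scrfd.py above.
    x1 = points[:, 0] - distance[:, 0]
    y1 = points[:, 1] - distance[:, 1]
    x2 = points[:, 0] + distance[:, 2]
    y2 = points[:, 1] + distance[:, 3]
    return np.stack([x1, y1, x2, y2], axis=-1)

points = np.array([[32.0, 32.0]])             # one anchor center
distance = np.array([[8.0, 4.0, 8.0, 12.0]])  # left, top, right, bottom
print(distance2bbox(points, distance))        # [[24. 28. 40. 44.]]
```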
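And a minimal usage sketch mirroring the file's `__main__` block; the model path, image path, and `scrfd` import below are placeholders, assuming the module is on `sys.path`:

```python
import cv2
from scrfd import SCRFD  # hypothetical import; adjust to where scrfd.py lives

detector = SCRFD(model_file='./det.onnx')  # placeholder ONNX model path
detector.prepare(-1)                       # ctx_id < 0 selects the CPU provider

img = cv2.imread('face.jpg')               # any BGR test image
bboxes, kpss = detector.detect(img, input_size=(640, 640))
for x1, y1, x2, y2, score in bboxes.astype(int).tolist():
    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
if kpss is not None:                       # (N, 5, 2) keypoints when the model predicts them
    for kps in kpss.astype(int).tolist():
        for x, y in kps:
            cv2.circle(img, (x, y), 1, (0, 0, 255), 2)
cv2.imwrite('out.jpg', img)
```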
src/utils/dependencies/insightface/utils/__init__.py
DELETED
@@ -1,6 +0,0 @@
-from __future__ import absolute_import
-
-from .storage import download, ensure_available, download_onnx
-from .filesystem import get_model_dir
-from .filesystem import makedirs, try_import_dali
-from .constant import *
src/utils/dependencies/insightface/utils/constant.py
DELETED
@@ -1,3 +0,0 @@
-
-DEFAULT_MP_NAME = 'buffalo_l'
-
src/utils/dependencies/insightface/utils/download.py
DELETED
@@ -1,95 +0,0 @@
-"""
-This code file mainly comes from https://github.com/dmlc/gluon-cv/blob/master/gluoncv/utils/download.py
-"""
-import os
-import hashlib
-import requests
-from tqdm import tqdm
-
-
-def check_sha1(filename, sha1_hash):
-    """Check whether the sha1 hash of the file content matches the expected hash.
-    Parameters
-    ----------
-    filename : str
-        Path to the file.
-    sha1_hash : str
-        Expected sha1 hash in hexadecimal digits.
-    Returns
-    -------
-    bool
-        Whether the file content matches the expected hash.
-    """
-    sha1 = hashlib.sha1()
-    with open(filename, 'rb') as f:
-        while True:
-            data = f.read(1048576)
-            if not data:
-                break
-            sha1.update(data)
-
-    sha1_file = sha1.hexdigest()
-    l = min(len(sha1_file), len(sha1_hash))
-    return sha1.hexdigest()[0:l] == sha1_hash[0:l]
-
-
-def download_file(url, path=None, overwrite=False, sha1_hash=None):
-    """Download a given URL.
-    Parameters
-    ----------
-    url : str
-        URL to download.
-    path : str, optional
-        Destination path to store the downloaded file. By default stores to the
-        current directory with the same name as in the url.
-    overwrite : bool, optional
-        Whether to overwrite the destination file if it already exists.
-    sha1_hash : str, optional
-        Expected sha1 hash in hexadecimal digits. Will ignore an existing file when the hash
-        is specified but doesn't match.
-    Returns
-    -------
-    str
-        The file path of the downloaded file.
-    """
-    if path is None:
-        fname = url.split('/')[-1]
-    else:
-        path = os.path.expanduser(path)
-        if os.path.isdir(path):
-            fname = os.path.join(path, url.split('/')[-1])
-        else:
-            fname = path
-
-    if overwrite or not os.path.exists(fname) or (
-            sha1_hash and not check_sha1(fname, sha1_hash)):
-        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
-        if not os.path.exists(dirname):
-            os.makedirs(dirname)
-
-        print('Downloading %s from %s...' % (fname, url))
-        r = requests.get(url, stream=True)
-        if r.status_code != 200:
-            raise RuntimeError("Failed downloading url %s" % url)
-        total_length = r.headers.get('content-length')
-        with open(fname, 'wb') as f:
-            if total_length is None:  # no content length header
-                for chunk in r.iter_content(chunk_size=1024):
-                    if chunk:  # filter out keep-alive new chunks
-                        f.write(chunk)
-            else:
-                total_length = int(total_length)
-                for chunk in tqdm(r.iter_content(chunk_size=1024),
-                                  total=int(total_length / 1024. + 0.5),
-                                  unit='KB',
-                                  unit_scale=False,
-                                  dynamic_ncols=True):
-                    f.write(chunk)
-
-        if sha1_hash and not check_sha1(fname, sha1_hash):
-            raise UserWarning('File {} is downloaded but the content hash does not match. '
-                              'The repo may be outdated or download may be incomplete. '
-                              'If the "repo_url" is overridden, consider switching to '
-                              'the default repo.'.format(fname))
-
-    return fname
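A short sketch of how `check_sha1` and `download_file` above compose; the URL, hash, and import path below are placeholders for illustration, not real artifacts, and `~/models` is assumed to exist as a directory:

```python
# Hypothetical values; substitute a real model URL and its published sha1.
from download import download_file, check_sha1  # adjust to where download.py lives

url = 'https://example.com/models/det.onnx'
expected_sha1 = '0000000000000000000000000000000000000000'

# Stores into ~/models under the URL's filename; an existing file is reused
# unless overwrite=True or its sha1 prefix no longer matches.
path = download_file(url, path='~/models', sha1_hash=expected_sha1)
assert check_sha1(path, expected_sha1)
print('model at', path)
```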