# import pdb

# import config
# from pathlib import Path
# import sys

# PROJECT_ROOT = Path(__file__).absolute().parents[0].absolute()
# sys.path.insert(0, str(PROJECT_ROOT))
# import os

# import cv2
# import einops
# import numpy as np
# import random
# import time
# import json

# # from pytorch_lightning import seed_everything
# from preprocess.openpose.annotator.util import resize_image, HWC3
# from preprocess.openpose.annotator.openpose import OpenposeDetector

# import argparse
# from PIL import Image
# import torch
# import pdb

# # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'

# class OpenPose:
#     def __init__(self, gpu_id: int):
#         # self.gpu_id = gpu_id
#         # torch.cuda.set_device(gpu_id)
#         self.preprocessor = OpenposeDetector()

#     def __call__(self, input_image, resolution=384):
#         # torch.cuda.set_device(self.gpu_id)
#         if isinstance(input_image, Image.Image):
#             input_image = np.asarray(input_image)
#         elif type(input_image) == str:
#             input_image = np.asarray(Image.open(input_image))
#         else:
#             raise ValueError
#         with torch.no_grad():
#             input_image = HWC3(input_image)
#             input_image = resize_image(input_image, resolution)
#             H, W, C = input_image.shape
#             assert (H == 512 and W == 384), 'Incorrect input image shape'
#             pose, detected_map = self.preprocessor(input_image, hand_and_face=False)

#             candidate = pose['bodies']['candidate']
#             subset = pose['bodies']['subset'][0][:18]
#             for i in range(18):
#                 if subset[i] == -1:
#                     candidate.insert(i, [0, 0])
#                     for j in range(i, 18):
#                         if(subset[j]) != -1:
#                             subset[j] += 1
#                 elif subset[i] != i:
#                     candidate.pop(i)
#                     for j in range(i, 18):
#                         if(subset[j]) != -1:
#                             subset[j] -= 1

#             candidate = candidate[:18]

#             for i in range(18):
#                 candidate[i][0] *= 384
#                 candidate[i][1] *= 512

#             keypoints = {"pose_keypoints_2d": candidate}
#             # with open("/home/aigc/ProjectVTON/OpenPose/keypoints/keypoints.json", "w") as f:
#             #     json.dump(keypoints, f)
#             #
#             # # print(candidate)
#             # output_image = cv2.resize(cv2.cvtColor(detected_map, cv2.COLOR_BGR2RGB), (768, 1024))
#             # cv2.imwrite('/home/aigc/ProjectVTON/OpenPose/keypoints/out_pose.jpg', output_image)

#         return keypoints


# if __name__ == '__main__':

#     model = OpenPose()
#     model('./images/bad_model.jpg')


# Cleaned-up version of the commented-out implementation above.

import numpy as np
from PIL import Image
import torch

from preprocess.openpose.annotator.util import resize_image, HWC3
from preprocess.openpose.annotator.openpose import OpenposeDetector

class OpenPose:
    """Thin wrapper around OpenposeDetector that returns 18 body keypoints
    for a 384x512 person image."""

    def __init__(self, gpu_id: int = 0):
        # gpu_id is currently unused (the device-selection code is commented
        # out above); it is kept for interface compatibility.
        self.preprocessor = OpenposeDetector()

    def __call__(self, input_image, resolution=384):
        # Accept either a PIL image or a path to an image file.
        if isinstance(input_image, Image.Image):
            input_image = np.asarray(input_image)
        elif isinstance(input_image, str):
            input_image = np.asarray(Image.open(input_image))
        else:
            raise ValueError("input_image must be a PIL.Image.Image or a file path string.")

        with torch.no_grad():
            # Convert to a 3-channel HWC array and resize to the target
            # resolution; the pipeline expects a 384x512 input.
            input_image = HWC3(input_image)
            input_image = resize_image(input_image, resolution)
            H, W, C = input_image.shape
            assert (H == 512 and W == 384), f'Incorrect input image shape: expected 512x384, got {H}x{W}'

            pose, detected_map = self.preprocessor(input_image, hand_and_face=False)

            # Re-index the detections so that candidate[i] corresponds to body
            # joint i of the 18-joint OpenPose skeleton: insert [0, 0]
            # placeholders for undetected joints and drop spurious entries.
            candidate = pose['bodies']['candidate']
            subset = pose['bodies']['subset'][0][:18]
            for i in range(18):
                if subset[i] == -1:
                    candidate.insert(i, [0, 0])
                    for j in range(i, 18):
                        if subset[j] != -1:
                            subset[j] += 1
                elif subset[i] != i:
                    candidate.pop(i)
                    for j in range(i, 18):
                        if subset[j] != -1:
                            subset[j] -= 1

            candidate = candidate[:18]

            # Convert normalized coordinates to pixel coordinates of the
            # 384x512 input image.
            for i in range(18):
                candidate[i][0] *= 384
                candidate[i][1] *= 512

            keypoints = {"pose_keypoints_2d": candidate}

        return keypoints
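

# Usage sketch: a minimal example of running the wrapper end to end. The image
# path is illustrative (taken from the commented-out example above); supply any
# person image that resizes to 384x512 before running.
if __name__ == '__main__':
    model = OpenPose(gpu_id=0)
    keypoints = model('./images/bad_model.jpg', resolution=384)
    # keypoints["pose_keypoints_2d"] holds 18 [x, y] pairs in 384x512 pixel space.
    print(keypoints["pose_keypoints_2d"])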