Skip to content

Commit 4a2ba2f

Browse files
authored
Merge pull request #348 from ShenYuhan/serving_mask
update serving mask
2 parents 2c8ed66 + f8ac2dc commit 4a2ba2f

File tree

11 files changed

+139
-21
lines changed

11 files changed

+139
-21
lines changed

demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,7 @@
66

77
if __name__ == "__main__":
88
# 指定要使用的图片文件并生成列表[("image", img_1), ("image", img_2), ... ]
9-
file_list = ["../img/man.png"]
9+
file_list = ["../../../../docs/imgs/man.png"]
1010
files = [("image", (open(item, "rb"))) for item in file_list]
1111
# 为每张图片对应指定info和style
1212
data = {"info": ["Male,Black_Hair"], "style": ["Bald"]}
Loading

demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py

+3-1
Original file line number | Diff line number | Diff line change
@@ -4,7 +4,9 @@
44

55
if __name__ == "__main__":
66
# 指定要预测的图片并生成列表[("image", img_1), ("image", img_2), ... ]
7-
file_list = ["../img/cat.jpg", "../img/flower.jpg"]
7+
file_list = [
8+
"../../../../docs/imgs/cat.jpg", "../../../../docs/imgs/flower.jpg"
9+
]
810
files = [("image", (open(item, "rb"))) for item in file_list]
911
# 指定预测方法为vgg11_imagenet并发送post请求
1012
url = "http://127.0.0.1:8866/predict/image/vgg11_imagenet"

demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py

+3-1
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,9 @@
66

77
if __name__ == "__main__":
88
# 指定要检测的图片并生成列表[("image", img_1), ("image", img_2), ... ]
9-
file_list = ["../img/cat.jpg", "../img/dog.jpg"]
9+
file_list = [
10+
"../../../../docs/imgs/cat.jpg", "../../../../docs/imgs/dog.jpg"
11+
]
1012
files = [("image", (open(item, "rb"))) for item in file_list]
1113
# 指定检测方法为yolov3_coco2017并发送post请求
1214
url = "http://127.0.0.1:8866/predict/image/yolov3_darknet53_coco2017"

demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,7 @@
66

77
if __name__ == "__main__":
88
# 指定要使用的图片文件并生成列表[("image", img_1), ("image", img_2), ... ]
9-
file_list = ["../img/girl.jpg"]
9+
file_list = ["../../../../docs/imgs/girl.jpg"]
1010
files = [("image", (open(item, "rb"))) for item in file_list]
1111
# 指定图片分割方法为deeplabv3p_xception65_humanseg并发送post请求
1212
url = "http://127.0.0.1:8866/predict/image/deeplabv3p_xception65_humanseg"

docs/tutorial/bert_service.md

+2-2
Original file line number | Diff line number | Diff line change
@@ -9,7 +9,7 @@
99

1010
<div align="center">
1111

12-
<img src="../docs/imgs/bs.png" aligh="center" width="100%" alt="BS流程图" />
12+
<img src="../imgs/bs.png" aligh="center" width="100%" alt="BS流程图" />
1313

1414
</div>
1515

@@ -203,7 +203,7 @@ result = bc.get_result(input_text=input_text)
203203
```python
204204
[[0.9993321895599361, 0.9994612336158751, 0.9999646544456481, 0.732795298099517, -0.34387934207916204, ... ]]
205205
```
206-
客户端代码demo文件见[示例](../demo/serving/bert_service/bert_service_client.py)
206+
客户端代码demo文件见[示例](../../demo/serving/bert_service/bert_service_client.py)
207207
运行命令如下:
208208
```shell
209209
$ python bert_service_client.py

docs/tutorial/serving.md

+11-11
Original file line number | Diff line number | Diff line change
@@ -81,7 +81,7 @@ http://0.0.0.0:8866/predict/<CATEGORY\>/\<MODULE>
8181

8282
<p align="center">
8383

84-
<img src="../docs/imgs/web_demo.png" width="60%" />
84+
<img src="../imgs/web_demo.png" width="60%" />
8585

8686
</p>
8787

@@ -117,7 +117,7 @@ $ hub serving start -c serving_config.json
117117

118118
<p align="center">
119119

120-
<img src="../docs/imgs/start_serving_lac.png" width="100%" />
120+
<img src="../imgs/start_serving_lac.png" width="100%" />
121121

122122
</p>
123123

@@ -171,41 +171,41 @@ if __name__ == "__main__":
171171
}
172172
```
173173

174-
此Demo的具体信息和代码请参见[LAC Serving](../demo/serving/module_serving/lexical_analysis_lac)。另外,下面展示了一些其他的一键服务部署Demo。
174+
此Demo的具体信息和代码请参见[LAC Serving](../../demo/serving/module_serving/lexical_analysis_lac)。另外,下面展示了一些其他的一键服务部署Demo。
175175

176176
## Demo——其他模型的一键部署服务
177177

178178
获取其他PaddleHub Serving的一键服务部署场景示例,可参见下列demo
179179

180-
* [图像分类-基于vgg11_imagent](../demo/serving/module_serving/classification_vgg11_imagenet)
180+
* [图像分类-基于vgg11_imagent](../../demo/serving/module_serving/classification_vgg11_imagenet)
181181

182182
&emsp;&emsp;该示例展示了利用vgg11_imagent完成图像分类服务化部署和在线预测,获取图像分类结果。
183183

184-
* [图像生成-基于stgan_celeba](../demo/serving/module_serving/GAN_stgan_celeba)
184+
* [图像生成-基于stgan_celeba](../../demo/serving/module_serving/GAN_stgan_celeba)
185185

186186
&emsp;&emsp;该示例展示了利用stgan_celeba生成图像服务化部署和在线预测,获取指定风格的生成图像。
187187

188-
* [文本审核-基于porn_detection_lstm](../demo/serving/module_serving/text_censorship_porn_detection_lstm)
188+
* [文本审核-基于porn_detection_lstm](../../demo/serving/module_serving/text_censorship_porn_detection_lstm)
189189

190190
&emsp;&emsp;该示例展示了利用porn_detection_lstm完成中文文本黄色敏感信息鉴定的服务化部署和在线预测,获取文本是否敏感及其置信度。
191191

192-
* [中文词法分析-基于lac](../demo/serving/module_serving/lexical_analysis_lac)
192+
* [中文词法分析-基于lac](../../demo/serving/module_serving/lexical_analysis_lac)
193193

194194
&emsp;&emsp;该示例展示了利用lac完成中文文本分词服务化部署和在线预测,获取文本的分词结果,并可通过用户自定义词典干预分词结果。
195195

196-
* [目标检测-基于yolov3_darknet53_coco2017](../demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017)
196+
* [目标检测-基于yolov3_darknet53_coco2017](../../demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017)
197197

198198
&emsp;&emsp;该示例展示了利用yolov3_darknet53_coco2017完成目标检测服务化部署和在线预测,获取检测结果和覆盖识别框的图片。
199199

200-
* [中文语义分析-基于simnet_bow](../demo/serving/module_serving/semantic_model_simnet_bow)
200+
* [中文语义分析-基于simnet_bow](../../demo/serving/module_serving/semantic_model_simnet_bow)
201201

202202
&emsp;&emsp;该示例展示了利用simnet_bow完成中文文本相似度检测服务化部署和在线预测,获取文本的相似程度。
203203

204-
* [图像分割-基于deeplabv3p_xception65_humanseg](../demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg)
204+
* [图像分割-基于deeplabv3p_xception65_humanseg](../../demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg)
205205

206206
&emsp;&emsp;该示例展示了利用deeplabv3p_xception65_humanseg完成图像分割服务化部署和在线预测,获取识别结果和分割后的图像。
207207

208-
* [中文情感分析-基于simnet_bow](../demo/serving/module_serving/semantic_model_simnet_bow)
208+
* [中文情感分析-基于simnet_bow](../../demo/serving/module_serving/semantic_model_simnet_bow)
209209

210210
&emsp;&emsp;该示例展示了利用senta_lstm完成中文文本情感分析服务化部署和在线预测,获取文本的情感分析结果。
211211

paddlehub/commands/serving.py

+9-2
Original file line number | Diff line number | Diff line change
@@ -103,6 +103,8 @@ def __init__(self, name):
103103
self.parser.add_argument("--config", "-c", nargs="?")
104104
self.parser.add_argument("--port", "-p", nargs="?", default=8866)
105105
self.parser.add_argument("--gpu", "-i", nargs="?", default=0)
106+
self.parser.add_argument(
107+
"--use_singleprocess", action="store_true", default=False)
106108

107109
def dump_pid_file(self):
108110
pid = os.getpid()
@@ -336,12 +338,15 @@ def start_multi_app_with_args(self):
336338

337339
def start_serving(self):
338340
config_file = self.args.config
341+
single_mode = self.args.use_singleprocess
339342
if config_file is not None:
340343
if os.path.exists(config_file):
341344
with open(config_file, "r") as fp:
342345
configs = json.load(fp)
343346
use_multiprocess = configs.get("use_multiprocess", False)
344-
if platform.system() == "Windows":
347+
if single_mode is True:
348+
ServingCommand.start_single_app_with_file(configs)
349+
elif platform.system() == "Windows":
345350
print(
346351
"Warning: Windows cannot use multiprocess working "
347352
"mode, PaddleHub Serving will switch to single process mode"
@@ -357,7 +362,9 @@ def start_serving(self):
357362
else:
358363
print("config_file ", config_file, "not exists.")
359364
else:
360-
if platform.system() == "Windows":
365+
if single_mode is True:
366+
self.start_single_app_with_args()
367+
elif platform.system() == "Windows":
361368
print(
362369
"Warning: Windows cannot use multiprocess working "
363370
"mode, PaddleHub Serving will switch to single process mode"

paddlehub/common/utils.py

+39
Original file line number | Diff line number | Diff line change
@@ -22,9 +22,12 @@
2222
import multiprocessing
2323
import hashlib
2424
import platform
25+
import base64
2526

2627
import paddle.fluid as fluid
2728
import six
29+
import numpy as np
30+
import cv2
2831

2932
from paddlehub.module import module_desc_pb2
3033
from paddlehub.common.logger import logger
@@ -51,6 +54,42 @@ def version_compare(version1, version2):
5154
return len(version1) > len(version2)
5255

5356

57+
def base64s_to_cvmats(base64s):
58+
for index, value in enumerate(base64s):
59+
value = bytes(value, encoding="utf8")
60+
value = base64.b64decode(value)
61+
value = np.fromstring(value, np.uint8)
62+
value = cv2.imdecode(value, 1)
63+
64+
base64s[index] = value
65+
return base64s
66+
67+
68+
def handle_mask_results(results):
69+
result = []
70+
if len(results) <= 0:
71+
return results
72+
_id = results[0]["id"]
73+
_item = {
74+
"data": [],
75+
"path": results[0].get("path", ""),
76+
"id": results[0]["id"]
77+
}
78+
for item in results:
79+
if item["id"] == _id:
80+
_item["data"].append(item["data"])
81+
else:
82+
result.append(_item)
83+
_id = _id + 1
84+
_item = {
85+
"data": [item["data"]],
86+
"path": item.get("path", ""),
87+
"id": item.get("id", _id)
88+
}
89+
result.append(_item)
90+
return result
91+
92+
5493
def get_platform():
5594
return platform.platform()
5695

paddlehub/serving/app_single.py

+70-2
Original file line number | Diff line number | Diff line change
@@ -18,6 +18,7 @@
1818
import os
1919
import base64
2020
import logging
21+
import shutil
2122

2223
cv_module_method = {
2324
"vgg19_imagenet": "predict_classification",
@@ -47,7 +48,9 @@
4748
"faster_rcnn_coco2017": "predict_object_detection",
4849
"cyclegan_cityscapes": "predict_gan",
4950
"deeplabv3p_xception65_humanseg": "predict_semantic_segmentation",
50-
"ace2p": "predict_semantic_segmentation"
51+
"ace2p": "predict_semantic_segmentation",
52+
"pyramidbox_lite_server_mask": "predict_mask",
53+
"pyramidbox_lite_mobile_mask": "predict_mask"
5154
}
5255

5356

@@ -132,6 +135,59 @@ def predict_gan(module, input_img, id, batch_size, extra={}):
132135
return results_pack
133136

134137

138+
def predict_mask(module, input_img, id, batch_size, extra=None, r_img=False):
139+
output_folder = "detection_result"
140+
global use_gpu
141+
method_name = module.desc.attr.map.data['default_signature'].s
142+
predict_method = getattr(module, method_name)
143+
try:
144+
data = {}
145+
if input_img is not None:
146+
input_img = {"image": input_img}
147+
data.update(input_img)
148+
if extra is not None:
149+
data.update(extra)
150+
r_img = True if "r_img" in extra.keys() else False
151+
results = predict_method(
152+
data=data, use_gpu=use_gpu, batch_size=batch_size)
153+
results = utils.handle_mask_results(results)
154+
except Exception as err:
155+
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
156+
print(curr, " - ", err)
157+
return {"result": "Please check data format!"}
158+
finally:
159+
base64_list = []
160+
results_pack = []
161+
if input_img is not None:
162+
if r_img is False:
163+
shutil.rmtree(output_folder)
164+
for index in range(len(results)):
165+
results[index]["path"] = ""
166+
results_pack = results
167+
else:
168+
input_img = input_img.get("image", [])
169+
for index in range(len(input_img)):
170+
item = input_img[index]
171+
with open(os.path.join(output_folder, item), "rb") as fp:
172+
b_head = "data:image/" + item.split(".")[-1] + ";base64"
173+
b_body = base64.b64encode(fp.read())
174+
b_body = str(b_body).replace("b'", "").replace("'", "")
175+
b_img = b_head + "," + b_body
176+
base64_list.append(b_img)
177+
results[index]["path"] = results[index]["path"].replace(
178+
id + "_", "") if results[index]["path"] != "" \
179+
else ""
180+
181+
results[index].update({"base64": b_img})
182+
results_pack.append(results[index])
183+
os.remove(item)
184+
os.remove(os.path.join(output_folder, item))
185+
else:
186+
results_pack = results
187+
188+
return results_pack
189+
190+
135191
def predict_object_detection(module, input_img, id, batch_size, extra={}):
136192
output_folder = "detection_result"
137193
global use_gpu
@@ -253,14 +309,22 @@ def predict_image(module_name):
253309
extra_info = {}
254310
for item in list(request.form.keys()):
255311
extra_info.update({item: request.form.getlist(item)})
312+
313+
for key in extra_info.keys():
314+
if isinstance(extra_info[key], list):
315+
extra_info[key] = utils.base64s_to_cvmats(
316+
eval(extra_info[key][0])["b64s"]) if isinstance(
317+
extra_info[key][0], str
318+
) and "b64s" in extra_info[key][0] else extra_info[key]
319+
256320
file_name_list = []
257321
if img_base64 != []:
258322
for item in img_base64:
259323
ext = item.split(";")[0].split("/")[-1]
260324
if ext not in ["jpeg", "jpg", "png"]:
261325
return {"result": "Unrecognized file type"}
262326
filename = req_id + "_" \
263-
+ utils.md5(str(time.time())+item[0:20]) \
327+
+ utils.md5(str(time.time()) + item[0:20]) \
264328
+ "." \
265329
+ ext
266330
img_data = base64.b64decode(item.split(',')[-1])
@@ -281,6 +345,10 @@ def predict_image(module_name):
281345
module_type = module.type.split("/")[-1].replace("-", "_").lower()
282346
predict_func = eval("predict_" + module_type)
283347
batch_size = batch_size_dict.get(module_name, 1)
348+
if file_name_list == []:
349+
file_name_list = None
350+
if extra_info == {}:
351+
extra_info = None
284352
results = predict_func(module, file_name_list, req_id, batch_size,
285353
extra_info)
286354
r = {"results": str(results)}

0 commit comments

Comments (0)