Created
November 13, 2020 16:15
-
-
Save MarsTechHAN/845db69e4efdd893a268acb978871f48 to your computer and use it in GitHub Desktop.
Yolov5s FC Service Provider
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# -*- coding: utf-8 -*- | |
import cgi | |
import os | |
import logging | |
from http.server import BaseHTTPRequestHandler, HTTPServer | |
import json | |
import oss2 | |
import urllib.request | |
import urllib.parse | |
import subprocess | |
import uuid | |
# Deployment configuration. The published gist redacted the real values in a
# way that broke the string literals; these placeholders restore valid syntax.
# Fill them in (preferably from environment variables) before deploying.
auth = oss2.Auth('<OSS_ACCESS_KEY_ID>', '<OSS_ACCESS_KEY_SECRET>')
bucket = oss2.Bucket(auth, '<OSS_ENDPOINT>', '<OSS_BUCKET_NAME>')
# Public base URL under which uploaded result images are reachable.
server_addr = '<PUBLIC_BUCKET_BASE_URL>'
# Upstream image URL template; '{tok}' is substituted with the 'tok' query value.
up_stream_request = '<UPSTREAM_IMAGE_URL_TEMPLATE_WITH_{tok}>'
def _response(start_response, reponse_code, message, img_format=None): | |
status = '%d' % reponse_code | |
if isinstance(message, str): | |
response_headers = [('Content-type', 'application/json')] | |
start_response(status, response_headers) | |
return [str.encode(message)] | |
else: | |
if img_format == 'jpg': | |
response_headers = [('Content-type', 'image/jpeg')] | |
if img_format == 'png': | |
response_headers = [('Content-type', 'image/png')] | |
start_response(status, response_headers) | |
return [message] | |
def handler(environ, start_response):
    """Aliyun Function Compute WSGI entry point.

    Obtains an input image either via GET (a 'src_img' URL or a 'tok' token
    substituted into up_stream_request) or via a multipart POST ('image'
    field), runs the yolov5 ncnn binary on it, and returns either the
    annotated image directly (when 'type' is 'jpg'/'png') or a JSON payload
    with an OSS URL and the detection results.

    environ -- WSGI environment (plus Aliyun's 'fc.context' key).
    start_response -- WSGI start_response callable.
    Returns an iterable of bytes (via _response).
    """
    logger = logging.getLogger()
    context = environ['fc.context']  # FC invocation context (currently unused)
    try:
        request_uri = environ['QUERY_STRING']
    except KeyError:
        request_uri = " "
    # uuid1 gives a per-request name for temp files and log correlation; it is
    # generated server-side, so the shell command built below contains no
    # user-controlled text.
    service_uuid = uuid.uuid1()
    logger.info('Start processing, uuid: %s, request uri: %s' % (service_uuid, request_uri))
    request_method = environ['REQUEST_METHOD']
    query = urllib.parse.parse_qs(request_uri)
    # Resolve the token once, up front: error paths below reference it on the
    # POST path too, where it previously was never assigned (NameError).
    token = query.get('tok')[0] if 'tok' in query.keys() else None
    if request_method == 'GET':
        logger.info('Process mode: GET.')
        if 'src_img' not in query.keys() and 'tok' not in query.keys():
            logger.error('Invalid query received, uuid: %s, request uri: %s' % (service_uuid, request_uri))
            return _response(start_response, 200, '{"status": "INVALID_QUERY", "request_uri": "%s"}' % (request_uri))
        if token is not None:
            image_url = up_stream_request.replace('{tok}', token)
        else:
            image_url = query.get('src_img')[0]
        logger.info('Image url: %s' % image_url)
        try:
            with urllib.request.urlopen(image_url, timeout=3) as response, open('/tmp/%s_img.jpg' % service_uuid, 'wb') as out_file:
                out_file.write(response.read())
        except Exception as e:
            logger.error('Failed to download image, uuid: %s, image url: %s, reason: %s' % (service_uuid, image_url, str(e)))
            if token is not None:
                return _response(start_response, 200, '{"status": "FAILED_TO_DOWNLOAD", "token": "%s"}' % token)
            else:
                return _response(start_response, 200, '{"status": "FAILED_TO_DOWNLOAD", "image_url": "%s"}' % image_url)
    else:
        logger.info('Process mode: %s.' % request_method)
        form = cgi.FieldStorage(
            fp=environ['wsgi.input'],
            environ=environ,
            keep_blank_values=True
        )
        if 'image' not in form:
            logger.error('No image find in post.')
            return _response(start_response, 200, '{"status": "NO_IMAGE_FIND_IN_POST"}')
        fileItem = form['image']
        logger.info('Start reading file.')
        try:
            # Stream the upload to disk in 1 KiB chunks to bound memory use.
            with open('/tmp/%s_img.jpg' % service_uuid, 'wb') as output_file:
                while 1:
                    data = fileItem.file.read(1024)
                    if not data:
                        break
                    output_file.write(data)
        except Exception as e:
            logger.error('Fail to read image, reason: %s' % str(e))
            return _response(start_response, 200, '{"status": "FAIL_TO_READ_IMAGE"}')
    # Default to jpg output. Previously image_ext was only assigned when a
    # 'type' query parameter was present, so its absence crashed with a
    # NameError at the subprocess call below.
    image_ext = '.jpg'
    if 'type' in query.keys() and query.get('type')[0] == 'png':
        image_ext = '.png'
    try:
        yolo_out = subprocess.check_output('cd /mnt/yolov5_resources/; ./yolov5_stl /tmp/%s_img.jpg /tmp/%s_out_img%s' % (service_uuid, service_uuid, image_ext), shell=True).decode('utf-8')
        # The detector prints 'failed' into its output on model errors.
        if 'failed' in yolo_out:
            logger.error('Failed to run detection, output: %s' % yolo_out)
            if token is not None:
                return _response(start_response, 200, '{"status": "FAILED_TO_RUN_MODEL", "token": "%s"}' % token)
            else:
                return _response(start_response, 200, '{"status": "FAILED_TO_RUN_MODEL", "uuid": "%s"}' % service_uuid)
        logger.info('Ncnn detection output %s' % yolo_out)
    except Exception as e:
        logger.error('Invoke ncnn fail, uuid: %s, reason: %s' % (service_uuid, str(e)))
        if token is not None:
            return _response(start_response, 200, '{"status": "FAILED_TO_RUN_MODEL", "token": "%s"}' % token)
        else:
            return _response(start_response, 200, '{"status": "FAILED_TO_RUN_MODEL", "uuid": "%s"}' % service_uuid)
    # Direct-image mode: return the annotated image without an OSS upload.
    if 'type' in query.keys():
        if query.get('type')[0] == 'jpg':
            return _response(start_response, 200, open('/tmp/%s_out_img%s' % (service_uuid, image_ext), 'rb').read(), 'jpg')
        if query.get('type')[0] == 'png':
            return _response(start_response, 200, open('/tmp/%s_out_img%s' % (service_uuid, image_ext), 'rb').read(), 'png')
    # JSON mode: upload the annotated image to OSS and return its URL plus
    # the detector's JSON stdout (a JSON array of detections).
    bucket.put_object_from_file('%s_out_img%s' % (service_uuid, image_ext), '/tmp/%s_out_img%s' % (service_uuid, image_ext))
    logger.info('Output image uploaded, address: %s/%s_out_img%s' % (server_addr, service_uuid, image_ext))
    out_string = '{"status": "SUCCESS", \n"img_url": "%s/%s_out_img%s", \n"detection_results": %s }' % (server_addr, service_uuid, image_ext, yolo_out)
    # Round-trip through json to validate and pretty-print the response.
    response = json.dumps(json.loads(out_string), sort_keys=True, indent=4)
    logger.info('Response json:\n' + response)
    return _response(start_response, 200, response)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Tencent is pleased to support the open source community by making ncnn available. | |
// | |
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. | |
// | |
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | |
// in compliance with the License. You may obtain a copy of the License at | |
// | |
// https://opensource.org/licenses/BSD-3-Clause | |
// | |
// Unless required by applicable law or agreed to in writing, software distributed | |
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | |
// CONDITIONS OF ANY KIND, either express or implied. See the License for the | |
// specific language governing permissions and limitations under the License. | |
// | |
// set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lmvec -lm --static -static-libgcc -static-libstdc++") | |
// set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -lmvec -lm --static -pthread -static-libgcc -static-libstdc++") | |
#include "layer.h" | |
#include "net.h" | |
#include <opencv2/core/core.hpp> | |
#include <opencv2/highgui/highgui.hpp> | |
#include <opencv2/imgproc/imgproc.hpp> | |
#include <stdio.h> | |
#include <vector> | |
// Custom ncnn layer reproducing yolov5's "Focus" module: a space-to-depth
// slice that turns a (w, h, c) blob into (w/2, h/2, 4c) by sampling every
// second pixel in each spatial direction.
class YoloV5Focus : public ncnn::Layer
{
public:
    YoloV5Focus()
    {
        // Exactly one input blob and one output blob.
        one_blob_only = true;
    }

    // Rearranges bottom_blob into top_blob as described above.
    // Returns 0 on success, -100 when the output allocation fails
    // (ncnn's conventional out-of-memory code).
    virtual int forward(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob, const ncnn::Option& opt) const
    {
        int w = bottom_blob.w;
        int h = bottom_blob.h;
        int channels = bottom_blob.c;

        int outw = w / 2;
        int outh = h / 2;
        int outc = channels * 4;

        // 4u = float32 element size, elempack 1.
        top_blob.create(outw, outh, outc, 4u, 1, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outc; p++)
        {
            // Output channel p reads source channel (p % channels), starting
            // at row phase ((p / channels) % 2) and column phase
            // ((p / channels) / 2).
            const float* ptr = bottom_blob.channel(p % channels).row((p / channels) % 2) + ((p / channels) / 2);
            float* outptr = top_blob.channel(p);

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    *outptr = *ptr;

                    outptr += 1;
                    ptr += 2; // every second column
                }

                ptr += w; // skip the interleaved row
            }
        }

        return 0;
    }
};
// Generates YoloV5Focus_layer_creator for Net::register_custom_layer.
DEFINE_LAYER_CREATOR(YoloV5Focus)

// One detection result: bounding box, class id and confidence.
struct Object
{
    cv::Rect_<float> rect; // box in image coordinates
    int label;             // index into the COCO class-name table
    float prob;            // confidence score
};
static inline float intersection_area(const Object& a, const Object& b) | |
{ | |
cv::Rect_<float> inter = a.rect & b.rect; | |
return inter.area(); | |
} | |
// In-place quicksort of faceobjects[left..right] by descending prob.
// Hoare-style partition around the middle element; the two recursive halves
// run as parallel OpenMP sections.
static void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right)
{
    int i = left;
    int j = right;
    float p = faceobjects[(left + right) / 2].prob;

    while (i <= j)
    {
        // Advance past elements already on the correct side of the pivot.
        while (faceobjects[i].prob > p)
            i++;

        while (faceobjects[j].prob < p)
            j--;

        if (i <= j)
        {
            // swap
            std::swap(faceobjects[i], faceobjects[j]);

            i++;
            j--;
        }
    }

    #pragma omp parallel sections
    {
        #pragma omp section
        {
            if (left < j) qsort_descent_inplace(faceobjects, left, j);
        }
        #pragma omp section
        {
            if (i < right) qsort_descent_inplace(faceobjects, i, right);
        }
    }
}
static void qsort_descent_inplace(std::vector<Object>& faceobjects) | |
{ | |
if (faceobjects.empty()) | |
return; | |
qsort_descent_inplace(faceobjects, 0, faceobjects.size() - 1); | |
} | |
static void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold) | |
{ | |
picked.clear(); | |
const int n = faceobjects.size(); | |
std::vector<float> areas(n); | |
for (int i = 0; i < n; i++) | |
{ | |
areas[i] = faceobjects[i].rect.area(); | |
} | |
for (int i = 0; i < n; i++) | |
{ | |
const Object& a = faceobjects[i]; | |
int keep = 1; | |
for (int j = 0; j < (int)picked.size(); j++) | |
{ | |
const Object& b = faceobjects[picked[j]]; | |
// intersection over union | |
float inter_area = intersection_area(a, b); | |
float union_area = areas[i] + areas[picked[j]] - inter_area; | |
// float IoU = inter_area / union_area | |
if (inter_area / union_area > nms_threshold) | |
keep = 0; | |
} | |
if (keep) | |
picked.push_back(i); | |
} | |
} | |
// Logistic function: maps any real input into (0, 1). The intermediate
// math runs in double precision (exp), exactly as before.
static inline float sigmoid(float x)
{
    const double e = exp(-x);
    return static_cast<float>(1.f / (1.f + e));
}
// Decode one yolov5 detection head into candidate Objects.
//
// anchors        - flat (w, h) anchor pairs for this stride (anchors.w / 2 pairs)
// stride         - feature-map stride in input pixels (8 / 16 / 32 here)
// in_pad         - letterboxed network input (used only for its w/h)
// feat_blob      - raw head output: one channel per anchor, one row per grid
//                  cell; row layout [dx, dy, dw, dh, box_score, class scores...]
// prob_threshold - minimum sigmoid(box) * sigmoid(class) confidence to keep
// objects        - decoded boxes are appended here (letterboxed-input coords)
static void generate_proposals(const ncnn::Mat& anchors, int stride, const ncnn::Mat& in_pad, const ncnn::Mat& feat_blob, float prob_threshold, std::vector<Object>& objects)
{
    const int num_grid = feat_blob.h;

    // Recover the 2-D grid shape from the flattened row count, using the
    // longer image side to disambiguate.
    int num_grid_x;
    int num_grid_y;
    if (in_pad.w > in_pad.h)
    {
        num_grid_x = in_pad.w / stride;
        num_grid_y = num_grid / num_grid_x;
    }
    else
    {
        num_grid_y = in_pad.h / stride;
        num_grid_x = num_grid / num_grid_y;
    }

    // Each row carries 4 box terms + 1 objectness score + the class scores.
    const int num_class = feat_blob.w - 5;

    const int num_anchors = anchors.w / 2;

    for (int q = 0; q < num_anchors; q++)
    {
        const float anchor_w = anchors[q * 2];
        const float anchor_h = anchors[q * 2 + 1];

        const ncnn::Mat feat = feat_blob.channel(q);

        for (int i = 0; i < num_grid_y; i++)
        {
            for (int j = 0; j < num_grid_x; j++)
            {
                const float* featptr = feat.row(i * num_grid_x + j);

                // find class index with max class score
                int class_index = 0;
                float class_score = -FLT_MAX;
                for (int k = 0; k < num_class; k++)
                {
                    float score = featptr[5 + k];
                    if (score > class_score)
                    {
                        class_index = k;
                        class_score = score;
                    }
                }

                float box_score = featptr[4];

                // Confidence combines objectness with the best class score.
                float confidence = sigmoid(box_score) * sigmoid(class_score);

                if (confidence >= prob_threshold)
                {
                    // yolov5/models/yolo.py Detect forward
                    // y = x[i].sigmoid()
                    // y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
                    // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
                    float dx = sigmoid(featptr[0]);
                    float dy = sigmoid(featptr[1]);
                    float dw = sigmoid(featptr[2]);
                    float dh = sigmoid(featptr[3]);

                    // Box center relative to grid cell (j, i), scaled by stride.
                    float pb_cx = (dx * 2.f - 0.5f + j) * stride;
                    float pb_cy = (dy * 2.f - 0.5f + i) * stride;

                    // Box size relative to this anchor.
                    float pb_w = pow(dw * 2.f, 2) * anchor_w;
                    float pb_h = pow(dh * 2.f, 2) * anchor_h;

                    float x0 = pb_cx - pb_w * 0.5f;
                    float y0 = pb_cy - pb_h * 0.5f;
                    float x1 = pb_cx + pb_w * 0.5f;
                    float y1 = pb_cy + pb_h * 0.5f;

                    Object obj;
                    obj.rect.x = x0;
                    obj.rect.y = y0;
                    obj.rect.width = x1 - x0;
                    obj.rect.height = y1 - y0;
                    obj.label = class_index;
                    obj.prob = confidence;

                    objects.push_back(obj);
                }
            }
        }
    }
}
// Run yolov5s (ncnn) on a BGR image and fill `objects` with the final,
// NMS-filtered detections mapped back to original-image coordinates.
// Always returns 0. NOTE(review): load_param/load_model return values are
// not checked; a missing model file surfaces later as an extract failure.
static int detect_yolov5(const cv::Mat& bgr, std::vector<Object>& objects)
{
    ncnn::Net yolov5;

    yolov5.opt.use_vulkan_compute = true; // GPU path when a Vulkan device exists
    // yolov5.opt.use_bf16_storage = true;

    // Register the custom Focus layer referenced by the .param file.
    yolov5.register_custom_layer("YoloV5Focus", YoloV5Focus_layer_creator);

    // original pretrained model from https://github.com/ultralytics/yolov5
    // the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
    yolov5.load_param("yolov5s.param");
    yolov5.load_model("yolov5s.bin");

    const int target_size = 640;        // network input resolution
    const float prob_threshold = 0.25f; // minimum confidence to keep a proposal
    const float nms_threshold = 0.45f;  // IoU threshold for NMS

    int img_w = bgr.cols;
    int img_h = bgr.rows;

    // letterbox pad to multiple of 32
    // Scale the longer side to target_size, preserving aspect ratio.
    int w = img_w;
    int h = img_h;
    float scale = 1.f;
    if (w > h)
    {
        scale = (float)target_size / w;
        w = target_size;
        h = h * scale;
    }
    else
    {
        scale = (float)target_size / h;
        h = target_size;
        w = w * scale;
    }

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB, img_w, img_h, w, h);

    // pad to target_size rectangle
    // yolov5/utils/datasets.py letterbox
    int wpad = (w + 31) / 32 * 32 - w;
    int hpad = (h + 31) / 32 * 32 - h;
    ncnn::Mat in_pad;
    // 114 is yolov5's grey letterbox fill value.
    ncnn::copy_make_border(in, in_pad, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, ncnn::BORDER_CONSTANT, 114.f);

    // Normalize 0-255 pixels to 0-1; no mean subtraction.
    const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
    in_pad.substract_mean_normalize(0, norm_vals);

    ncnn::Extractor ex = yolov5.create_extractor();

    ex.input("images", in_pad);

    std::vector<Object> proposals;

    // anchor setting from yolov5/models/yolov5s.yaml
    // Blob names ("output", "771", "791") are specific to this .param file.
    // stride 8
    {
        ncnn::Mat out;
        ex.extract("output", out);

        ncnn::Mat anchors(6);
        anchors[0] = 10.f;
        anchors[1] = 13.f;
        anchors[2] = 16.f;
        anchors[3] = 30.f;
        anchors[4] = 33.f;
        anchors[5] = 23.f;

        std::vector<Object> objects8;
        generate_proposals(anchors, 8, in_pad, out, prob_threshold, objects8);

        proposals.insert(proposals.end(), objects8.begin(), objects8.end());
    }

    // stride 16
    {
        ncnn::Mat out;
        ex.extract("771", out);

        ncnn::Mat anchors(6);
        anchors[0] = 30.f;
        anchors[1] = 61.f;
        anchors[2] = 62.f;
        anchors[3] = 45.f;
        anchors[4] = 59.f;
        anchors[5] = 119.f;

        std::vector<Object> objects16;
        generate_proposals(anchors, 16, in_pad, out, prob_threshold, objects16);

        proposals.insert(proposals.end(), objects16.begin(), objects16.end());
    }

    // stride 32
    {
        ncnn::Mat out;
        ex.extract("791", out);

        ncnn::Mat anchors(6);
        anchors[0] = 116.f;
        anchors[1] = 90.f;
        anchors[2] = 156.f;
        anchors[3] = 198.f;
        anchors[4] = 373.f;
        anchors[5] = 326.f;

        std::vector<Object> objects32;
        generate_proposals(anchors, 32, in_pad, out, prob_threshold, objects32);

        proposals.insert(proposals.end(), objects32.begin(), objects32.end());
    }

    // sort all proposals by score from highest to lowest
    qsort_descent_inplace(proposals);

    // apply nms with nms_threshold
    std::vector<int> picked;
    nms_sorted_bboxes(proposals, picked, nms_threshold);

    int count = picked.size();

    objects.resize(count);
    for (int i = 0; i < count; i++)
    {
        objects[i] = proposals[picked[i]];

        // adjust offset to original unpadded
        // Undo the letterbox padding and the resize scale.
        float x0 = (objects[i].rect.x - (wpad / 2)) / scale;
        float y0 = (objects[i].rect.y - (hpad / 2)) / scale;
        float x1 = (objects[i].rect.x + objects[i].rect.width - (wpad / 2)) / scale;
        float y1 = (objects[i].rect.y + objects[i].rect.height - (hpad / 2)) / scale;

        // clip
        x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
        y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
        x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
        y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);

        objects[i].rect.x = x0;
        objects[i].rect.y = y0;
        objects[i].rect.width = x1 - x0;
        objects[i].rect.height = y1 - y0;
    }

    return 0;
}
// Print the detections as a JSON array on stdout (this exact format is
// consumed by the FC service wrapper — do not change it) and write an
// annotated copy of the input image to img_path.
static void draw_objects(const cv::Mat& bgr, const std::vector<Object>& objects, const char* img_path)
{
    // COCO class names, indexed by Object::label.
    static const char* class_names[] = {
        "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
        "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
        "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
        "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
        "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
        "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
        "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
        "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
        "hair drier", "toothbrush"
    };

    cv::Mat image = bgr.clone();

    printf("[\n");
    // Separator after each entry: ',' between items, ' ' after the last one,
    // keeping the printed array valid JSON.
    char comma[2] = "";
    for (size_t i = 0; i < objects.size(); i++)
    {
        const Object& obj = objects[i];

        comma[0] = (i == objects.size() - 1) ? ' ' : ',';

        printf("\t{\"class\": \"%s\", \"accu\":%.5f, \"pos\":[%.2f, %.2f], \"size\":[%.2f, %.2f]}%s\n", class_names[obj.label], obj.prob,
               obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height, comma);

        cv::rectangle(image, obj.rect, cv::Scalar(255, 0, 0));

        // snprintf instead of sprintf: the label text is bounded even if a
        // class name or format ever grows.
        char text[256];
        snprintf(text, sizeof(text), "%s %.1f%%", class_names[obj.label], obj.prob * 100);

        int baseLine = 0;
        cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

        // Clamp the label box so it stays inside the image.
        int x = obj.rect.x;
        int y = obj.rect.y - label_size.height - baseLine;
        if (y < 0)
            y = 0;
        if (x + label_size.width > image.cols)
            x = image.cols - label_size.width;

        cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
                      cv::Scalar(255, 255, 255), -1);

        cv::putText(image, text, cv::Point(x, y + label_size.height),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
    }
    printf("]\n");

    cv::imwrite(img_path, image);
}
int main(int argc, char** argv) | |
{ | |
if (argc != 3) | |
{ | |
fprintf(stderr, "Usage: %s [input image] [output image]\n", argv[0]); | |
return -1; | |
} | |
const char* imagepath = argv[1]; | |
cv::Mat m = cv::imread(imagepath, 1); | |
if (m.empty()) | |
{ | |
fprintf(stderr, "cv::imread %s failed\n", imagepath); | |
return -1; | |
} | |
std::vector<Object> objects; | |
detect_yolov5(m, objects); | |
draw_objects(m, objects, argv[2]); | |
return 0; | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment