我想知道如何在这段代码里加入区域检测功能,也就是只检测视频流每一帧的指定区域,然后再输出结果。
def show_video_frame2(self, roi=None):
    """Read one frame from the second capture, run YOLO detection, and
    display the annotated frame on ``label_2``.

    Parameters
    ----------
    roi : tuple[int, int, int, int] | None
        Optional region of interest ``(x1, y1, x2, y2)`` in pixel
        coordinates of the raw frame. When given, only that sub-region is
        fed to the detector, and the resulting boxes are shifted back so
        they are drawn at the correct position on the full frame.
        ``None`` (the default) keeps the original full-frame behaviour.

    Side effects: writes the annotated frame to ``self.out2``, stores an
    RGB copy in ``self.result2``, and updates the Qt label pixmap.
    """
    name_list2 = []
    flag2, img2 = self.cap2.read()
    if img2 is not None:
        showimg2 = img2
        # Crop to the ROI when one is supplied; remember the offset so
        # boxes can be mapped back onto the full frame afterwards.
        if roi is not None:
            rx1, ry1, rx2, ry2 = roi
            det_input = img2[ry1:ry2, rx1:rx2]
            offset_x, offset_y = rx1, ry1
        else:
            det_input = img2
            offset_x = offset_y = 0
        with torch.no_grad():
            img2 = letterbox(det_input, new_shape=self.opt.img_size)[0]
            # Convert BGR to RGB and HWC -> CHW (3 x H x W)
            img2 = img2[:, :, ::-1].transpose(2, 0, 1)
            img2 = np.ascontiguousarray(img2)
            img2 = torch.from_numpy(img2).to(self.device)
            img2 = img2.half() if self.half else img2.float()  # uint8 to fp16/32
            img2 /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img2.ndimension() == 3:
                img2 = img2.unsqueeze(0)  # add batch dimension
            # Inference
            pred2 = self.model(img2, augment=self.opt.augment)[0]
            # Apply NMS
            pred2 = non_max_suppression(pred2, self.opt.conf_thres, self.opt.iou_thres,
                                        classes=self.opt.classes,
                                        agnostic=self.opt.agnostic_nms)
            # Process detections
            for i2, det2 in enumerate(pred2):  # detections per image
                if det2 is not None and len(det2):
                    # Rescale boxes from the letterboxed size back to the
                    # detection input (the crop, or the full frame when no
                    # ROI was given), then shift into full-frame coordinates.
                    det2[:, :4] = scale_boxes(
                        img2.shape[2:], det2[:, :4], det_input.shape).round()
                    det2[:, [0, 2]] += offset_x
                    det2[:, [1, 3]] += offset_y
                    # Write results
                    for *xyxy2, conf2, cls2 in reversed(det2):
                        label2 = '%s %.2f' % (self.names[int(cls2)], conf2)
                        name_list2.append(self.names[int(cls2)])
                        print(label2)
                        plot_one_box(
                            xyxy2, showimg2, label=label2,
                            color=self.colors[int(cls2)], line_thickness=2)
        self.out2.write(showimg2)
        show2 = cv2.resize(showimg2, (640, 480))
        self.result2 = cv2.cvtColor(show2, cv2.COLOR_BGR2RGB)
        showImage2 = QtGui.QImage(self.result2.data, self.result2.shape[1],
                                  self.result2.shape[0],
                                  QtGui.QImage.Format_RGB888)
        self.label_2.setPixmap(QtGui.QPixmap.fromImage(showImage2))