results[0].boxes.data.shape[0] == 0,目标检测模型的.pt文件怎么也识别不出物品,怎么办?
我正在django+websocket做了一个实时缺陷检测的系统
识别那块仿造的是另一个系统里面的代码,明明用的是同一个模型.pt文件和同样的物品,借鉴的代码也挺多的,但是我自己写的就是识别不出来,results[0].boxes.data.shape[0] == 0
有没有兄弟帮忙看看呀/(ㄒoㄒ)/~~
我的代码
print("检测结果:", results[0].boxes.data)
一直都识别不到

CameraThread.py文件
import asyncio
import base64
import json
import threading
import time
from ultralytics import YOLO
import cv2
import websockets
# 为了测试而写死的路径
ws_url = "ws://localhost:8001/ws/detection/"
class CameraThread(threading.Thread):
    """Capture frames from the default camera and stream them to the
    detection WebSocket endpoint as base64-encoded JPEG images.

    Runs a private asyncio event loop because a ``threading.Thread``
    body has no running loop of its own.
    """

    def __init__(self, camera_id):
        super().__init__()
        self.camera_id = camera_id
        # CAP_DSHOW selects the DirectShow backend (Windows); it opens
        # much faster than the default MSMF backend.
        self.cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # keep only the newest frame
        self.running = True

    def run(self):
        # Create and install a loop for this thread, then drive the
        # async sender until stop() flips self.running.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.send_video())

    async def send_video(self):
        """Outer loop reconnects on failure; inner loop streams frames."""
        # str() so an int camera_id does not raise on concatenation.
        url = ws_url + str(self.camera_id) + '/'
        while self.running:
            try:
                print(url)
                async with websockets.connect(url) as websocket:
                    while self.running:
                        ret, frame = self.cap.read()
                        # BUG FIX: the original called cv2.imencode(frame)
                        # BEFORE checking ``ret``; a failed read yields
                        # frame=None and imencode raises, killing the loop.
                        if not ret:
                            print("图像读取失败")
                            continue
                        ok, buffer = cv2.imencode(".jpg", frame)
                        if not ok:
                            print("图像编码失败")
                            continue
                        image_base64 = base64.b64encode(buffer).decode("utf-8")
                        # Send the frame to the WebSocket server.
                        await websocket.send(json.dumps({
                            "camera_id": self.camera_id,
                            "image": image_base64
                        }))
                        # Wait for the server's response so frames do not
                        # pile up faster than the server can process them.
                        response = await websocket.recv()
                        await asyncio.sleep(0.03)  # throttle the frame rate
            except websockets.exceptions.ConnectionClosed as e:
                print(f"WebSocket 连接关闭: {e}")
                await asyncio.sleep(1)  # wait 1s before reconnecting
            except Exception as e:
                print(f"发生错误: {e}")
                await asyncio.sleep(1)  # wait 1s before retrying

    def stop(self):
        """Stop streaming and release the camera."""
        self.running = False
        # NOTE(review): released while send_video may still call
        # cap.read() on the worker thread; OpenCV then returns ret=False,
        # which the read-check above handles — confirm on your platform.
        self.cap.release()
        print(f"Camera {self.camera_id} 已关闭")
DetectConsumer.py文件
from ultralytics import YOLO
import json
import asyncio
import base64
import numpy as np
import cv2
from channels.generic.websocket import AsyncWebsocketConsumer
from detection.Yaml_Tool import myYamlTool
class DetectConsumer(AsyncWebsocketConsumer):
    """Channels consumer: receives base64 JPEG frames, runs YOLO
    detection, draws the boxes and returns the annotated image.
    """

    # BUG FIX (performance/correctness): the original re-created the
    # YOLO model — reloading best.pt from disk — and re-read config.yaml
    # on EVERY received frame. Model loading takes far longer than one
    # frame interval, so frames back up and the system appears to
    # "never detect anything". Load once and share across consumers.
    _shared_model = None

    @classmethod
    def _get_model(cls):
        """Load the detection model once; reuse it for all connections."""
        if cls._shared_model is None:
            cls._shared_model = YOLO(model="detection/YoloModel/best.pt")
        return cls._shared_model

    async def connect(self):
        self.camera_id = self.scope['url_route']['kwargs']['camera_id']
        print(f"[INFO] 摄像头连接:{self.channel_name}")
        await self.accept()

    async def disconnect(self, close_code):
        print(f"[INFO] 摄像头断开:{self.channel_name}")

    async def receive(self, text_data):
        """Handle one frame message from the client."""
        data = json.loads(text_data)
        self.camera_id = data.get("camera_id")
        image_base64 = data.get("image")
        # Read the config once per consumer instead of once per frame.
        if not hasattr(self, "yamlData"):
            self.yamlTool = myYamlTool()
            self.yamlData = self.yamlTool.read_yaml(r'detection/config.yaml')
        # Confidence threshold: client-supplied "zxd" overrides the
        # configured default.
        zxd = self.yamlData['threshold'] if data.get("zxd") is None else data.get("zxd")
        if image_base64:
            # base64 -> JPEG bytes -> BGR image.
            image_data = base64.b64decode(image_base64)
            np_arr = np.frombuffer(image_data, np.uint8)
            frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
            if frame is None:
                # Corrupt/truncated payload — report instead of crashing.
                await self.send(json.dumps({"camera_id": None}))
                return
            # Kept as an attribute for backward compatibility with any
            # code that reads consumer.model.
            self.model = self._get_model()
            # BUG FIX: the original computed ``zxd`` (the "# TODO 置信度"
            # threshold) but never used it — apply it to the prediction.
            results = self.model(frame, conf=float(zxd))
            print("检测结果:", results[0].boxes.data)
            if results[0].boxes.data.shape[0] == 0:
                print("⚠️ 未检测到目标,返回原始图像")
            for result in results[0].boxes.data:
                x1, y1, x2, y2, conf, cls = result
                label = f"{self.model.names[int(cls)]}: {conf:.2f}"
                cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
                cv2.putText(frame, label, (int(x1), int(y1) - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
            _, buffer = cv2.imencode(".jpg", frame)
            image_base64 = base64.b64encode(buffer).decode("utf-8")
            # Return the annotated image to the client.
            await self.send(json.dumps({
                "camera_id": self.camera_id,
                "image": image_base64
            }))
        else:
            await self.send(json.dumps({
                "camera_id": None
            }))
我仿造的系统代码
print("检测结果:", results[0].boxes.data)
识别成功

import time
import cv2
from ultralytics import YOLO
import numpy as np
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal, QThread
import warnings
warnings.filterwarnings('ignore')
class MyThread(QThread):
    """Camera worker thread: grabs frames from the default camera, runs
    YOLO detection, draws the boxes and emits displayable images plus
    status messages for the UI.
    """

    camera_signal = pyqtSignal(object)  # processed frame for display
    info_signal = pyqtSignal(str)       # human-readable status text

    def __init__(self, parent=None):
        super().__init__(parent)
        self.drawLine = False
        self.monoCamera = None
        self.FrameBufferSize = None
        self.isPause = False
        self.model = YOLO(model="./YoloModel/best.pt")
        self.latest_frame = None
        self.cap = None  # camera handle, opened lazily in run()

    def __del__(self):
        # Safety net: release the camera if the thread is garbage-collected.
        if self.cap is not None:
            self.cap.release()

    def get_latest_frame(self):
        return self.latest_frame

    def run(self):
        self.cap = cv2.VideoCapture(0)  # default camera
        # Guard clause: bail out early if the camera cannot be opened.
        if not self.cap.isOpened():
            self.info_signal.emit("【{}】 无法打开相机".format(time.strftime("%Y-%m-%d %H:%M:%S")))
            return
        self.info_signal.emit("【{}】 相机已成功打开".format(time.strftime("%Y-%m-%d %H:%M:%S")))
        while True:
            if self.isPause:
                break
            ok, img = self.cap.read()  # grab one frame
            if not ok:
                self.info_signal.emit("【{}】 无法读取图像".format(time.strftime("%Y-%m-%d %H:%M:%S")))
                break
            # Remember the raw frame for get_latest_frame().
            self.latest_frame = img
            print(img)
            detections = self.model(img)  # run YOLO inference
            print("检测结果:", detections[0].boxes.data)
            # Draw every detection box with its class label and score.
            for x1, y1, x2, y2, conf, cls in detections[0].boxes.data:
                caption = f"{self.model.names[int(cls)]}: {conf:.2f}"
                top_left = (int(x1), int(y1))
                bottom_right = (int(x2), int(y2))
                cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), 2)
                cv2.putText(img, caption, (top_left[0], top_left[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
            # Convert BGR ndarray to a QImage the UI can show.
            shown = QtGui.QImage(img.data, img.shape[1], img.shape[0],
                                 QtGui.QImage.Format_BGR888).rgbSwapped()
            self.camera_signal.emit(shown)
            self.info_signal.emit("【{}】 实时显示中......".format(time.strftime("%Y-%m-%d %H:%M:%S")))
            time.sleep(0.03)  # throttle the frame rate
        self.cap.release()  # free the camera when the loop ends
        self.info_signal.emit("【{}】 相机已关闭".format(time.strftime("%Y-%m-%d %H:%M:%S")))