python VideoCapture使用时,报错

python 3.4.3
VideoCapture VideoCapture-0.9.5-cp34
Pillow 3.0

import os
from VideoCapture import Device
import time

i = 0
cam = Device(devnum=0, showVideoWindow=0)
while i < 10:
    cam_time = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    cam_name = 'camera' + cam_time + '.jpg'
    cam.saveSnapshot(cam_name, 3, 1, 'bl')
    i += 1

File "D:\Python34\lib\site-packages\VideoCapture\", line 234, in saveSnapshot
self.getImage(timestamp, boldfont, textpos).save(filename, **keywords)
File "D:\Python34\lib\site-packages\VideoCapture\", line 154, in getImage
'RGB', (width, height), buffer, 'raw', 'BGR', 0, -1)
File "D:\Python34\lib\site-packages\PIL\", line 2053, in fromstring
"Please call frombytes() instead.")
Exception: fromstring() has been removed. Please call frombytes() instead.





File "D:\Python34\lib\site-packages\VideoCapture\", line 154, in getImage


Csdn user default icon
# OpenCV的库,如下是代码 ``` import os import sys import cv2 import numpy as np def normalize(X, low, high, dtype=None): """Normalizes a given array in X to a value between low and high.""" X = np.asarray(X) minX, maxX = np.min(X), np.max(X) # normalize to [0...1]. X = X - float(minX) X = X / float((maxX - minX)) # scale to [low...high]. X = X * (high-low) X = X + low if dtype is None: return np.asarray(X) return np.asarray(X, dtype=dtype) def read_images(path, sz=None): """Reads the images in a given folder, resizes images on the fly if size is given. Args: path: Path to a folder with subfolders representing the subjects (persons). sz: A tuple with the size Resizes Returns: A list [X,y] X: The images, which is a Python list of numpy arrays. y: The corresponding labels (the unique number of the subject, person) in a Python list. """ c = 0 X,y = [], [] for dirname, dirnames, filenames in os.walk(path): for subdirname in dirnames: subject_path = os.path.join(dirname, subdirname) for filename in os.listdir(subject_path): try: if (filename == ".directory"): continue filepath = os.path.join(subject_path, filename) im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE) if (im is None): print ("image " + filepath + " is none") else: print (filepath) # resize to given size (if given) if (sz is not None): im = cv2.resize(im, (200, 200)) X.append(np.asarray(im, dtype=np.uint8)) y.append(c) except IOError as (errno, strerror): print ("I/O error({0}): {1}".format(errno, strerror)) except: print ("Unexpected error:", sys.exc_info()[0]) raise print (c) c = c+1 print (y) return [X,y] def face_rec(): names = ['Gaving', 'Mengxing', 'Cookie'] if len(sys.argv) < 2: print ("USAGE: </path/to/images> [</path/to/store/images/at>]") sys.exit() [X,y] = read_images(sys.argv[1]) y = np.asarray(y, dtype=np.int32) if len(sys.argv) == 3: out_dir = sys.argv[2] model = cv2.face.createEigenFaceRecognizer() model.train(np.asarray(X), np.asarray(y)) camera = cv2.VideoCapture(0) 
face_cascade = cv2.CascadeClassifier('E:\\OPCV\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_default.xml') while (True): read, img = faces = face_cascade.detectMultiScale(img, 1.3, 5) for (x, y, w, h) in faces: img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) roi = gray[x:x+w, y:y+h] try: roi = cv2.resize(roi, (200, 200), interpolation=cv2.INTER_LINEAR) print (roi.shape) params = model.predict(roi) print ("Label: %s, Confidence: %.2f" % (params[0], params[1])) cv2.putText(img, names[params[0]], (x, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2) if (params[0] == 0): cv2.imwrite('face_rec.jpg', img) except: continue cv2.imshow("camera", img) if cv2.waitKey(1000 / 12) & 0xff == ord("q"): break cv2.destroyAllWindows() if __name__ == "__main__": face_rec() def original(): # This is where we write the images, if an output_dir is given # in command line: out_dir = None names = ['Gaving', 'Mengxing', 'Cookie'] # You'll need at least a path to your image data, please see # the tutorial coming with this source code on how to prepare # your image data: if len(sys.argv) < 2: print ("USAGE: </path/to/images> [</path/to/store/images/at>]") sys.exit() # Now read in the image data. This must be a valid path! [X,y] = read_images(sys.argv[1]) # Convert labels to 32bit integers. This is a workaround for 64bit machines, # because the labels will truncated else. This will be fixed in code as # soon as possible, so Python users don't need to know about this. # Thanks to Leo Dirac for reporting: y = np.asarray(y, dtype=np.int32) # If a out_dir is given, set it: if len(sys.argv) == 3: out_dir = sys.argv[2] # Create the Eigenfaces model. We are going to use the default # parameters for this simple example, please read the documentation # for thresholding: #model = cv2.face.createLBPHFaceRecognizer() model = cv2.face.createEigenFaceRecognizer() # Read # Learn the model. 
Remember our function returns Python lists, # so we use np.asarray to turn them into NumPy lists to make # the OpenCV wrapper happy: model.train(np.asarray(X), np.asarray(y)) # We now get a prediction from the model! In reality you # should always use unseen images for testing your model. # But so many people were confused, when I sliced an image # off in the C++ version, so I am just using an image we # have trained with. # # model.predict is going to return the predicted label and # the associated confidence: camera = cv2.VideoCapture(0) face_cascade = cv2.CascadeClassifier('./cascades/haarcascade_frontalface_default.xml') while (True): read, img = faces = face_cascade.detectMultiScale(img, 1.3, 5) for (x, y, w, h) in faces: img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) roi = gray[x:x+w, y:y+h] roi = cv2.resize(roi, (200, 200), interpolation=cv2.INTER_LINEAR) print (roi.shape) params = model.predict(roi) print ("Label: %s, Confidence: %.2f" % (params[0], params[1])) cv2.putText(img, names[params[0]], (x,y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 3) cv2.imshow("camera", img) if cv2.waitKey(1000 / 12) & 0xff == ord("q"): break [p_label, p_confidence] = model.predict(np.asarray(X[0])) # Print it: print ("Predicted label = %d (confidence=%.2f)" % (p_label, p_confidence)) # Cool! Finally we'll plot the Eigenfaces, because that's # what most people read in the papers are keen to see. # # Just like in C++ you have access to all model internal # data, because the cv::FaceRecognizer is a cv::Algorithm. 
# # You can see the available parameters with getParams(): print (model.getParams()) # Now let's get some data: mean = model.getMat("mean") eigenvectors = model.getMat("eigenvectors") # We'll save the mean, by first normalizing it: mean_norm = normalize(mean, 0, 255, dtype=np.uint8) mean_resized = mean_norm.reshape(X[0].shape) if out_dir is None: cv2.imshow("mean", mean_resized) else: cv2.imwrite("%s/mean.png" % (out_dir), mean_resized) # Turn the first (at most) 16 eigenvectors into grayscale # images. You could also use cv::normalize here, but sticking # to NumPy is much easier for now. # Note: eigenvectors are stored by column: for i in xrange(min(len(X), 16)): eigenvector_i = eigenvectors[:,i].reshape(X[0].shape) eigenvector_i_norm = normalize(eigenvector_i, 0, 255, dtype=np.uint8) # Show or save the images: if out_dir is None: cv2.imshow("%s/eigenface_%d" % (out_dir,i), eigenvector_i_norm) else: cv2.imwrite("%s/eigenface_%d.png" % (out_dir,i), eigenvector_i_norm) # Show the images: if out_dir is None: cv2.waitKey(0) cv2.destroyAllWindows() ``` [图片说明]( 一直报错,第一次发帖求助,希望大佬不吝赐教!
videocapture 抓拍图片报警missing picture in access unit 是什么原因呀
这是报警信息![图片说明]( 这是抓图代码 ``` int CaptureJpg(const string& curl) { if (curl.empty()) { TextLog::Singleton().Write("log.log", "url empty"); return -1; } string ip; GetIP(curl, ip);//从Rtsp流地址中获取ip if (!ping(ip))//检测网络 { TextLog::Singleton().Write("log.log", "Camera connection failed"); return -1; } cout << curl << endl; VideoCapture cap(curl); if (!cap.isOpened()) { return -1; } Mat frame; string fname; time_t milsecond; time(&milsecond); fname = to_string(milsecond) + ".jpg"; if ( { //imshow("Video", image); if (frame.empty()) { TextLog::Singleton().Write("log.log", "frame empty"); return -1; } TextLog::Singleton().Write("log.log", fname); vector<int> compression_params; compression_params.push_back(CV_IMWRITE_JPEG_QUALITY); //选择jpeg compression_params.push_back(30); //在这个填入你要的图片质量 imwrite(fname.c_str(), frame, compression_params); DataCenter::Singlon().SetCameraljpg(fname); frame.release(); } cap.release(); return 0; } ```
yolo video python cv2图像转PIL fromarray报错
``` > > Traceback (most recent call last): > File "", line 66, in <module> > detect() > File "", line 60, in detect > detect_video(YOLO(**vars(FLAGS)), FLAGS.output) > File "/home/dwy5/wy/classface_yolo3/", line 239, in detect_video > image = Image.fromarray(frame) > File "/root/anaconda3/lib/python3.6/site-packages/PIL/", line 2508, in fromarray > arr = obj.__array_interface__ > AttributeError: 'NoneType' object has no attribute '__array_interface__' > > ``` 经常异常终止报错 ``` vid = cv2.VideoCapture(0) ··· ··· return_value, frame = image = Image.fromarray(frame) image = yolo.detect_image(image) ```
错误信息如下: Traceback (most recent call last): File "./tools/", line 36, in <module> cv2.resizeWindow("enhanced", 640, 360); cv2.error: OpenCV(4.1.0) /io/opencv/modules/highgui/src/window_QT.cpp:592: error: (-27:Null pointer) NULL guiReceiver (please create a window) in function 'cvResizeWindow' 代码: ``` # -*- coding: utf-8 -*- import PIL import cv2 if __name__ == '__main__': writeVideo_flag = True video_src = "rtsp://admin:Admin123@" video_capture = cv2.VideoCapture(video_src) source_file = '/approot1/ioth/ai/tf-faster-rcnn-master' print(video_capture.isOpened()) if writeVideo_flag: # 将检测的视频结果输出到output.avi,detection.txt # Define the codec and create VideoWriter object w = int(video_capture.get(3)) print(w) h = int(video_capture.get(4)) print(h) fourcc = cv2.VideoWriter_fourcc(*'MJPG') out = cv2.VideoWriter(source_file + '/img/output.avi', fourcc, 15, (w, h)) list_file = open(source_file + '/img/detection.txt', 'w') frame_index = -1 fps = 0.0 fpscount = 0 #while True: ret, frame = # frame shape 640*480*3 print(frame) while True: if ret == True: #窗口可以随意调整大小 #这行报错 cv2.resizeWindow("detect Inout", 640, 360); fpscount += 1 else: break; if fpscount % 1 == 0: image = PIL.Image.fromarray(frame) ```
用python实现opencv报错an integer is required
以下是代码 ``` #!/usr/bin/env python # -*- coding: utf-8 -*- import cv2 import numpy import time '''     视频管理 ''' class CaptureManager(object): def __init__(self,capture,previewWindowManager = None,shouldMirrorPreview = False): self.previewWindowManager = previewWindowManager self.shouldMirrorPreview = shouldMirrorPreview #定义非公有变量,单下划线开始,为保护变量,只有类对象或子类对象可以访问 protected #如果以双下划线开始,为私有成员变量,只有类对象自已可以访问,像private self._capture = capture self._channel = 0 self._enteredFrame = False self._frame = None self._imageFilename = None self._videoFilename = None self._videoEncoding = None self._videoWriter = None self._startTime = None self._framesElapsed = long(0) self._fpsEstimate = None @property def channel(self): return self._channel @channel.setter def channel(self,value): if self._channel != value: self._channel = value self.frame = None @property def frame(self): if self._enteredFrame and self._frame is None: _,self._frame = self._capture.retrieve() return self._frame @property def isWritingImage(self): return self._imageFilename is not None @property def isWritingVideo(self): return self._videoFilename is not None #只能同步一帧 def enterFrame(self): """Capture the next frame,if any.""" if self._capture is not None: #but first,check that any previous frame was exited. 
#assert not self._enteredFrame,'previous enterFrame() had no matching exitFrame()' if self._capture is not None: self._enteredFrame = self._capture.grab() else: None def exitFrame(self): """可以从当前通道中取得图像,估计帧率,显示图像,执行暂停的请求,向文件中写入图像""" #计算帧率 if self.frame is None: self._enteredFrame = False return #Update the FPS estimate and related variables.通过窗体显示图像 if self._framesElapsed == 0: self._startTime = time.time() else: timeElapsed = time.time() - self._startTime self._fpsEstimate = self._framesElapsed/timeElapsed self._framesElapsed += 1 #Draw to the Window,if any.保存图像文件 if self.previewWindowManager is not None: if self.shouldMirrorPreview: mirroredFrame = numpy.fliplr(self._frame).copy() else: #Write to the image file,if any.保存图像文件 if self.isWritingImage: cv2.imwrite(self._imageFilename,self._frame) self._imageFilename = None #Write to the video file,if any.保存视频文件 self._writeVideoFrame() #Release the frame.释放资源 self._frame = None self.enteredFrame = False def writeImage(self,filename): """Write the next exited frame to an image file."""#保存图片,公有函数 self._imageFilename = filename def startWritingVideo(self,filename,encoding = cv2.VideoWriter_fourcc('I','4','2','0')): """Start writing exited frames to a video file."""#开始保存视频,公有函数 self._videoFilename = filename self._videoEnchding = None self._videoWriter = None def _writeVideoFrame(self):#停止视频写入,公有函数 if not self.isWritingVideo: return if self._videoWriter is None: fps = self._capture.get(cv2.CAP_PROP_FPS) if fps == 0.0: #The capture's FPS is unknown so use an entimate. if self._framesElapsed < 20: #Wait until more frames frames elapse so that the estimate is more stable. 
return else: fps = self._fpsEstimate size = (int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)),\ int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT))) self._videoWriter = cv2.VideoWriter(self._videoFilename,self._videoEncoding,fps,size) self._videoWriter.write(self._frame) '''     窗口管理,支持键盘事件 ''' class WindowManager(object): def __init__(self,windowName,keypressCallback = None): #窗体名称#按键回调函数 self.keypressCallback = keypressCallback self._windowName = windowName self._isWindowCreated = False @property def isWindowCreated(self):#检查窗体是否被创建 return self._isWindowCreated def creatWindow(self):#创建窗体 cv2.namedWindow(self._windowName) self._isWindowCreated = True def show(self,frame):#显示图像 cv2.imshow(self._windowName,frame) def destroyWindow(self):#关闭窗体释放资源 cv2.destroyWindow(self._windowName) self._isWindowCreated = False def processEvents(self): keycode = cv2.waitKey(1) if self.keypressCallback is not None and keycode != -1: #Discard any non-ASCII info encoded by GTY. keycode &= 0xFF self.keypressCallback(keycode) ``` ``` #!/usr/bin/env python # -*- coding: utf-8 -*- import cv2 from managers import WindowManager,CaptureManager class Cameo(object): def __init__(self): self._windowManager = WindowManager('Cameo',self.onKeypress) self._captureManager = CaptureManager(cv2.VideoCapture(0),self._windowManager,True) def run(self): """Run the main loop.""" self._windowManager.creatWindow() while self._windowManager.isWindowCreated: self._captureManager.enterFrame() frame = self._captureManager.frame #TODO:Filter the frame (Chapter 3). self._captureManager.exitFrame() self._windowManager.processEvents() def onKeypress(self,keycode): """Handle a keycode. 
space -> 载图 tab -> 启动和停止视频录制 escape -> 退出应用 """ if keycode == 32: #space self._captureManager.writeImage('screenshot.png') elif keycode == 9:#tab if not self._captureManager.isWritingVideo: self._captureManager.startWritingVideo("screencast.avi") else: self._captureManager.stopWritingVideo() elif keycode == 27:#escape self._windowManager.destroyWindow() if __name__ == "__main__": Cameo().run() ```
opencv videocapture以rtsp读取网络摄像头时的问题
opencv videocapture以rtsp读取网络摄像头时,如果没网,程序就许停在那,什么都不干,既不会中断,也不会报错,也不往下执行,试过判断读取是否为空,但是在读取的时候就不往下执行了,根本没法判断,求助!!!
# 大佬们这个显示“list index out of range”(中文:列表索引超出范围) #怎么解决? ``` import cv2 recognizer = cv2.face.LBPHFaceRecognizer_create()"C:\work\AI\AI-picture\Face recognition\\face_trainer\\trainer.yml") cascadePath = "C:\work\AI\AI-picture\Face recognition\haarcascade_frontalface_default.xml" faceCascade = cv2.CascadeClassifier(cascadePath) font = cv2.FONT_HERSHEY_SIMPLEX idnum = 0 names = ['A', 'Bob'] cam = cv2.VideoCapture(0, cv2.CAP_DSHOW) minW = 0.1*cam.get(3) minH = 0.1*cam.get(4) while True: ret, img = gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=5, minSize=(int(minW), int(minH)) ) for (x, y, w, h) in faces: cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2) idnum, confidence = recognizer.predict(gray[y:y+h, x:x+w]) if confidence < 100: idnum = names[idnum]#问题出在这儿 confidence = "{0}%".format(round(100 - confidence)) else: idnum = "unknown" confidence = "{0}%".format(round(100 - confidence)) cv2.putText(img, str(idnum), (x+5, y-5), font, 1, (0, 0, 255), 1) cv2.putText(img, str(confidence), (x+5, y+h-5), font, 1, (0, 0, 0), 1) cv2.imshow('camera', img) k = cv2.waitKey(10) if k == 27: break cam.release() cv2.destroyAllWindows() ```
pycharm 打开摄像头失败
# pycharm 打开摄像头失败 ## 环境 pycharm2019.2,python3.7 ,Mac OS X10.14 ```py import cv2 cap = cv2.VideoCapture(0) while True: ret, frame = cv2.imshow("Video", frame) if cv2.waitKey(1) == 27: break cap.release() cv2.destroyAllWindows() ``` 报错 /usr/local/bin/python3.7 "/Users/pig/学习/编程练习/Pycharm/Python 2.7/cv_test/" Process finished with exit code 134 (interrupted by signal 6: SIGABRT)
最近用Opencv3.0 VideoCapture类open函数打开rtsp流,当我打开一个无效的rtsp流时, 将近一分钟才提示我错误。。。如何设置超时呢?
想请教一下Java可否用OpenCV播放网络视频,该怎样做。 做一个项目需要播放某个视频地址的视频流,之前在Python里用 ``` import cv2 cap = cv2.VideoCapture(" -视频地址- ") while cap.isOpened(): _, img = cv2.imshow('video', img) cv2.waitKey(10) ``` 就可以打开和在窗口里播放了。 现在需要用Java实现,看到Java里也有一样的Function,就也如此调用了,代码如下: ``` import org.opencv.core.Core; import org.opencv.core.Mat; import org.opencv.videoio.VideoCapture; import org.opencv.highgui.HighGui; public class Main { static{ System.loadLibrary(Core.NATIVE_LIBRARY_NAME); } public static void main(String[] args) { VideoCapture cap = new VideoCapture("rtsp://"); while(cap.isOpened()){ boolean flag; Mat image = new Mat(); flag =; if(!flag){ System.out.println("--- not able to get image ---"); break; } HighGui.imshow("video", image); HighGui.waitKey(10); } System.out.println("--- video capture closed ---"); } } ``` 但是运行发现并不能播放,因为cap.isOpened()返回了false。 这个地址的视频用VLC等视频播放软件是可以播放的,所以应该不是地址的问题。 用同样的代码,但是VideoCapture()的参数改成本地视频文件的路径,就可以正常播放,所以逻辑也没问题? OpenCV应用于Java的文档有点少,没有找到相关的内容。想请教一下Java可以用OpenCV播放网络视频吗?如果可以,该怎样做?谢谢。
小白,想用Python3.7+Opencv4.1.1+APP:IP摄像头,调用手机的摄像头。 结果出现如下状况,还请各位大佬指点!
小白,想用Python3.7+Opencv4.1.1+APP:IP摄像头,调用手机的摄像头。 结果出现如下状况,还请各位大佬指点! ``` import cv2 url = '' cap = cv2.VideoCapture(url) while (1): ret, frame = cv2.imshow('', if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() ``` 我修改了一下,代码成这样了 ``` import cv2 url = '' cap = cv2.VideoCapture(url) while cap.isOpened(): ret, frame = cv2.imshow('frame', frame) if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() ``` 运行并没有反应,我感觉直接没有进while,也就是说摄像头根本没数据流,请问各位大佬怎么改呢?
在linux下,使用python无法打开摄像头,第一次编译程序是没有任何反映,第二次就显示libv4l2: error setting pixformat: Device or resource busy HIGHGUI ERROR: libv4l unable to ioctl S_FMT libv4l2: error setting pixformat: Device or resource busy libv4l1: error setting pixformat: Device or resource busy libv4l2: error setting pixformat: Device or resource busy libv4l1: error setting pixformat: Device or resource busy HIGHGUI ERROR: libv4l unable to ioctl VIDIOCSPICT 这种错误,此后一直显示资源繁忙。但是使用opencv能够正常的打开摄像头。以下是python的程序import cv2 import numpy as np cap = cv2.VideoCapture(0) while(1): # get a frame ret, frame = # show a frame cv2.imshow('capture', frame) if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() 求大神帮忙解决,没有人有类似的问题吗
Python OpenCV人脸识别错误待解
在编写人脸识别的时候(代码如下): ``` import cv2 import numpy as np cascPath = "C:\opencv\sources\data\haarcascades\haarcascade_frontalface_alt2.xml" faceCascade = cv2.CascadeClassifier(cascPath) video_capture = cv2.VideoCapture(0) while True: ret, frame = gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=10, #Adjust accuracy minSize=(50, 50), flags=cv2.CASCADE_SCALE_IMAGE ) for (x, y, w, h) in faces: cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255), 2) cv2.imshow('Video', frame) if cv2.waitKey(1) & 0xFF == ord('q'): break if cv2.waitKey(1) & 0xFF == ord('s'): cv2.imwrite('test1.png', frame) img = cv2.imread("test1.png") face = img[x:x+w,y:y+h] cv2.imshow('Face', face) cv2.imwrite("face_detected.png", face) images=[] images.append(cv2.imread("klp.jpg",cv2.IMREAD_GRAYSCALE)) images.append(cv2.imread("wh.jpg",cv2.IMREAD_GRAYSCALE)) images.append(cv2.imread("wk.jpg",cv2.IMREAD_GRAYSCALE)) Labels=[0,1,2] recognizer = cv2.face.LBPHFaceRecognizer_create() recognizer.train(images, np.array(Labels)) predict_image = cv2.imread("face_detected.png",cv2.IMREAD_GRAYSCALE) label,confidence= recognizer.predict(predict_image) print("Label=", label) print("Confidence=", confidence) video_capture.release() cv2.destroyAllWindows() ``` 出现了如下问题: ``` Exception has occurred: error OpenCV(4.1.0) C:\projects\opencv-python\opencv\modules\core\src\matrix.cpp:235: error: (-215:Assertion failed) s >= 0 in function 'cv::setSize' File "F:\learn_python\Models\DetectFace&", line 41, in <module> recognizer.train(images, np.array(Labels)) ```
OpenCV videocapture类接口函数set曝光
**_python-opencv获取视频总帧数上百万帧,但是一帧一帧的读取播放时,真实播放的只有几万帧,是怎么回事?我改如何处理这样的视频,我需要点击时间进度条让它到指定的位置,但是老是匹配不正确_** 这是我的简单读取的代码,帧率是不是不正确啊,按照25播放能正常显示而且最后我用25计算总帧数也能对的上,但是使用25去取出具体的某一帧的话,不能正确的取出,最简单的就是快进快退,拖动时间进度条,不能对应上,哪位大神能帮帮忙? 还有谁遇到过这样的视频,怎么处理的,不会按正确的帧率转换后在处理吧,这样对我们来讲,效率太低了 ``` import cv2 capture = cv2.VideoCapture("./test.avi") fps = capture.get(cv2.CAP_PROP_FPS) print('fps = ', fps) total_s = capture.get(cv2.CAP_PROP_FRAME_COUNT) print("total_s = ", total_s) cv2.namedWindow("test", 0) cv2.resizeWindow("test", 640, 480) frame_index = 0 if capture.isOpened(): while True: success, frame = wait_key = cv2.waitKeyEx(1) if wait_key == 27: break if success: frame_index += 1 cv2.imshow("test", frame) else: print("end") break print("frame_index = ", frame_index) ``` 运行后的结果是: fps = 600.0 total_s = 167305.0 end frame_index = 6972 最后几个c币了,希望大神帮帮忙。
我先用VLC发RTSP视频流,视频格式为mp4,该视频流可以用另一个VLC成功接收并播放,而我用python的opencv库写的代码不能成功读取,该代码可以成功读取本地视频。 代码如下: import cv2 video_full_path = "rtsp://cky:30121323@" cap = cv2.VideoCapture(video_full_path) print(cap.isOpened()) frame_count = 1 success = True while (success): success, frame = print(frame) print('Read a new frame: ', success) params = [] # params.append(cv.CV_IMWRITE_PXM_BINARY) params.append(1) cv2.imwrite("video" + "_%d.jpg" % frame_count, frame, params) frame_count = frame_count + 1 cap.release() 望解决。
class CV_EXPORTS SimilarRects { public: SimilarRects(double _eps) : eps(_eps) {} inline bool operator()(const Rect& r1, const Rect& r2) const { double delta = eps*( std::min(r1.width, r2.width) + std::min(r1.height, r2.height))*0.5; return std::abs(r1.x - r2.x) <= delta && std::abs(r1.y - r2.y) <= delta && std::abs(r1.x + r1.width - r2.x - r2.width) <= delta && std::abs(r1.y + r1.height - r2.y - r2.height) <= delta; } double eps; }; 1. 对eps*( std::min(r1.width, r2.width) + std::min(r1.height, r2.height))*0.5;进行报错 error2059 2589 分别是::语法错误 2. 修改后double delta = eps*( (std::min)(r1.width, r2.width) + (std::min)(r1.height, r2.height))*0.5; 又会报 LNK2001 无法解析的外部符号 "public: __cdecl cv::VideoCapture::VideoCapture(int)" (??0VideoCapture@cv@@QEAA@H@Z)这种错误 求大神们解答
源代码:#include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/core/core.hpp> using namespace cv; int main() { VideoCapture cap(0); if(!cap.isOpened()) { return -1; } Mat frame; bool stop = false; while(!stop) {; imshow("Video",frame); if(waitKey(30)==27) //Esc键退出 { stop = true; } } return 0; } 报错:~$ g++ -o opencvq opencv.cpp /tmp/ccQJKVIi.o: In function `main': opencv.cpp:(.text+0x29): undefined reference to `cv::VideoCapture::VideoCapture(int)' opencv.cpp:(.text+0x38): undefined reference to `cv::VideoCapture::isOpened() const' opencv.cpp:(.text+0x80): undefined reference to `cv::VideoCapture::read(cv::Mat&)' opencv.cpp:(.text+0xf9): undefined reference to `cv::Mat::ones(cv::Size_<int>, int)' opencv.cpp:(.text+0x146): undefined reference to `cv::VideoCapture::read(cv::Mat&)' opencv.cpp:(.text+0x33d): undefined reference to `cv::_OutputArray::_OutputArray(cv::Mat&)' opencv.cpp:(.text+0x356): undefined reference to `cv::_InputArray::_InputArray(cv::Mat const&)' opencv.cpp:(.text+0x36f): undefined reference to `cv::_InputArray::_InputArray(cv::Mat const&)' opencv.cpp:(.text+0x3a1): undefined reference to `cv::addWeighted(cv::_InputArray const&, double, cv::_InputArray const&, double, double, cv::_OutputArray const&, int)' opencv.cpp:(.text+0x3ba): undefined reference to `cv::_InputArray::_InputArray(cv::Mat const&)' opencv.cpp:(.text+0x3fd): undefined reference to `cv::imshow(std::string const&, cv::_InputArray const&)' opencv.cpp:(.text+0x425): undefined reference to `cv::waitKey(int)' opencv.cpp:(.text+0x489): undefined reference to `cv::VideoCapture::~VideoCapture()' opencv.cpp:(.text+0x51a): undefined reference to `cv::VideoCapture::~VideoCapture()' /tmp/ccQJKVIi.o: In function `cv::Mat::~Mat()': opencv.cpp:(.text._ZN2cv3MatD2Ev[_ZN2cv3MatD5Ev]+0x39): undefined reference to `cv::fastFree(void*)' /tmp/ccQJKVIi.o: In function `cv::Mat::release()': opencv.cpp:(.text._ZN2cv3Mat7releaseEv[_ZN2cv3Mat7releaseEv]+0x47): undefined 
reference to `cv::Mat::deallocate()' /tmp/ccQJKVIi.o: In function `cv::Mat::operator()(cv::Rect_<int> const&) const': opencv.cpp:(.text._ZNK2cv3MatclERKNS_5Rect_IiEE[_ZNK2cv3MatclERKNS_5Rect_IiEE]+0x27): undefined reference to `cv::Mat::Mat(cv::Mat const&, cv::Rect_<int> const&)' collect2: error: ld returned 1 exit status 第一次使用,在此请教各位前辈,谢谢了
此博客仅为我业余记录文章所用,发布到此,仅供网友阅读参考,如有侵权,请通知我,我会删掉。 补充 有不少读者留言说本文章没有用,因为天气预报直接打开手机就可以收到了,为何要多此一举发送到邮箱呢!!!那我在这里只能说:因为你没用,所以你没用!!! 这里主要介绍的是思路,不是天气预报!不是天气预报!!不是天气预报!!!天气预报只是用于举例。请各位不要再刚了!!! 下面是我会用到的两个场景: 每日下
你知道的越多,你不知道的越多 点赞再看,养成习惯 GitHub上已经开源 有一线大厂面试点脑图、个人联系方式,欢迎Star和完善 前言 消息队列在互联网技术存储方面使用如此广泛,几乎所有的后端技术面试官都要在消息队列的使用和原理方面对小伙伴们进行360°的刁难。 作为一个在互联网公司面一次拿一次Offer的面霸,打败了无数
8年经验面试官详解 Java 面试秘诀
    作者 | 胡书敏 责编 | 刘静 出品 | CSDN(ID:CSDNnews) 本人目前在一家知名外企担任架构师,而且最近八年来,在多家外企和互联网公司担任Java技术面试官,前后累计面试了有两三百位候选人。在本文里,就将结合本人的面试经验,针对Java初学者、Java初级开发和Java开发,给出若干准备简历和准备面试的建议。   Java程序员准备和投递简历的实
我清晰的记得,刚买的macbook pro回到家,开机后第一件事情,就是上了淘宝网,花了500元钱,找了一个上门维修电脑的师傅,上门给我装了一个windows系统。。。。。。 表砍我。。。 当时买mac的初衷,只是想要个固态硬盘的笔记本,用来运行一些复杂的扑克软件。而看了当时所有的SSD笔记本后,最终决定,还是买个好(xiong)看(da)的。 已经有好几个朋友问我mba怎么样了,所以今天尽量客观
二哥,你好,我想知道一般程序猿都如何接私活,我也想接,能告诉我一些方法吗? 上面是一个读者“烦不烦”问我的一个问题。其实不止是“烦不烦”,还有很多读者问过我类似这样的问题。 我接的私活不算多,挣到的钱也没有多少,加起来不到 20W。说实话,这个数目说出来我是有点心虚的,毕竟太少了,大家轻喷。但我想,恰好配得上“一般程序员”这个称号啊。毕竟苍蝇再小也是肉,我也算是有经验的人了。 唾弃接私活、做外
Java编程规约命名风格 命名风格 类名使用UpperCamelCase风格 方法名,参数名,成员变量,局部变量都统一使用lowerCamelcase风格 常量命名全部大写,单词间用下划线隔开, 力求语义表达完整清楚,不要嫌名字长 ...
小编是一个理科生,不善长说一些废话。简单介绍下原理然后直接上代码。 使用的工具(Python+pycharm2019.3+selenium+xpath+chromedriver)其中要使用pycharm也可以私聊我selenium是一个框架可以通过pip下载 pip install selenium -i 
你知道的越多,你不知道的越多 点赞再看,养成习惯 本文 GitHub 已收录,有一线大厂面试点思维导图,也整理了很多我的文档,欢迎Star和完善,大家面试可以参照考点复习,希望我们一起有点东西。 前前言 为啥今天有个前前言呢? 因为你们的丙丙啊,昨天有牌面了哟,直接被微信官方推荐,知乎推荐,也就仅仅是还行吧(心里乐开花)
前奏: 今天2B哥和大家分享一位前几天面试的一位应聘者,工作4年26岁,统招本科。 以下就是他的简历和面试情况。 基本情况: 专业技能: 1、&nbsp;熟悉Sping了解SpringMVC、SpringBoot、Mybatis等框架、了解SpringCloud微服务 2、&nbsp;熟悉常用项目管理工具:SVN、GIT、MAVEN、Jenkins 3、&nbsp;熟悉Nginx、tomca
Python爬虫精简步骤1 获取数据
爬虫的工作分为四步: 1.获取数据。爬虫程序会根据我们提供的网址,向服务器发起请求,然后返回数据。 2.解析数据。爬虫程序会把服务器返回的数据解析成我们能读懂的格式。 3.提取数据。爬虫程序再从中提取出我们需要的数据。 4.储存数据。爬虫程序把这些有用的数据保存起来,便于你日后的使用和分析。 这一篇的内容就是:获取数据。 首先,我们将会利用一个强大的库——requests来获取数据。 在电脑上安装
Python绘图,圣诞树,花,爱心 | Turtle篇
1.画圣诞树 import turtle screen = turtle.Screen() screen.setup(800,600) circle = turtle.Turtle() circle.shape('circle') circle.color('red') circle.speed('fastest') circle.up() square = turtle.Turtle()
CPU对每个程序员来说,是个既熟悉又陌生的东西? 如果你只知道CPU是中央处理器的话,那可能对你并没有什么用,那么作为程序员的我们,必须要搞懂的就是CPU这家伙是如何运行的,尤其要搞懂它里面的寄存器是怎么一回事,因为这将让你从底层明白程序的运行机制。 随我一起,来好好认识下CPU这货吧 把CPU掰开来看 对于CPU来说,我们首先就要搞明白它是怎么回事,也就是它的内部构造,当然,CPU那么牛的一个东
2020年1月17日,国家统计局发布了2019年国民经济报告,报告中指出我国人口突破14亿。 猪哥的朋友圈被14亿人口刷屏,但是很多人并没有看到我国复杂的人口问题:老龄化、男女比例失衡、生育率下降、人口红利下降等。 今天我们就来分析一下我们国家的人口数据吧! 更多有趣分析教程,扫描下方二维码关注vx公号「裸睡的猪」 即可查看! 一、背景 1.人口突破14亿 2020年1月17日,国家统计局发布
第零关 进入传送门开始第0关(游戏链接) 请点击链接进入第1关: 连接在左边→ ←连接在右边 看不到啊。。。。(只能看到一堆大佬做完的留名,也能看到菜鸡的我,在后面~~) 直接fn+f12吧 &lt;span&gt;连接在左边→&lt;/span&gt; &lt;a href="first.php"&gt;&lt;/a&gt; &lt;span&gt;←连接在右边&lt;/span&gt; o
相信大家都已经收到国务院延长春节假期的消息,接下来,在家远程办公可能将会持续一段时间。 但是问题来了。远程办公不是人在电脑前就当坐班了,相反,对于沟通效率,文件协作,以及信息安全都有着极高的要求。有着非常多的挑战,比如: 1在异地互相不见面的会议上,如何提高沟通效率? 2文件之间的来往反馈如何做到及时性?如何保证信息安全? 3如何规划安排每天工作,以及如何进行成果验收? ......
截止目前,我已经分享了如下几篇文章: 一个程序在计算机中是如何运行的?超级干货!!! 作为一个程序员,CPU的这些硬核知识你必须会! 作为一个程序员,内存的这些硬核知识你必须懂! 这些知识可以说是我们之前都不太重视的基础知识,可能大家在上大学的时候都学习过了,但是嘞,当时由于老师讲解的没那么有趣,又加上这些知识本身就比较枯燥,所以嘞,大家当初几乎等于没学。 再说啦,学习这些,也看不出来有什么用啊!
偶然间,在知乎上看到一个问题 一时间,勾起了我深深的回忆。 以前在厂里打过两次工,做过家教,干过辅导班,做过中介。零下几度的晚上,贴过广告,满脸、满手地长冻疮。   再回首那段岁月,虽然苦,但让我学会了坚持和忍耐。让我明白了,在这个世界上,无论环境多么的恶劣,只要心存希望,星星之火,亦可燎原。   下文是原回答,希望能对你能有所启发。   如果我说,这个世界上人真的分三六九等,
记录学习笔记是一个重要的习惯,不希望学习过的东西成为过眼云烟。做总结的同时也是一次复盘思考的过程。 本文是根据阅读得到 App上《万维钢·精英日课》部分文章后所做的一点笔记和思考。学习是一个系统的过程,思维模型的建立需要相对完整的学习和思考过程。以下观点是在碎片化阅读后总结的一点心得总结。
B 站上有哪些很好的学习资源?
哇说起B站,在小九眼里就是宝藏般的存在,放年假宅在家时一天刷6、7个小时不在话下,更别提今年的跨年晚会,我简直是跪着看完的!! 最早大家聚在在B站是为了追番,再后来我在上面刷欧美新歌和漂亮小姐姐的舞蹈视频,最近两年我和周围的朋友们已经把B站当作学习教室了,而且学习成本还免费,真是个励志的好平台ヽ(.◕ฺˇд ˇ◕ฺ;)ノ 下面我们就来盘点一下B站上优质的学习资源: 综合类 Oeasy: 综合
你好呀,我是沉默王二,一个和黄家驹一样身高,和刘德华一样颜值的程序员。虽然已经写了十多年的 Java 代码,但仍然觉得自己是个菜鸟(请允许我惭愧一下)。 在一个月黑风高的夜晚,我思前想后,觉得再也不能这么蹉跎下去了。于是痛下决心,准备通过输出的方式倒逼输入,以此来修炼自己的内功,从而进阶成为一名真正意义上的大神。与此同时,希望这些文章能够帮助到更多的读者,让大家在学习的路上不再寂寞、空虚和冷。 ...
Web播放器解决了在手机浏览器和PC浏览器上播放音视频数据的问题,让视音频内容可以不依赖用户安装App,就能进行播放以及在社交平台进行传播。在视频业务大数据平台中,播放数据的统计分析非常重要,所以Web播放器在使用过程中,需要对其内部的数据进行收集并上报至服务端,此时,就需要对发生在其内部的一些播放行为进行事件监听。 那么Web播放器事件监听是怎么实现的呢? 01 监听事件明细表 名
本文知识点较多,篇幅较长,请耐心学习 MySQL已经成为时下关系型数据库产品的中坚力量,备受互联网大厂的青睐,出门面试想进BAT,想拿高工资,不会点MySQL优化知识,拿offer的成功率会大大下降。 为什么要优化 系统的吞吐量瓶颈往往出现在数据库的访问速度上 随着应用程序的运行,数据库的中的数据会越来越多,处理时间会相应变慢 数据是存放在磁盘上的,读写速度无法和内存相比 如何优化 设计
Linux 命令(122)—— watch 命令
1.命令简介 2.命令格式 3.选项说明 4.常用示例 参考文献 [1] watch(1) manual
Linux 命令(121)—— cal 命令
1.命令简介 2.命令格式 3.选项说明 4.常用示例 参考文献 [1] cal(1) manual
1.工具:eclipse+SQLyog 2.介绍:实现的内容就是显示新闻的基本信息,然后一个增删改查的操作。 3.数据库表设计 列名 中文名称 数据类型 长度 非空 newsId 文章ID int 11 √ newsTitle 文章标题 varchar 20 √ newsContent 文章内容 text newsStatus 是否审核 varchar 10 news...
今天介绍的项目是使用 Itchat 发送统计报告 项目功能设计: 定时爬取疫情数据存入Mysql 进行数据分析制作疫情报告 使用itchat给亲人朋友发送分析报告(本文) 基于Django做数据屏幕 使用Tableau做数据分析 来看看最终效果 目前已经完成,预计2月12日前更新 使用 itchat 发送数据统计报告 itchat 是一个基于 web微信的一个框架,但微信官方并不允
相关热词 c# 时间比天数 c# oracle查询 c# 主动推送 事件 c# java 属性 c# 控制台 窗体 c# 静态类存值 c#矢量作图 c#窗体调用外部程式 c# enum是否合法 c# 如何卸载引用