小筱熬夜中 2024-09-09 10:20
浏览 2
已结题

ffmpy.FFRuntimeError: `ffmpeg -i C:\Users\xiaoxiao\AppData\Local\Tem

运行face_recognition的实时人脸识别系统的时候遇见了这种问题

img


那个长长的问题在这:
ffmpy.FFRuntimeError: ffmpeg -i C:\Users\xiaoxiao\AppData\Local\Temp\gradio\e16f7da0230384a276558f14a0e8ed72ad995c823a7d7dee59cb2443908d42b0\sample.webm -vf hflip -c:a copy -an C:\Users\xiaoxiao\AppData\Local\Temp\gradio\e16f7da0230384a276558f14a0e8ed72ad995c823a7d7dee59cb2443908d42b0\sample_flip.webm exited with status 3165764104

问了老 师,最开始修改了这个录制视频的位置,可是它还是会生成到这。后面老 师说不知道怎么解决,让我重装系统,重装后还是一模一样的问题。不知道怎么解决了!有没有大 佬能帮帮我。第一次发帖不太懂,我把我的代码放在下面:

import cv2
import numpy as np
import os
from datetime import datetime
import gradio as gr
import face_recognition
from PIL import Image, ImageDraw, ImageFont

def sayhello(name):
    """Return a Chinese greeting for *name* (demo handler for gradio)."""
    greeting = "您好:" + name
    return greeting

'''
demo = gr.Interface(
    fn = sayhello,
    title='Gradio入门案例',
    inputs = [gr.Text(label="请输入您的遵循大名")],
    outputs = [gr.Text(label='输出结果结果')]
)
'''
# NOTE(review): the triple-quoted block above is disabled demo code, kept as-is.
path = 'database'  # directory holding the reference portrait images
images = []        # loaded reference images (filled by the loop further below)
className = []     # person names derived from the portrait file names
myList = os.listdir(path)  # file names of all reference portraits in `path`
print(myList)




def face_compare(src, dest):
    """Compare the first face found in each of two images.

    Args:
        src, dest: images as numpy arrays. The cvtColor below swaps the R/B
            channel order (NOTE(review): gr.Image delivers RGB, so after the
            swap face_recognition actually sees BGR — confirm intended).

    Returns:
        (src_path, dest_path, result_text): file paths of the two annotated
        images and a human-readable verdict string.

    Raises:
        ValueError: when no face can be located in either image (the original
            crashed with a bare IndexError on the [0] subscript).
    """
    imgSrc = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
    imgDest = cv2.cvtColor(dest, cv2.COLOR_BGR2RGB)

    # Locate faces up front so we can fail with a clear message instead of
    # an IndexError when an image contains no detectable face.
    srcLocs = face_recognition.face_locations(imgSrc)
    destLocs = face_recognition.face_locations(imgDest)
    if not srcLocs or not destLocs:
        raise ValueError("未检测到人脸,请上传包含清晰人脸的图片")

    faceLoc = srcLocs[0]                                     # (top, right, bottom, left)
    encodeSrc = face_recognition.face_encodings(imgSrc)[0]   # 128-d face descriptor
    cv2.rectangle(imgSrc, (faceLoc[3], faceLoc[0]), (faceLoc[1], faceLoc[2]), (255, 0, 255), 2)

    faceLocDest = destLocs[0]
    encodeDest = face_recognition.face_encodings(imgDest)[0]
    cv2.rectangle(imgDest, (faceLocDest[3], faceLocDest[0]), (faceLocDest[1], faceLocDest[2]), (255, 0, 255), 2)

    result = face_recognition.compare_faces([encodeSrc], encodeDest)   # [bool]
    faceDis = face_recognition.face_distance([encodeSrc], encodeDest)  # Euclidean distance
    print(result, faceDis)
    cv2.putText(imgDest, f'{result}{round(faceDis[0], 2)}', (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255),
                2)  # overlay the raw comparison result

    # cv2.imwrite fails silently when the target directory does not exist.
    os.makedirs('detected', exist_ok=True)
    src_filename = "detected/src_image.png"
    dest_filename = "detected/dest_image.png"
    cv2.imwrite(src_filename, imgSrc)
    cv2.imwrite(dest_filename, imgDest)
    result_text = '是同一个人' if result[0] else '不是同一个人'

    return src_filename, dest_filename, result_text


def cv2AddChineseText(img, text, position, textColor, textSize):
    """Draw CJK text on an image via PIL (cv2.putText cannot render Chinese).

    Accepts either an OpenCV BGR ndarray or a PIL image; always returns an
    OpenCV BGR ndarray. Requires "simsun.ttc" next to the script.
    """
    if isinstance(img, np.ndarray):
        # OpenCV stores BGR; PIL expects RGB, so swap channels on the way in.
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    font_style = ImageFont.truetype("simsun.ttc", textSize, encoding="utf-8")
    ImageDraw.Draw(img).text(position, text, textColor, font=font_style)
    # Swap channels back so callers keep working with OpenCV conventions.
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

for cl in myList:  # load every portrait and remember the person's name
    #curImg = cv2.imread(f'{path}/{cl}')
    # Read raw bytes and decode via imdecode so file names containing
    # Chinese characters load correctly (cv2.imread fails on them on Windows).
    with open(f'{path}/{cl}', 'rb') as f:
        image_data = f.read()
    curImg = cv2.imdecode(np.frombuffer(image_data, np.uint8), cv2.IMREAD_COLOR)

    images.append(curImg)
    className.append(os.path.splitext(cl)[0])  # file name without extension = person name
print(className)


def findEncodings(images):
    """Compute a 128-d face encoding for every reference image.

    Args:
        images: list of BGR images (as produced by cv2.imdecode above).

    Returns:
        List of encodings, one per input image, in the same order as
        *images* (the order must stay aligned with `className`).

    Raises:
        ValueError: when no face is detected in a reference image. The
            original raised a bare IndexError from the [0] subscript, which
            gave no hint about which database photo was the problem.
    """
    encodeList = []
    for idx, img in enumerate(images):
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # face_recognition expects RGB
        encodings = face_recognition.face_encodings(rgb)
        if not encodings:
            raise ValueError(f"database 中第 {idx + 1} 张图片未检测到人脸,请更换图片")
        encodeList.append(encodings[0])
    return encodeList

# Pre-compute encodings of all reference faces once at import time.
encodeListKnown = findEncodings(images)
print('encoding complete')


def markAttendance(name):
    """Append an attendance record "<name>,<HH:MM:SS>" to Attendance.csv.

    A name is recorded at most once per file. Creates the file on first use
    (the original opened it with 'r+' and raised FileNotFoundError when the
    CSV did not exist yet).

    Args:
        name: person name to record.
    """
    try:
        with open('Attendance.csv', 'r', encoding='utf-8') as f:
            lines = f.readlines()
    except FileNotFoundError:
        lines = []  # first run: no file yet, nobody recorded
    # strip() so a bare name line (no comma / trailing newline) still matches
    known = {line.split(',')[0].strip() for line in lines if line.strip()}
    if name not in known:
        now = datetime.now()
        dtString = now.strftime('%H:%M:%S')  # time-of-day stamp for the record
        with open('Attendance.csv', 'a', encoding='utf-8') as f:
            # keep the original leading-newline record format
            f.write(f'\n{name},{dtString}')



def face_rec(img):
    """Recognize known faces in one image, annotate it, and record attendance.

    Args:
        img: image as a numpy array. The cvtColor below swaps the R/B channel
            order (NOTE(review): gr.Image delivers RGB — confirm intended).

    Returns:
        Path of the saved annotated image, or None when no known face was
        recognized (the original hit UnboundLocalError on `filename` in that
        case).
    """
    imgs = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    faceCurFrame = face_recognition.face_locations(imgs)               # face bounding boxes
    encodesCurFrame = face_recognition.face_encodings(imgs, faceCurFrame)  # encodings per box

    filename = None  # stays None when nobody is recognized
    for encodeFace, faceLoc in zip(encodesCurFrame, faceCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)  # Euclidean distances
        matchIndex = np.argmin(faceDis)  # index of the closest known face
        if matches[matchIndex]:
            name = className[matchIndex].upper()
            print(name)
            # face_locations returns (top, right, bottom, left) in THIS image's
            # coordinates. The original multiplied them by 4 — a leftover from
            # webcam code that downscales frames by 0.25 — which drew the boxes
            # outside the (un-resized) image.
            y1, x2, y2, x1 = faceLoc
            cv2.rectangle(imgs, (x1, y1), (x2, y2), (0, 255, 0), 1)
            cv2.rectangle(imgs, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
            imgs = cv2AddChineseText(imgs, name, (100, 100), (250, 242, 131), 30)
            markAttendance(name)  # log this person once
            # cv2.imwrite fails silently when the directory does not exist.
            os.makedirs('detected', exist_ok=True)
            filename = "detected/output_image.png"
            cv2.imwrite(filename, imgs)
    return filename

def get_second(capture):
    """Return (fps, frame_count, duration_seconds) for an opened capture.

    Args:
        capture: a cv2.VideoCapture-like object exposing isOpened() and get().

    Returns:
        Tuple of ints (frame rate, total frame count, duration in SECONDS —
        the original comment claimed minutes, which was wrong), or None when
        the capture is not opened.
    """
    if capture.isOpened():
        rate = capture.get(5)          # property 5 = CAP_PROP_FPS
        FrameNumber = capture.get(7)   # property 7 = CAP_PROP_FRAME_COUNT
        if not rate:
            # Some containers report fps == 0; the original raised
            # ZeroDivisionError here.
            return 0, int(FrameNumber), 0
        duration = FrameNumber / rate  # total frames / fps = seconds
        return int(rate), int(FrameNumber), int(duration)
    return None


def snap(cap):
    """Run face recognition over a recorded video and write an annotated copy.

    Args:
        cap: path of the input video file (as delivered by gr.Video).

    Returns:
        Path of the annotated output video, 'result.mp4'.
    """
    cap = cv2.VideoCapture(cap)
    # OpenCV's default mp4 codec is not playable in browsers; browsers require
    # H.264-encoded mp4, so request the H264 fourcc explicitly.
    fourcc = cv2.VideoWriter_fourcc(*'H264')
    fps = cap.get(cv2.CAP_PROP_FPS)
    print("视频帧率=>", fps)  # original label said "总帧数" (frame count) for the fps value
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Frames are resized to 1/4 below, so the writer must use the quarter size
    # or the output file is corrupt.
    out = cv2.VideoWriter('result.mp4', fourcc, fps, (int(width / 4), int(height / 4)))

    if cap.isOpened():
        # Only query stats after confirming the capture opened: get_second
        # returns None otherwise and the unpack would crash.
        info = get_second(cap)
        if info is not None:
            rate, frame_count, duration = info
            # duration is in seconds (the original printed it as 分钟/minutes)
            print(f"帧速率:{rate},视频文件的帧数:{frame_count},时长:{duration}秒")

        while True:
            ret, frame = cap.read()
            if not ret:
                break  # end of stream

            frame = cv2.resize(frame, (0, 0), None, 0.25, 0.25)  # 1/4 size for speed
            faceCurFrame = face_recognition.face_locations(frame)
            encodesCurFrame = face_recognition.face_encodings(frame, faceCurFrame)

            for encodeFace, faceLoc in zip(encodesCurFrame, faceCurFrame):
                matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
                faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
                matchIndex = np.argmin(faceDis)  # closest known face
                if matches[matchIndex]:
                    name = className[matchIndex].upper()
                    print(name)
                    # faceLoc is already in the RESIZED frame's coordinates and we
                    # draw on that same resized frame, so no scaling is needed.
                    # The original multiplied by 4 and drew the boxes off-frame.
                    y1, x2, y2, x1 = faceLoc  # (top, right, bottom, left)
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 1)
                    cv2.rectangle(frame, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
                    frame = cv2AddChineseText(frame, name, (100, 100), (250, 242, 131), 30)
            out.write(frame)
    else:
        print("失败")

    cap.release()
    out.release()

    result_video_file = "result.mp4"  # original local was misspelled 'result_vido_file'
    return result_video_file

# Tab 1: compare two uploaded photos and report whether they show the same person.
facecomp = gr.Interface(
    fn = face_compare,
    title='face_recognition的人脸对比系统',
    inputs = [gr.Image(label='源图片'),gr.Image(label='目标图片')],
    outputs = [gr.Image(show_label=False),gr.Image(show_label=False),gr.Text(label='人脸对比结果')]
)


# Tab 2: identify a single uploaded photo against the reference database.
facerec = gr.Interface(
    fn = face_rec,
    title='face_recognition的人脸识别系统',
    inputs = gr.Image(),
    outputs = "image",
    examples=["images/person01.jpg", "images/person02.jpg", "images/person03.jpg","images/person04.jpg","images/person05.jpg","images/person06.jpg"],
)


# Tab 3: record from the webcam and run recognition over the captured video.
# NOTE(review): live=True re-invokes `snap` on every change of the webcam
# component — presumably only the final recording is wanted; verify.
webcamrec = gr.Interface(
    fn = snap,
    inputs = [gr.Video(sources='webcam')],
    outputs = gr.Video(),
    live=True,
    title='face_recognition的实时人脸识别系统'
)

# Combine the three demos into a single tabbed UI.
tabbed_interface = gr.TabbedInterface([facecomp,facerec,webcamrec], ["图片人脸对比检测", "图片人脸识别检测","摄像头人脸识别检测"])



if __name__ == '__main__':

    # NOTE(review): the reported ffmpy.FFRuntimeError on the webcam tab is
    # raised by gradio's own ffmpeg post-processing of the recorded .webm
    # (the `-vf hflip` command in the traceback), before `snap` ever runs —
    # check that a working ffmpeg.exe is on PATH and/or the gradio version,
    # rather than this script. TODO confirm.
    tabbed_interface.launch(
        server_port=8081

    )

  • 写回答

0条回答 默认 最新

    报告相同问题?

    问题事件

    • 系统已结题 9月17日
    • 创建了问题 9月9日

    悬赏问题

    • ¥65 Tree 树形控件实现单选功能,可以使用element也可以手写一个,实现全选为全选状态
    • ¥60 寻抓云闪付tn组成网页付款链接
    • ¥20 如何通过sentry收集上传Android ndk的崩溃?
    • ¥30 有关数值生成的稳健性探讨
    • ¥20 关于C/C++图形界面设计问题
    • ¥15 QT+Gstreamer框架开发视频采集,无法将waylandsink视频绑定qt窗口
    • ¥15 vs2010开发 WFP(windows filtering platform)异常
    • ¥30 8*8*25的矩阵和1*8*25的矩阵相乘
    • ¥15 Ubuntu20.04主机有两个网口,如何配置将其中一个网口用来接入外网,另一个网口用来给其他设备上网
    • ¥15 ml307r-dl如何实现录音功能