qq_42585801 2022-05-17 10:19 · acceptance rate: 33.3%
20 views
Closed

Real-time display of skeleton keypoints stored as JSON

After downloading someone else's code from GitHub, it only provides train and valid steps, with no real-time visualization, although a large number of JSON files are generated in the output folder. How can I turn these JSON files into a real-time 3D display? I would like something rotatable like a matplotlib plot, and it would be even better if the display did not have to drive a model from the 3D coordinates (e.g. SMPL).
Any help would be greatly appreciated!
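For orientation, here is a minimal, hedged sketch of the kind of rotatable matplotlib 3D view asked about above; it is not taken from the repo in question. It assumes one frame's JSON has already been parsed into an (18, 3) NumPy array of COCO-ordered joints named `joints` (a hypothetical placeholder, since the exact parsing depends on the repo's output format):

    # -*- coding: utf-8 -*-
    import numpy as np
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, registers the 3D projection on older matplotlib

    # Hypothetical input: an (18, 3) array of COCO-18 joints parsed from one JSON frame.
    # Replace the random placeholder with your own parsed keypoints.
    joints = np.random.rand(18, 3)

    # COCO-18 limb topology (the same links as links_COCO in the accepted answer below).
    LINKS = [(0, 1), (0, 14), (0, 15), (1, 2), (1, 5), (1, 8), (1, 11),
             (2, 3), (3, 4), (5, 6), (6, 7), (8, 9), (9, 10),
             (11, 12), (12, 13), (14, 16), (15, 17)]

    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")  # this axes can be rotated with the mouse
    ax.scatter(joints[:, 0], joints[:, 1], joints[:, 2], c="r", s=30)
    for a, b in LINKS:
        ax.plot([joints[a, 0], joints[b, 0]],
                [joints[a, 1], joints[b, 1]],
                [joints[a, 2], joints[b, 2]], c="b")
    ax.set_xlabel("x"); ax.set_ylabel("y"); ax.set_zlabel("z")
    plt.show()

Dragging inside the window rotates the view, so no SMPL or other mesh model is needed; the joint coordinates alone are enough.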
  

1 answer

  • 赵4老师 2022-05-17 13:38

    For reference only:

    import json
    import os
    
    if __name__ == "__main__":
        fn=r'keypoints.json'
        # read one OpenPose-style keypoints JSON file
        with open(fn, 'r') as f:
            data = json.load(f)
    #   print(data)

        people=data['people']
        L=len(people)
        for i in range(L):
            print("people %02d/%02d" %(i+1,L))
            # each of the 18 COCO joints is stored as a flat (x, y, confidence) triple
            for j in range(18):
                print("%02d: %5d %5d %4.2f" %(j, \
                    int(round(people[i]['pose_keypoints_2d'][j*3+0])), \
                    int(round(people[i]['pose_keypoints_2d'][j*3+1])), \
                              people[i]['pose_keypoints_2d'][j*3+2]))
    
    
    
    # -*- coding: utf-8 -*-
    # jsons2dat.py: bundle the per-frame *_keypoints.json files of a video into one .dat pickle
    
    import cv2
    import json
    import os
    import pickle
    import sys
    
    if __name__ == "__main__":
        if len(sys.argv)<2:
            print("Usage: jsons2dat.py fullpathvideofilename.ext")
            sys.exit()
        video = cv2.VideoCapture(sys.argv[1]) #r"video.mp4"
        if not video.isOpened():
            print("Could not open video")
            sys.exit()
        framecount = int(video.get(cv2.CAP_PROP_FRAME_COUNT ))
        datas=[]
        for i in range(framecount):
            fn=r"%s_%012d_keypoints.json" %(sys.argv[1][0:-4],i)
            try:
                f=open(fn,'r')
            except Exception as e:
                exceptmsg="f=open('%s','r') Exception:%s" %(fn,str(e))
                print(exceptmsg)
                sys.exit()
            data=json.load(f)
            f.close()
            datas.append(data)
            if i%100==0:
                print("%d/%d" %(i,framecount))
    
        # pickle the whole list of per-frame dicts next to the video file
        with open(sys.argv[1][0:-4]+".dat", 'wb') as f:
            pickle.dump(datas, f)
        del datas
        with open(sys.argv[1][0:-4]+".dat", 'rb') as f:
            datas = pickle.load(f)
        print(len(datas))
        people=datas[framecount-1]['people']
        L=len(people)
        for i in range(L):
            print("poeple %02d/%02d" %(i+1,L))
            for j in range(18):
                print("%02d: %5d %5d %4.2f" %(j, \
                    int(round(people[i]['pose_keypoints_2d'][j*3+0])), \
                    int(round(people[i]['pose_keypoints_2d'][j*3+1])), \
                              people[i]['pose_keypoints_2d'][j*3+2]))
    
    
    
    
    # -*- coding: utf-8 -*-
    # read the .dat pickle written by jsons2dat.py, overlay the COCO-18 skeletons on the video and display it
    
    import json
    import os
    from PIL import Image, ImageFont, ImageDraw
    import cv2
    import numpy as np
    import pickle
    
    THRESHOLD=0.2
    # RGBA colours for the 18 COCO joints, RGBA colours for their links, and the
    # link topology itself (-1 terminates each per-joint link list)
    nodecolors_COCO=[
        (192, 32, 96,150),# 0 nose
        (128, 64,  0,150),# 1 neck
        (192,128,  0,150),# 2 right shoulder
        (192,192,  0,150),# 3 right elbow
        (128,192,  0,150),# 4 right wrist
        ( 64,192, 32,150),# 5 left shoulder
        (  0,192,  0,150),# 6 left elbow
        (  0,192, 64,150),# 7 left wrist
        ( 64,192,128,150),# 8 right hip
        (  0,192,192,150),# 9 right knee
        (  0,128,192,150),#10 right ankle
        (  0, 64,128,150),#11 left hip
        ( 16, 16,128,150),#12 left knee
        ( 64,  0,192,150),#13 left ankle
        (128, 16,128,150),#14 right eye
        (192, 64,192,150),#15 left eye
        (255, 16,192,150),#16 right ear
        (192,  8, 64,150)]#17 left ear
    linkcolors_COCO=[
       [(  0,  0,160,150),( 90,  0,192,150),(160, 32,100,150)                  ],# 0 nose
       [(160,  0,  0,150),(160, 64,  0,150),(  0,160,  0,150),(  0,160,160,150)],# 1 neck
       [(160,100,  0,150)                                                      ],# 2 right shoulder
       [(160,160,  0,150)                                                      ],# 3 right elbow
       [                                                                       ],# 4 right wrist
       [(100,160,  0,150)                                                      ],# 5 left shoulder
       [( 64,160,  0,150)                                                      ],# 6 left elbow
       [                                                                       ],# 7 left wrist
       [(  0,160, 60,150)                                                      ],# 8 right hip
       [(  0,160,100,150)                                                      ],# 9 right knee
       [                                                                       ],#10 right ankle
       [(  0, 64,128,150)                                                      ],#11 left hip
       [(  0, 64,160,150)                                                      ],#12 left knee
       [                                                                       ],#13 left ankle
       [(120,  8,192,150)                                                      ],#14 right eye
       [(160,  8,100,150)                                                      ],#15 left eye
       [                                                                       ],#16 right ear
       [                                                                       ]]#17 left ear
    links_COCO=[
        [1,14,15,-1],  # 0 nose
        [2,5,8,11,-1], # 1 neck
        [3,-1],        # 2 right shoulder
        [4,-1],        # 3 right elbow
        [-1],          # 4 right wrist
        [6,-1],        # 5 left shoulder
        [7,-1],        # 6 left elbow
        [-1],          # 7 left wrist
        [9,-1],        # 8 right hip
        [10,-1],       # 9 right knee
        [-1],          #10 right ankle
        [12,-1],       #11 left hip
        [13,-1],       #12 left knee
        [-1],          #13 left ankle
        [16,-1],       #14 right eye
        [17,-1],       #15 left eye
        [-1],          #16 right ear
        [-1]]          #17 left ear
    
    if __name__ == "__main__":
        vfn=r'video.mp4'
    
        video = cv2.VideoCapture(vfn)
    #   video.set(cv2.CAP_PROP_POS_FRAMES,50)  # seek to a specific frame number before reading
    
        #include\opencv2\videoio.hpp
        #  0  CAP_PROP_POS_MSEC       Current position of the video file in milliseconds.
        #  1  CAP_PROP_POS_FRAMES     0-based index of the frame to be decoded/captured next.
        #  2  CAP_PROP_POS_AVI_RATIO  Relative position of the video file: 0 - start of the film, 1 - end of the film.
        #  3  CAP_PROP_FRAME_WIDTH    Width of the frames in the video stream.
        #  4  CAP_PROP_FRAME_HEIGHT   Height of the frames in the video stream.
        #  5  CAP_PROP_FPS            Frame rate.
        #  6  CAP_PROP_FOURCC         4-character code of codec.
        #  7  CAP_PROP_FRAME_COUNT    Number of frames in the video file.
        #  8  CAP_PROP_FORMAT         Format of the Mat objects returned by retrieve() .
        #  9  CAP_PROP_MODE           Backend-specific value indicating the current capture mode.
        # 10  CAP_PROP_BRIGHTNESS     Brightness of the image (only for cameras).
        # 11  CAP_PROP_CONTRAST       Contrast of the image (only for cameras).
        # 12  CAP_PROP_SATURATION     Saturation of the image (only for cameras).
        # 13  CAP_PROP_HUE            Hue of the image (only for cameras).
        # 14  CAP_PROP_GAIN           Gain of the image (only for cameras).
        # 15  CAP_PROP_EXPOSURE       Exposure (only for cameras).
        # 16  CAP_PROP_CONVERT_RGB    Boolean flags indicating whether images should be converted to RGB.
        # 17  CAP_PROP_WHITE_BALANCE  Currently unsupported
        # 18  CAP_PROP_RECTIFICATION  Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
    
        width      = int(video.get(cv2.CAP_PROP_FRAME_WIDTH ))
        height     = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps        = int(video.get(cv2.CAP_PROP_FPS         ))
        framecount = int(video.get(cv2.CAP_PROP_FRAME_COUNT ))
        print("width x height:%d x %d" %(width,height))
        print("           FPS:%d"      %(fps         ))
        print("    framecount:%d"      %(framecount  ))
    
        dfn=vfn[0:-4]+".dat"
        # load the per-frame keypoints that jsons2dat.py pickled
        with open(dfn, 'rb') as f:
            datas = pickle.load(f)
    #   fm2=0
        while True:
            # read the next frame from the video stream
            fm1=video.get(cv2.CAP_PROP_POS_FRAMES)
            ms1=video.get(cv2.CAP_PROP_POS_MSEC)
            ok, frame = video.read()
            if not ok:
                break
            newrgb=Image.fromarray(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))
            dw=ImageDraw.Draw(newrgb)
    
            people=datas[int(fm1)]['people']
            L=len(people)
            for i in range(L):
    #           print("people %02d/%02d" %(i+1,L))
                Xlist = []
                Ylist = []
                for j in range(18):
                    # draw limbs: connect joint j to each linked joint when both confidences exceed THRESHOLD
                    for k in range(4):
                        if links_COCO[j][k]==-1:
                            break
                        x =int(round(people[i]['pose_keypoints_2d'][           j    *3+0]))
                        y =int(round(people[i]['pose_keypoints_2d'][           j    *3+1]))
                        c =          people[i]['pose_keypoints_2d'][           j    *3+2]
                        x1=int(round(people[i]['pose_keypoints_2d'][links_COCO[j][k]*3+0]))
                        y1=int(round(people[i]['pose_keypoints_2d'][links_COCO[j][k]*3+1]))
                        c1=          people[i]['pose_keypoints_2d'][links_COCO[j][k]*3+2]
                        if c>THRESHOLD and c1>THRESHOLD:
                            dw.line(((x,y),(x1,y1)),fill=linkcolors_COCO[j][k],width=4)
    
                    x=int(round(people[i]['pose_keypoints_2d'][j*3+0]))
                    y=int(round(people[i]['pose_keypoints_2d'][j*3+1]))
                    c=          people[i]['pose_keypoints_2d'][j*3+2]
                    if c>THRESHOLD:
                        dw.ellipse(((x-5,y-5),(x+5,y+5)),fill=nodecolors_COCO[j])
                        Xlist.append(x)
                        Ylist.append(y)
                # draw a doubled green bounding box around this person's keypoints, padded by a quarter of its extent
                if len(Xlist)>0:
                    maxX = int(max(Xlist))
                    minX = int(min(Xlist))
                    maxY = int(max(Ylist))
                    minY = int(min(Ylist))
                    w=maxX-minX
                    h=maxY-minY
                    left=minX-w//4
                    if left<0:
                        left=0
                    top=minY-h//4
                    if top<0:
                        top=0
                    right=maxX+w//4
                    if right>width-1:
                        right=width-1
                    bottom=maxY+h//4
                    if bottom>height-1:
                        bottom=height-1
                    dw.rectangle(((left  ,top  ),(right  ,bottom  )),outline=(  0,255,  0,255))
                    dw.rectangle(((left+1,top+1),(right-1,bottom-1)),outline=(  0,255,  0,255))
    
    #       dw.text((48, 48), "fm1 %6d==%6d fm2" %(fm1,fm2), font=ImageFont.truetype('simhei.ttf', 48),fill=(255,255,255,255))
            mss=int(framecount/fps*1000) # total video duration in milliseconds
            dw.text((24, 24), "%06d/%06d %02d:%02d:%02d.%03d/%02d:%02d:%02d" %(fm1,framecount,\
                ms1//(3600*1000),ms1//(60*1000)%60,ms1//(1000)%60,ms1%1000,\
                mss//(3600*1000),mss//(60*1000)%60,mss//(1000)%60,         \
                ), font=ImageFont.truetype('simhei.ttf', 24),fill=(255,255,255,255))
    
    #       fm2+=1
    
            imgs=cv2.cvtColor(np.asarray(newrgb),cv2.COLOR_RGB2BGR)
            cv2.imshow('video', imgs)
            key = cv2.waitKey(1) & 0xFF
            if key == 27:    # Esc quits
                break
            if key == 32:    # Space pauses until any key is pressed
                cv2.waitKey(0)
        video.release()
        cv2.destroyAllWindows()
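As a complement to the 2D overlay above, here is a further hedged sketch of animating per-frame joints in a rotatable matplotlib 3D axes with FuncAnimation. The list `frames` is a hypothetical placeholder for one (18, 3) joint array per video frame; how it is filled depends on the repo's JSON layout (or on the .dat pickle produced by jsons2dat.py):

    # -*- coding: utf-8 -*-
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.animation import FuncAnimation

    # Hypothetical input: one (18, 3) array of joint coordinates per video frame.
    frames = [np.random.rand(18, 3) for _ in range(200)]

    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")

    def update(i):
        # redrawing the whole axes each frame is simple, though not the fastest option
        ax.cla()
        j = frames[i]
        ax.scatter(j[:, 0], j[:, 1], j[:, 2], c="r")
        ax.set_xlim(0, 1); ax.set_ylim(0, 1); ax.set_zlim(0, 1)
        ax.set_title("frame %d / %d" % (i, len(frames)))

    # keep a reference to the animation object so it is not garbage-collected
    anim = FuncAnimation(fig, update, frames=len(frames), interval=40)
    plt.show()

With fixed axis limits the skeleton stays in view while the animation plays, which is roughly the matplotlib-style real-time display asked for in the question.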
    
    
    
    
    This answer was accepted by the asker as the best answer.


Question timeline

  • Closed by the system on May 25
  • Answer accepted on May 17
  • Question edited on May 17
  • Question created on May 17
