NuDt_xiaobai · 2020-08-11 21:15 · acceptance rate: 0%
1398 views
Closed

Layer-size mismatch: RuntimeError: size mismatch, m1: [2484 x 1], m2: [2484 x 1] at C:\w\b\windows\pytorch\aten\src\TH/gen

This is a neural network I wrote myself that reads a sound file and recognizes a speaker index. Reading the wav file works fine, but the error is raised at the fc1 layer. I'm a student; I'd appreciate it if someone more experienced could point out the mistake.

import numpy
import scipy.io.wavfile
from scipy.fftpack import dct
import torch
import torch.nn as nn
import torch.optim as optim

def Read_wav(wav_name):
    sample_rate, signal = scipy.io.wavfile.read(wav_name)  # File assumed to be in the same directory
    signal = signal[0:int(3.5 * sample_rate)]  # Keep the first 3.5 seconds

    pre_emphasis = 0.97
    emphasized_signal = numpy.append(signal[0], signal[1:] - pre_emphasis * signal[:-1])

    frame_size = 0.025
    frame_stride = 0.01
    frame_length, frame_step = frame_size * sample_rate, frame_stride * sample_rate  # Convert from seconds to samples
    signal_length = len(emphasized_signal)
    frame_length = int(round(frame_length))
    frame_step = int(round(frame_step))
    num_frames = int(
        numpy.ceil(float(numpy.abs(signal_length - frame_length)) / frame_step))  # Make sure that we have at least 1 frame

    pad_signal_length = num_frames * frame_step + frame_length
    z = numpy.zeros((pad_signal_length - signal_length))
    pad_signal = numpy.append(emphasized_signal, z)  # Pad Signal to make sure that all frames have equal number of samples without truncating any samples from the original signal

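    # Build a (num_frames, frame_length) index matrix: row i holds
    # i*frame_step .. i*frame_step + frame_length - 1, the sample indices of frame i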
    indices = numpy.tile(numpy.arange(0, frame_length), (num_frames, 1)) + numpy.tile(
        numpy.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T
    frames = pad_signal[indices.astype(numpy.int32, copy=False)]

    frames *= numpy.hamming(frame_length)
    # frames *= 0.54 - 0.46 * numpy.cos((2 * numpy.pi * n) / (frame_length - 1))  # Explicit Implementation **

    NFFT = 512  # or 256
    mag_frames = numpy.absolute(numpy.fft.rfft(frames, NFFT))  # Magnitude of the FFT
    pow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2))  # Power Spectrum

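    # Build a mel filterbank: 40 triangular filters whose centers are equally
    # spaced on the mel scale, converted back to Hz and mapped to FFT bins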
    nfilt = 40
    low_freq_mel = 0
    high_freq_mel = (2595 * numpy.log10(1 + (sample_rate / 2) / 700))  # Convert Hz to Mel
    mel_points = numpy.linspace(low_freq_mel, high_freq_mel, nfilt + 2)  # Equally spaced in Mel scale
    hz_points = (700 * (10 ** (mel_points / 2595) - 1))  # Convert Mel to Hz
    bin = numpy.floor((NFFT + 1) * hz_points / sample_rate)

    fbank = numpy.zeros((nfilt, int(numpy.floor(NFFT / 2 + 1))))
    for m in range(1, nfilt + 1):
        f_m_minus = int(bin[m - 1])  # left
        f_m = int(bin[m])  # center
        f_m_plus = int(bin[m + 1])  # right

        for k in range(f_m_minus, f_m):
            fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])
        for k in range(f_m, f_m_plus):
            fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])
    filter_banks = numpy.dot(pow_frames, fbank.T)
    filter_banks = numpy.where(filter_banks == 0, numpy.finfo(float).eps, filter_banks)  # Numerical Stability
    filter_banks = 20 * numpy.log10(filter_banks)

    num_ceps = 12
    mfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 1 : (num_ceps + 1)] # Keep 2-13

    #(nframes, ncoeff) = mfcc.shape
    #n = numpy.arange(ncoeff)
    #lift = 1 + (cep_lifter / 2) * numpy.sin(numpy.pi * n / cep_lifter)
    #mfcc *= lift  #*

    filter_banks -= (numpy.mean(filter_banks, axis=0) + 1e-8)
    mfcc -= (numpy.mean(mfcc, axis=0) + 1e-8)

    # return filter_banks.shape
    mfcc = mfcc.reshape(-1, 1)[:2484]
    return mfcc.reshape(1, -1)  # [1, 2484]
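# Each call to Read_wav therefore yields one fixed-length feature vector of
# shape (1, 2484): 12 MFCCs per frame, flattened and truncated to 2484 values
# (assuming the 3.5 s clip produces at least that many coefficients).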

count_number = 3

# Define the network structure
class Net(nn.Module):
    def __init__(self, count_number):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(2484, 1)  # 3; the reported size mismatch is raised in this layer
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = torch.FloatTensor(x)
        x = x.view(-1, 1)
        x = self.fc1(x)
        x = self.softmax(x)
        return x
LR = 0.0003
# Instantiate the model
model = Net(count_number)
# Define the loss function
entropy_loss = nn.CrossEntropyLoss()
# Define the optimizer
optimizer = optim.Adam(model.parameters(), LR)


def train(t, labels):
    # Get the data and the corresponding label
    inputs = torch.FloatTensor(Read_wav(t))  # [1, 2484]
    # input('Actual speaker: {0: Hu, 1: Cao, 2: Peng}')
    target = torch.ones(1)
    print(target)
    out = model(inputs).reshape(3)  # [1, 3]
    print(out)
    # Compute the loss
    loss = entropy_loss(out, target)
    # Zero the gradients
    optimizer.zero_grad()
    # Backpropagate to compute the gradients
    loss.backward()
    # Update the weights
    optimizer.step()

for i in range(1, 201):
    train('0 (%s).wav' % i, 0)



1 answer

  • threenewbee 2020-08-11 22:02

    inputs = torch.FloatTensor(Read_wav(t))
    Check the shape of inputs at this point.
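    For reference: Read_wav returns a (1, 2484) row vector, but forward() calls
    x.view(-1, 1), which reshapes the input to [2484, 1]; nn.Linear(2484, 1)
    then multiplies it by its transposed weight, also [2484 x 1], which is
    exactly the reported mismatch. Below is a minimal sketch of one possible
    fix, assuming three speaker classes with integer labels 0/1/2 (as the
    commented-out speaker mapping suggests) and reusing the model, entropy_loss
    and optimizer definitions from the question:

    class Net(nn.Module):
        def __init__(self, count_number):
            super(Net, self).__init__()
            # One output score per speaker class instead of a single output
            self.fc1 = nn.Linear(2484, count_number)

        def forward(self, x):
            x = x.view(-1, 2484)  # keep the batch dimension: [1, 2484]
            # Return raw logits: nn.CrossEntropyLoss applies log-softmax
            # internally, so no explicit Softmax layer is needed
            return self.fc1(x)

    def train(t, label):
        inputs = torch.FloatTensor(Read_wav(t))  # [1, 2484]
        print(inputs.shape)                      # should be torch.Size([1, 2484])
        target = torch.tensor([label])           # LongTensor of class indices
        out = model(inputs)                      # [1, 3] logits
        loss = entropy_loss(out, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    Note that nn.CrossEntropyLoss expects logits of shape [batch, classes] and
    a LongTensor of class indices, so target = torch.ones(1) (a float tensor)
    and out.reshape(3) (which drops the batch dimension) would still fail even
    after fixing fc1.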

