冬天吃根小雪糕 2024-05-07 17:39 采纳率: 0%
浏览 15

python的csv文件路径读取失败

问题遇到的现象和发生背景

读取.CSV文件找不到,实际已经放在对应位置了

遇到的现象和发生背景,请写出第一个错误信息
用代码块功能插入代码,请勿粘贴截图。 不用代码块回答率下降 50%
import math
import os

import multiprocessing as mp
import autograd.numpy as np
from autograd import elementwise_grad
from scipy import optimize

from irlmodelvalidation.irlagent import IRLAgent
from dataobjects import HighDDataset
from dataobjects.enums import HighDDatasetID
from irlmodelvalidation.rewardgradients import calculate_g_and_H_autograd
from irlmodelvalidation.evaluatemodel import run_agent
from irlmodelvalidation.irlagenttools import get_surrounding_cars_information
from processing.encryptiontools import load_encrypted_pickle, save_encrypted_pickle

import os

# Diagnostic: build the expected CSV location from the current working
# directory.  Passing the path pieces as separate os.path.join arguments
# (instead of one string with hard-coded '\\' separators) keeps this
# portable across operating systems, and checking os.path.isfile tells
# us immediately whether the file is reachable from the CURRENT working
# directory — which matters because a later os.chdir changes how
# relative paths like 'data/HighD/data/...' resolve.
current_working_directory = os.getcwd()
full_path = os.path.join(current_working_directory, 'data', 'HighD', 'data', '01_recordingMeta.csv')

print(f"The full path to the file is: {full_path}")
print(f"File exists at that path: {os.path.isfile(full_path)}")

def fit_theta_and_simulate(agent_id, dataset_index, road_boundaries, lane_centers, planner_dt, sim_dt, N, c=0.18, sigma_x=10.0, sigma_y=1.4, grid_search=False,
                           verbose=True, path_to_data_folder='data\\'):
    """Fit IRL reward weights (theta) for one vehicle, then simulate it.

    Splits the vehicle's recorded trajectory into non-overlapping
    demonstrations of N frames, maximises an approximate IRL log-likelihood
    over the reward weights with BFGS, and finally replays the fitted agent
    via ``run_agent``.

    Parameters
    ----------
    agent_id : vehicle id within the HighD recording.
    dataset_index : recording index, used to locate the '%02d.pkl' file.
    road_boundaries, lane_centers : road geometry, forwarded to IRLAgent.
    planner_dt, sim_dt : planner / simulation time steps in seconds.
    N : demonstration horizon in frames.
    c, sigma_x, sigma_y : IRLAgent cost-shape parameters.
    grid_search : forwarded to run_agent (controls the output file naming).
    verbose : print progress information.
    path_to_data_folder : folder containing the encrypted pickle files.
        NOTE(review): the default uses a hard-coded Windows separator
        ('data\\\\') and plain string concatenation below — prefer
        os.path.join so this also runs on POSIX systems.
    """
    if verbose:
        print('Training on vehicle %d in dataset %02d, with PID %d' % (agent_id, dataset_index, os.getpid()))

    # NOTE(review): relative path — resolution depends on the working
    # directory at call time (the __main__ block calls os.chdir first).
    data = load_encrypted_pickle(path_to_data_folder + '%02d.pkl' % dataset_index)

    # Frame span over which this vehicle is present in the recording.
    first_frame = data.track_data.loc[data.track_data['id'] == agent_id, 'frame'].min()
    last_frame = data.track_data.loc[data.track_data['id'] == agent_id, 'frame'].max()

    # Number of whole N-frame demonstrations that fit in the trajectory.
    number_of_demonstrations = math.floor((last_frame - first_frame) / N)
    # abs(): vehicles on one carriageway drive in negative x direction.
    desired_velocity = data.track_data.loc[data.track_data['id'] == agent_id, 'xVelocity'].abs().max()

    # Per demonstration: flattened control sequence (2 values per frame),
    # initial position, and surrounding-vehicle positions.
    u_h_values = np.array([[0.0] * N * 2] * number_of_demonstrations)
    x0 = np.array([[0.0] * 2] * number_of_demonstrations)
    x_r_values = []

    # agent parameters
    # NOTE(review): HighD meta stores the bounding box as 'width' (along the
    # road) and 'height' (across the road), so width -> length and
    # height -> width is presumably intentional — confirm against the
    # dataset documentation.
    agent_length = data.track_meta_data.at[agent_id, 'width']
    agent_width = data.track_meta_data.at[agent_id, 'height']
    driving_direction = data.track_meta_data.at[agent_id, 'drivingDirection']

    for demonstration_number in range(number_of_demonstrations):
        frames = [f for f in range(first_frame + demonstration_number * N, first_frame + (demonstration_number + 1) * N)]
        # One frame longer than the demonstration itself — presumably the
        # planner needs opponent states one step past the horizon; verify.
        frames_for_opponent = [f for f in range(first_frame + demonstration_number * N, first_frame + 1 + (demonstration_number + 1) * N)]

        state_index = demonstration_number
        # Observed controls: per-frame (xVelocity, yVelocity), flattened to 2N.
        u_h_demo = data.track_data.loc[(data.track_data['id'] == agent_id) & data.track_data['frame'].isin(frames), ['xVelocity', 'yVelocity']]
        u_h_values[state_index] = u_h_demo.to_numpy().flatten()

        horizon_data = data.track_data.loc[data.track_data['frame'].isin(frames_for_opponent), :]
        _, surrounding_cars_positions = get_surrounding_cars_information(horizon_data, data.track_meta_data, frames_for_opponent[0], frames_for_opponent[-1],
                                                                         agent_id)

        # Flatten (frames, cars, 2) -> (frames, cars * 2); an empty list
        # marks demonstrations without any surrounding traffic.
        if surrounding_cars_positions.any():
            flat_surrounding_car_positions = surrounding_cars_positions.reshape(surrounding_cars_positions.shape[0],
                                                                                surrounding_cars_positions.shape[1] * surrounding_cars_positions.shape[2])
            flat_surrounding_car_positions = list(flat_surrounding_car_positions)
            x_r_values.append(flat_surrounding_car_positions)
        else:
            x_r_values.append([])

        # Ego position at the first frame of this demonstration.
        first_frame_x_h = data.track_data.loc[(data.track_data['id'] == agent_id) & (data.track_data['frame'] == frames[0]), ['x', 'y']]
        x0[state_index] = first_frame_x_h.to_numpy()[0]

        # convert from top left corner to center coordinates
        x0[state_index] += np.array([agent_length / 2, agent_width / 2])

    # create agent
    agent = IRLAgent(0.0, 0.0, np.array([4.0, 0.02, 0.02, 1.5]), road_boundaries, lane_centers, desired_velocity, driving_direction, planner_dt, N, c=c,
                           sigma_x=sigma_x,
                           sigma_y=sigma_y)

    constants_values = (agent.C2, desired_velocity, road_boundaries, lane_centers, agent.sigma_x, agent.sigma_y, driving_direction)

    if verbose:
        print('determine H and g')
    # Gradient and Hessian of the reward w.r.t. the controls (autograd).
    g, H = calculate_g_and_H_autograd(constants_values, sim_dt, N)

    def likelihood(theta_values):
        # Negative mean approximate log-likelihood over all demonstrations
        # (Laplace-style term g^T H^-1 g + log det(-H)); demonstrations that
        # evaluate to NaN are skipped.
        l_total = 0.0
        successful_evaluations = 0

        for demonstration_number in range(number_of_demonstrations):

            g_values = g(u_h_values[demonstration_number], theta_values, x_r_values[demonstration_number], x0[demonstration_number])
            H_values = H(u_h_values[demonstration_number], theta_values, x_r_values[demonstration_number], x0[demonstration_number])

            # Shift the Hessian's diagonal — presumably to keep it negative
            # definite so that det(-H) > 0 and the inverse exists; verify.
            H_values -= np.eye(N * 2) * 1e-1

            H_inv = np.linalg.inv(H_values)
            H_det = np.linalg.det(-H_values)

            l = np.dot(np.dot(g_values, H_inv), g_values) + np.log(H_det)
            if not np.isnan(l):
                l_total += l
                successful_evaluations += 1
        # NOTE(review): raises ZeroDivisionError if every demonstration was
        # NaN — this is only caught by the broad except around the
        # optimisation below when it happens inside optimize.minimize.
        return -l_total / successful_evaluations

    if verbose:
        print('starting optimization')

    jac = elementwise_grad(likelihood)
    try:
        result = optimize.minimize(likelihood, np.array([-3.0, 5.0, -0.10, -2000.0]), jac=jac, options={'disp': True}, method='bfgs')
        if verbose:
            print('Theta for agent %d = ' % agent_id + str(result.x))

        run_agent(theta=result.x, data=data, agent_id=agent_id, c=agent.C2, sigma_x=agent.sigma_x, sigma_y=agent.sigma_y, save_as_grid_search_file=grid_search,
                  path_to_data_folder=path_to_data_folder, verbose=verbose)
    except Exception as e:
        # Best effort: one failed agent must not abort the whole pool run.
        if verbose:
            print("WARNING: fitting or running for agent %d failed with the exception below, now continuing with next agent" % agent_id)
            print(e)


def detect_single_lane_changes(data: "HighDDataset"):
    """Return the ids of vehicles that made exactly one lane change in the
    'interesting' direction for their carriageway.

    A change is interesting when it increases the lane id for driving
    direction 1 and decreases it for driving direction 2 (comparing the
    first and last recorded lane of the vehicle).
    """
    meta = data.track_meta_data
    single_change_ids = meta.index[meta['numLaneChanges'] == 1]

    selected = []
    for vehicle_id in single_change_ids:
        lane_ids = data.track_data.loc[data.track_data['id'] == vehicle_id, 'laneId']
        lane_delta = lane_ids.iat[-1] - lane_ids.iat[0]
        direction = meta.at[vehicle_id, 'drivingDirection']

        if (direction == 1 and lane_delta > 0) or (direction == 2 and lane_delta < 0):
            selected.append(vehicle_id)

    return selected


if __name__ == '__main__':
    # Move the working directory one level up from wherever the script was
    # started.  NOTE(review): this is the likely cause of the reported
    # FileNotFoundError — if the script is launched from the project root
    # (D:\projecttravia), the CWD becomes D:\, and the relative path
    # 'data/HighD/data/' then resolves to D:\data\HighD\data\, where the
    # CSV files do not exist.  Anchor paths on __file__ instead, or drop
    # this chdir — confirm which directory HighDDataset.load expects.
    os.chdir(os.getcwd() + '/..')
    dataset_id = HighDDatasetID.DATASET_01
    dataset_index = dataset_id.value

    # Loads the recording (reads the *_recordingMeta.csv via pandas).
    data = HighDDataset.load(dataset_id)

    # road parameters
    planner_dt = 1 / data.frame_rate
    sim_dt = 1 / data.frame_rate
    N = 5
    c = 0.14
    sigma_x = 15.0
    sigma_y = 1.4
    path_to_data_folder = 'data/HighD/data/'
    verbose = True
    is_grid_search = False
    workers = 4

    # Outer lane markings of the two carriageways:
    # [upper-left, upper-right, lower-left, lower-right].
    road_boundaries = np.array([data.upper_lane_markings[0], data.upper_lane_markings[-1], data.lower_lane_markings[0], data.lower_lane_markings[-1]])
    lane_centers = []

    # Midpoint between each pair of adjacent lane markings, per carriageway.
    for index in range(len(data.upper_lane_markings) - 1):
        lane_centers.append((data.upper_lane_markings[index + 1] - data.upper_lane_markings[index]) / 2 + data.upper_lane_markings[index])

    for index in range(len(data.lower_lane_markings) - 1):
        lane_centers.append((data.lower_lane_markings[index + 1] - data.lower_lane_markings[index]) / 2 + data.lower_lane_markings[index])

    # NOTE(review): this divides the width of the UPPER carriageway only by
    # the number of lanes on BOTH carriageways, so for a symmetric road it
    # yields roughly half a lane width — confirm this is intended before
    # relying on the margin added below.
    average_lane_width = abs(road_boundaries[1] - road_boundaries[0]) / len(lane_centers)

    # Pad each boundary outward by half of the computed width.
    road_boundaries += np.array([-average_lane_width / 2, average_lane_width / 2, -average_lane_width / 2, average_lane_width / 2])

    # demonstrations to use
    ego_ids = detect_single_lane_changes(data)
    # Only the first four lane-changing vehicles are trained in this run.
    ego_ids = ego_ids[0:4]

    # One argument tuple per agent, matching fit_theta_and_simulate's
    # positional signature, for Pool.starmap.
    args = zip(ego_ids, [dataset_index] * len(ego_ids), [road_boundaries] * len(ego_ids), [lane_centers] * len(ego_ids), [planner_dt] * len(ego_ids),
               [sim_dt] * len(ego_ids), [N] * len(ego_ids), [c] * len(ego_ids), [sigma_x] * len(ego_ids), [sigma_y] * len(ego_ids),
               [is_grid_search] * len(ego_ids), [verbose] * len(ego_ids), [path_to_data_folder] * len(ego_ids))

    if verbose:
        print("Starting training with %d workers" % workers)

    # Fit and simulate the selected agents in parallel worker processes.
    with mp.Pool(workers) as p:
        p.starmap(fit_theta_and_simulate, args)

    if verbose:
        print('Dataset %02d done' % dataset_index)


运行结果及详细报错内容
The full path to the file is: D:\projecttravia\data\HighD\data\01_recordingMeta.csv
Traceback (most recent call last):
  File "D:\projecttravia\dataobjects\highddataset.py", line 72, in read_highd_csv
    recording_meta_data = pd.read_csv(path_to_csv)
  File "D:\projecttravia\lib\site-packages\pandas\io\parsers\readers.py", line 912, in read_csv
    return _read(filepath_or_buffer, kwds)
  File "D:\projecttravia\lib\site-packages\pandas\io\parsers\readers.py", line 577, in _read
    parser = TextFileReader(filepath_or_buffer, **kwds)
  File "D:\projecttravia\lib\site-packages\pandas\io\parsers\readers.py", line 1407, in __init__
    self._engine = self._make_engine(f, self.engine)
  File "D:\projecttravia\lib\site-packages\pandas\io\parsers\readers.py", line 1661, in _make_engine
    self.handles = get_handle(
  File "D:\projecttravia\lib\site-packages\pandas\io\common.py", line 859, in get_handle
    handle = open(
FileNotFoundError: [Errno 2] No such file or directory: 'data\\HighD\\data\\01_recordingMeta.csv'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "D:\projecttravia\train_irl.py", line 160, in <module>
    data = HighDDataset.load(dataset_id)
  File "D:\projecttravia\dataobjects\highddataset.py", line 63, in load
    dataset = HighDDataset.read_highd_csv(dataset_id)
  File "D:\projecttravia\dataobjects\highddataset.py", line 74, in read_highd_csv
    raise ValueError('The dataset ' + str(dataset_id) + ' could not be loaded because the data is missing.')
ValueError: The dataset HighD dataset #01 could not be loaded because the data is missing.


我的解答思路和尝试过的方法,不写自己思路的,回答率下降 60%

文件已经在对应位置了

img

  • 写回答

3条回答 默认 最新

  • 专家-赤兔[在线] 全栈领域优质创作者 2024-05-07 17:39
    关注

    引自免费微信小程序:皆我百晓生

    看起来你正在尝试从一个固定的目录加载数据,并且在运行过程中遇到了FileNotFoundError,这表明Python无法找到指定路径下的.csv文件。根据提供的错误信息和目录结构,01_recordingMeta.csv文件位于D:\projecttravia\data\HighD\data目录下。然而,当尝试读取这个文件时,程序抛出了异常。

    为了解决这个问题,确保检查以下几点:

    1. 确认文件路径是否正确无误。
    2. 检查当前工作目录(os.getcwd())是否与预期的一致。
    3. 如果文件路径是从其他地方引用过来的,确保那个引用也是正确的。

    从报错栈来看,异常实际发生在 HighDDataset.load 内部的 pd.read_csv 调用(highddataset.py 第 72 行),它使用的是相对路径 'data\\HighD\\data\\01_recordingMeta.csv';而 load_encrypted_pickle 要到后面的 fit_theta_and_simulate 里才会被调用,并不是本次报错的来源。不过同样的相对路径问题也会影响它,所以下面的建议对两处都适用:确保传给它们的路径相对于当前工作目录是正确的。

    以下是修复此问题的一种方法:

    # 在fit_theta_and_simulate函数内,替换这一行:
    data = load_encrypted_pickle(path_to_data_folder + '%02d.pkl' % dataset_index)
    
    # 为:
    data = load_encrypted_pickle(os.path.join(path_to_data_folder, '%02d.pkl' % dataset_index))
    

    这将确保使用os.path.join正确地连接各个路径部分。然后,在主程序中,确保设置正确的当前工作目录:

    if __name__ == '__main__':
        # 将这一行:
        os.chdir(os.getcwd() + '/..')
    
        # 改为:
        os.chdir('..')
    

    需要说明的是,os.chdir(os.getcwd() + '/..') 和 os.chdir('..') 的效果其实是一样的,都只是把当前工作目录上移一层,并不存在上级目录被加两次的问题。从报错看,打印出的绝对路径 D:\projecttravia\data\HighD\data\01_recordingMeta.csv 是存在的,但随后程序用相对路径读取;如果 __main__ 里的 os.chdir 把工作目录移到了 D:\(即 D:\projecttravia 的上一级),相对路径就会被解析为 D:\data\HighD\data\...,自然找不到文件。建议去掉这次 chdir,或改用基于 __file__ 的绝对路径(例如 os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'HighD', 'data', '01_recordingMeta.csv')),确保执行这些更改后程序可以正确找到并加载 .csv 文件。

    如果你仍然遇到问题,建议你检查load_encrypted_pickle函数内部的实现,确保它正确处理文件路径。此外,你还可以在代码中加入日志语句,以便跟踪文件路径和操作过程,以便更好地诊断问题。例如:

    import logging
    
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger(__name__)
    
    # ...
    
    def load_encrypted_pickle(path):
        logger.debug("Trying to load encrypted pickle from: %s", path)
        # ...
    

    这将在运行时记录调试消息,显示正在尝试加载的文件路径,便于分析。

    评论 编辑记录

报告相同问题?

问题事件

  • 创建了问题 5月7日

悬赏问题

  • ¥15 有两个非常“自以为是”烦人的问题急期待大家解决!
  • ¥30 STM32 INMP441无法读取数据
  • ¥100 求汇川机器人IRCB300控制器和示教器同版本升级固件文件升级包
  • ¥15 用visualstudio2022创建vue项目后无法启动
  • ¥15 x趋于0时tanx-sinx极限可以拆开算吗
  • ¥500 把面具戴到人脸上,请大家贡献智慧,别用大模型回答,大模型的答案没啥用
  • ¥15 任意一个散点图自己下载其js脚本文件并做成独立的案例页面,不要作在线的,要离线状态。
  • ¥15 各位 帮我看看如何写代码,打出来的图形要和如下图呈现的一样,急
  • ¥30 c#打开word开启修订并实时显示批注
  • ¥15 如何解决ldsc的这条报错/index error