a_zxcvdfgfv 2024-08-22 07:28 采纳率: 0%
浏览 4

随机森林案例乳腺癌调参第一步,调n_estimators出现了错误,怎么改正

# Step 1 of hyperparameter tuning for RandomForestClassifier on the
# breast-cancer dataset: sweep n_estimators over 1, 11, 21, ..., 191 and
# record the mean 10-fold cross-validation accuracy for each value.
#
# Root cause of the UnicodeEncodeError in the traceback below: with
# n_jobs=-1, joblib starts its loky backend, whose resource tracker
# encodes the temporary-folder path as ASCII ("msg ... .encode('ascii')").
# On a Windows account whose user name contains non-ASCII (e.g. Chinese)
# characters, the default temp path under C:\Users\<name> cannot be
# encoded and the call crashes.  Fix: point joblib at a pure-ASCII temp
# folder before the first parallel call (alternatively, use n_jobs=1).
import os

_joblib_tmp = r"C:\temp\joblib"          # ASCII-only path
os.makedirs(_joblib_tmp, exist_ok=True)  # joblib expects the root to exist
os.environ["JOBLIB_TEMP_FOLDER"] = _joblib_tmp

scorel = []
for i in range(0, 200, 10):
    rfc = RandomForestClassifier(n_estimators=i + 1,
                                 n_jobs=-1,
                                 random_state=90)
    score = cross_val_score(rfc, data.data, data.target,
                            cv=10, error_score='raise').mean()
    scorel.append(score)

# Bug fix: the original line wrote "scorel,index(...)" (comma instead of
# dot) and put "+1" outside the tuple, which raises NameError/TypeError.
# Intended output: the best mean score and the n_estimators that gave it.
best = max(scorel)
print(best, scorel.index(best) * 10 + 1)
plt.figure(figsize=[20, 5])
plt.plot(range(1, 201, 10), scorel)
plt.show()

出现了错误提示

---------------------------------------------------------------------------
UnicodeEncodeError                        Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_2692\4090940073.py in <module>
      4                                  n_jobs=-1,
      5                                  random_state=90)
----> 6     score = cross_val_score(rfc,data.data,data.target,cv=10,error_score='raise').mean()
      7     scorel.append(score)
      8 

D:\Anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in cross_val_score(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, error_score)
    507     scorer = check_scoring(estimator, scoring=scoring)
    508 
--> 509     cv_results = cross_validate(
    510         estimator=estimator,
    511         X=X,

D:\Anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score, return_estimator, error_score)
    265     # independent, and that it is pickle-able.
    266     parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
--> 267     results = parallel(
    268         delayed(_fit_and_score)(
    269             clone(estimator),

D:\Anaconda3\lib\site-packages\joblib\parallel.py in __call__(self, iterable)
   1041             # remaining jobs.
   1042             self._iterating = False
-> 1043             if self.dispatch_one_batch(iterator):
   1044                 self._iterating = self._original_iterator is not None
   1045 

D:\Anaconda3\lib\site-packages\joblib\parallel.py in dispatch_one_batch(self, iterator)
    859                 return False
    860             else:
--> 861                 self._dispatch(tasks)
    862                 return True
    863 

D:\Anaconda3\lib\site-packages\joblib\parallel.py in _dispatch(self, batch)
    777         with self._lock:
    778             job_idx = len(self._jobs)
--> 779             job = self._backend.apply_async(batch, callback=cb)
    780             # A job can complete so quickly than its callback is
    781             # called before we get here, causing self._jobs to

D:\Anaconda3\lib\site-packages\joblib\_parallel_backends.py in apply_async(self, func, callback)
    206     def apply_async(self, func, callback=None):
    207         """Schedule a func to be run"""
--> 208         result = ImmediateResult(func)
    209         if callback:
    210             callback(result)

D:\Anaconda3\lib\site-packages\joblib\_parallel_backends.py in __init__(self, batch)
    570         # Don't delay the application, to avoid keeping the input
    571         # arguments in memory
--> 572         self.results = batch()
    573 
    574     def get(self):

D:\Anaconda3\lib\site-packages\joblib\parallel.py in __call__(self)
    260         # change the default number of processes to -1
    261         with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 262             return [func(*args, **kwargs)
    263                     for func, args, kwargs in self.items]
    264 

D:\Anaconda3\lib\site-packages\joblib\parallel.py in <listcomp>(.0)
    260         # change the default number of processes to -1
    261         with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 262             return [func(*args, **kwargs)
    263                     for func, args, kwargs in self.items]
    264 

D:\Anaconda3\lib\site-packages\sklearn\utils\fixes.py in __call__(self, *args, **kwargs)
    214     def __call__(self, *args, **kwargs):
    215         with config_context(**self.config):
--> 216             return self.function(*args, **kwargs)
    217 
    218 

D:\Anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, return_estimator, split_progress, candidate_progress, error_score)
    678             estimator.fit(X_train, **fit_params)
    679         else:
--> 680             estimator.fit(X_train, y_train, **fit_params)
    681 
    682     except Exception:

D:\Anaconda3\lib\site-packages\sklearn\ensemble\_forest.py in fit(self, X, y, sample_weight)
    448             # parallel_backend contexts set at a higher level,
    449             # since correctness does not rely on using threads.
--> 450             trees = Parallel(
    451                 n_jobs=self.n_jobs,
    452                 verbose=self.verbose,

D:\Anaconda3\lib\site-packages\joblib\parallel.py in __call__(self, iterable)
    966 
    967         if not self._managed_backend:
--> 968             n_jobs = self._initialize_backend()
    969         else:
    970             n_jobs = self._effective_n_jobs()

D:\Anaconda3\lib\site-packages\joblib\parallel.py in _initialize_backend(self)
    733         """Build a process or thread pool and return the number of workers"""
    734         try:
--> 735             n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
    736                                              **self._backend_args)
    737             if self.timeout is not None and not self._backend.supports_timeout:

D:\Anaconda3\lib\site-packages\joblib\_parallel_backends.py in configure(self, n_jobs, parallel, prefer, require, idle_worker_timeout, **memmappingexecutor_args)
    492                 SequentialBackend(nesting_level=self.nesting_level))
    493 
--> 494         self._workers = get_memmapping_executor(
    495             n_jobs, timeout=idle_worker_timeout,
    496             env=self._prepare_worker_env(n_jobs=n_jobs),

D:\Anaconda3\lib\site-packages\joblib\executor.py in get_memmapping_executor(n_jobs, **kwargs)
     18 
     19 def get_memmapping_executor(n_jobs, **kwargs):
---> 20     return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs)
     21 
     22 

D:\Anaconda3\lib\site-packages\joblib\executor.py in get_memmapping_executor(cls, n_jobs, timeout, initializer, initargs, env, temp_folder, context_id, **backend_args)
     40         _executor_args = executor_args
     41 
---> 42         manager = TemporaryResourcesManager(temp_folder)
     43 
     44         # reducers access the temporary folder in which to store temporary

D:\Anaconda3\lib\site-packages\joblib\_memmapping_reducer.py in __init__(self, temp_folder_root, context_id)
    529             # exposes exposes too many low-level details.
    530             context_id = uuid4().hex
--> 531         self.set_current_context(context_id)
    532 
    533     def set_current_context(self, context_id):

D:\Anaconda3\lib\site-packages\joblib\_memmapping_reducer.py in set_current_context(self, context_id)
    533     def set_current_context(self, context_id):
    534         self._current_context_id = context_id
--> 535         self.register_new_context(context_id)
    536 
    537     def register_new_context(self, context_id):

D:\Anaconda3\lib\site-packages\joblib\_memmapping_reducer.py in register_new_context(self, context_id)
    558                 new_folder_name, self._temp_folder_root
    559             )
--> 560             self.register_folder_finalizer(new_folder_path, context_id)
    561             self._cached_temp_folders[context_id] = new_folder_path
    562 

D:\Anaconda3\lib\site-packages\joblib\_memmapping_reducer.py in register_folder_finalizer(self, pool_subfolder, context_id)
    588         # semaphores and pipes
    589         pool_module_name = whichmodule(delete_folder, 'delete_folder')
--> 590         resource_tracker.register(pool_subfolder, "folder")
    591 
    592         def _cleanup():

D:\Anaconda3\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in register(self, name, rtype)
    189         '''Register a named resource, and increment its refcount.'''
    190         self.ensure_running()
--> 191         self._send('REGISTER', name, rtype)
    192 
    193     def unregister(self, name, rtype):

D:\Anaconda3\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in _send(self, cmd, name, rtype)
    202 
    203     def _send(self, cmd, name, rtype):
--> 204         msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
    205         if len(name) > 512:
    206             # posix guarantees that writes to a pipe of less than PIPE_BUF

UnicodeEncodeError: 'ascii' codec can't encode characters in position 18-20: ordinal not in range(128)


展开全部

  • 写回答

1条回答 默认 最新

  • 吃不了席 2024-08-22 09:18
    关注

    以下回复参考:皆我百晓生券券喵儿等免费微信小程序作答:

    这个问题是由于在 Windows 系统中,joblib 的 loky 资源跟踪器会把临时文件夹路径按 ascii 编码,而默认临时目录位于 C:\Users\<用户名> 下,用户名里的中文等非 ASCII 字符导致编码失败。解决方法是让 joblib 使用一个只包含 ASCII 字符的临时目录:在运行交叉验证之前设置 JOBLIB_TEMP_FOLDER 环境变量,如下所示:

    # NOTE(review): the originally suggested
    # "from joblib import TemporaryResourcesManager" raises ImportError —
    # that class lives in the internal module joblib._memmapping_reducer
    # and is not part of joblib's public API; constructing one by hand
    # also would not affect the Parallel pool that cross_val_score builds
    # internally.  The supported knob is the JOBLIB_TEMP_FOLDER
    # environment variable, which joblib reads when choosing where to put
    # its memmapping temp files.
    import os

    # An ASCII-only temporary folder; create it up front so joblib can use it.
    temp_folder = "C:\\temp\\joblib"
    os.makedirs(temp_folder, exist_ok=True)

    # Must be set before the first parallel call (e.g. before cross_val_score).
    os.environ["JOBLIB_TEMP_FOLDER"] = temp_folder
    

    然后,你需要确保这个路径在你的系统中是可用的,并且具有适当的读写权限。这样,你应该可以避免出现UnicodeEncodeError错误。

    评论
编辑
预览

报告相同问题?

问题事件

  • 创建了问题 8月22日