下面是一个大文件切片计算md5时,再worker.js中调用createChunk,方法来拿到文件md5值,createChunk方法中调用了SparkMD5,但是
import SparkMD5 from './spark-md5.js';会导致worker报错,报错内容如下:
完整代码如下:
文件切片:
const CHUNK_SIZE = 1024 * 1024 * 5; // 5MB
const THREAD_COUNT = navigator.hardwareConcurrency || 4;
export async function cutFile(file) {
return new Promise((resolve, reject) => {
const chunkCount = Math.ceil(file.size / CHUNK_SIZE);
const threadChunkCounts = Math.ceil(chunkCount / THREAD_COUNT);
let finished = 0;
const result = [];
for (let i = 0; i < THREAD_COUNT; i++) {
const worker = new Worker('./../js/worker.js', { type: 'module' });
let end = (i + 1) * threadChunkCounts;
const start = i * threadChunkCounts;
if (end > chunkCount) {
end = chunkCount;
}
worker.onerror = (err) => console.error('Worker error:', i, err);
worker.postMessage({
start,
end,
file,
CHUNK_SIZE
});
worker.onmessage = (e) => {
for (let i = start; i < end; i++) {
result[i] = e.data[i - start];
}
finished++;
worker.terminate();
if (finished === THREAD_COUNT) {
resolve(result)
}
};
}
})
}
// worker.js
import { createChunk } from './createchunk.js';
console.log('Worker loaded successfully');
onmessage = async (e) => {
const { start, end, file, CHUNK_SIZE } = e.data;
console.log(`start: ${start}, end: ${end}`);
const result = [];
for (let i = start; i < end; i++) {
result.push(createChunk(file, i, CHUNK_SIZE)) ;
}
const chunks = await Promise.all(result);
postMessage(chunks);
}
// createchunk.js
import SparkMD5 from './spark-md5.js';
export function createChunk(file, index, chunkSize) {
return new Promise((resolve) => {
const start = index * chunkSize;
const end = start + chunkSize;
const spark = new SparkMD5.ArrayBuffer();
const fileReader = new FileReader();
const blob = file.slice(start, end);
fileReader.onload = (e) => {
spark.append(e.target.result)
resolve({
start,
end,
index,
hash: spark.end(),
blob
})
};
fileReader.readAsArrayBuffer(blob);
})
}
请问spark-md5.js应该怎么引入使用呢?