3. Chunked Upload and Download of Large Files on the Front End
Chunking
The front end splits the file into chunks; the back end reassembles them into the original file.
```js
chunkFile(file, chunkSize) {
  const chunkFlag = file.size > this.chunkBound
  const chunkCount = chunkFlag ? Math.ceil(file.size / chunkSize) : 1
  console.log(`chunk count: ${chunkCount}, ${file.size}, ${chunkSize}`)
  const result = []
  let point = 0
  let chunkId = 0
  while (point < file.size) {
    // File.prototype.slice returns a Blob covering [point, point + chunkSize)
    result.push({ file: file.slice(point, point + chunkSize), name: `${file.name}-chunk-${chunkId++}` })
    point += chunkSize
  }
  return result
}
```
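For the back-end assembly step, a minimal Node.js sketch; the directory layout and the `<hash>-chunk-<i>` naming scheme are assumptions that mirror the naming above, not a fixed protocol:

```js
const fs = require('fs')
const path = require('path')

// Merge chunk files chunkDir/<fileHash>-chunk-0 .. <chunkCount - 1> into targetPath, in order
function mergeChunks(chunkDir, fileHash, chunkCount, targetPath) {
  const out = fs.createWriteStream(targetPath)
  for (let i = 0; i < chunkCount; i++) {
    const chunkPath = path.join(chunkDir, `${fileHash}-chunk-${i}`)
    out.write(fs.readFileSync(chunkPath)) // readFileSync keeps the sketch simple; stream in production
    fs.unlinkSync(chunkPath)              // remove the chunk once it has been appended
  }
  out.end()
}
```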
Instant Upload and Resumable Upload
Both essentially rely on a hash of the file.
Send a request asking the server which chunks of the file (identified by its hash) still need to be uploaded.
Hash the file chunk by chunk (incremental algorithm, using spark-md5); the verify-and-upload flow is sketched after the hashing code below.
```js
function hash(chunks) {
  return new Promise((resolve) => {
    // SparkMD5.ArrayBuffer hashes binary data incrementally
    const spark = new SparkMD5.ArrayBuffer()
    function _read(i) {
      if (i >= chunks.length) {
        resolve(spark.end()) // all chunks consumed: emit the final digest
        return
      }
      const blob = chunks[i].file // chunkFile() returns { file, name } objects
      const reader = new FileReader()
      reader.onload = e => {
        const bytes = e.target.result // ArrayBuffer holding this chunk's bytes
        spark.append(bytes)
        _read(i + 1)
      }
      reader.readAsArrayBuffer(blob)
    }
    _read(0)
  })
}
```
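With the hash in hand, the client asks the server which chunks are still missing and uploads only those. A minimal sketch of that flow, assuming chunkFile and hash from above are in scope; the /verify and /upload endpoints and the response fields uploaded/missingChunks are hypothetical:

```js
async function upload(file) {
  const chunks = chunkFile(file, 5 * 1024 * 1024)
  const fileHash = await hash(chunks)

  // Ask the server what it already has for this hash
  const res = await fetch(`/verify?hash=${fileHash}`).then(r => r.json())
  if (res.uploaded) return // whole file already on the server: instant upload

  // Upload only the missing chunks; a resumed upload works the same way
  const missing = new Set(res.missingChunks)
  for (const [i, chunk] of chunks.entries()) {
    if (!missing.has(i)) continue
    const form = new FormData()
    form.append('hash', fileHash)
    form.append('index', i)
    form.append('chunk', chunk.file, chunk.name)
    await fetch('/upload', { method: 'POST', body: form })
  }
}
```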
Offloading to a Web Worker
```html
<!-- main page (main thread); fileInput is a reference to the file <input> -->
<script>
  const fileWorker = new Worker('./fileWorker.js')
  fileInput.onchange = (e) => {
    // Hand the selected File to the worker
    fileWorker.postMessage(e.target.files[0])
  }
  fileWorker.addEventListener('message', (e) => {
    if (e.data === 'success') {
      alert('upload succeeded')
    }
  })
</script>
```

```js
// fileWorker.js (worker thread)
importScripts('./spark-md5.min.js') // assumes a local copy of spark-md5

function chunkFile(file, size) {
  // ...
}
function hash(chunks) {
  // ...
}
async function handleFileChunk(file) {
  const chunks = chunkFile(file, 5 * 1024 * 1024)
  const fileHash = await hash(chunks)
  // Send the hash to the server to check whether this is a new file.
  // If the server already knows it, it returns the chunks it still needs; upload those.
  // If it is a new file, upload all chunks from scratch.
  self.postMessage('success')
}
self.addEventListener('message', function (e) {
  handleFileChunk(e.data) // e.data is the File posted from the main thread
})
```
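Reading and hashing a large file is CPU-bound work; done on the main thread it would block rendering and freeze the page, so moving chunking and hashing into a dedicated worker keeps the UI responsive while the upload is prepared.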
Side note: awaiting asynchronous tasks sequentially in a loop (the same pattern serializes chunk uploads):

```js
function asyncTask() {
  const arr = [1, 2, 3, 4, 5]
  // An async executor works here, though a plain async function would be more idiomatic
  return new Promise(async function (resolve) {
    for (let i = 0; i < arr.length; i++) {
      console.log(await setTimeTask(arr[i])) // logs one value per second
    }
    resolve()
  })
}

function setTimeTask(num) {
  return new Promise(function (resolve) {
    setTimeout(function () {
      resolve(num)
    }, 1000)
  })
}
```
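For the download direction, the same chunking idea can use HTTP Range requests. A minimal sketch, assuming the server supports the Range header and the file size is known (e.g. from a HEAD request); the function and parameter names are hypothetical:

```js
async function downloadInChunks(url, fileSize, chunkSize = 5 * 1024 * 1024) {
  const parts = []
  for (let start = 0; start < fileSize; start += chunkSize) {
    const end = Math.min(start + chunkSize, fileSize) - 1
    // Ask for just the [start, end] byte range; the server replies 206 Partial Content
    const res = await fetch(url, { headers: { Range: `bytes=${start}-${end}` } })
    parts.push(await res.blob())
  }
  return new Blob(parts) // reassemble the ranges into the complete file
}
```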