目前项目中需要上传大文件,浏览器端上传大文件的常用做法就是分片上传,项目前端用的是vue element,服务器用的是golang的开源框架 echo。
上传大文件如果传到一半断掉了,再全部重新上传的话那就太抓狂了,所以不只是分片上传,还要支持断点续传。
前端
既然是vue+element 那肯定就是通过npm + webpack构建的vue 项目了, 什么是 npm ,什么是webpack这里不介绍,网上的介绍很详细。 首先通过npm安装 vue-simple-uploader ,安装命令 npm i vue-simple-uploader 。vue-simple-uploader 就是一个基于 simple-uploader.js 和 Vue 结合做的一个上传组件,自带 UI,可覆盖、自定义UI,如下图

具体使用方法
初始化:
// Register the vue-simple-uploader plugin globally.
// (Fixed: the original used typographic quotes ‘…’, which are not valid JS.)
import uploader from 'vue-simple-uploader'
Vue.use(uploader)
上传组件
<!--
  Uploader UI. Fixed: typographic quotes ”…” replaced with straight quotes,
  and the broken handler name "on file Added" corrected to "onFileAdded"
  (must match the method name in the component's `methods`).
-->
<template>
  <uploader
    :attrs="attrs"
    :options="options"
    :file-status-text="statusText"
    class="uploader-example"
    @file-added="onFileAdded"
    @file-success="onFileSuccess">
    <uploader-unsupport/>
    <uploader-drop>
      <uploader-btn :single="true">选择文件</uploader-btn>
    </uploader-drop>
    <uploader-list ref="uploader_list"/>
  </uploader>
</template>
<script>
import SparkMD5 from ‘spark-md5’
import { getToken } from ‘@/utils/auth’
/**
 * Chunked-upload component built on vue-simple-uploader.
 *
 * Fixes vs. the original:
 *  - typographic quotes ‘…’/”…” replaced with straight quotes (syntax errors);
 *  - method name "on File Added" (invalid identifier) -> onFileAdded,
 *    matching the template's @file-added binding;
 *  - fileReader.onerror was a plain function, so `this` was not the component
 *    -> arrow function, consistent with the onload handler;
 *  - removed commented-out dead code.
 */
export default {
  name: 'LbUploader',
  data() {
    return {
      // simple-uploader.js options
      options: {
        target: '//localhost:1323/admin/upload',
        // Issue a GET first so chunks already on the server are skipped (resume).
        testChunks: true,
        chunkSize: 5242880, // 5 MB per chunk
        // Decide from the server's GET response whether a chunk was uploaded.
        checkChunkUploadedByResponse: function(chunk, message) {
          const objMessage = JSON.parse(message)
          if (objMessage.data.uploaded) {
            // The server reports 1-based chunk numbers as strings.
            return objMessage.data.uploaded.indexOf((chunk.offset + 1) + '') >= 0
          }
          return false
        },
        headers: {
          Authorization: 'Bearer ' + getToken()
        }
      },
      // Accepted file extensions.
      attrs: [
        '.zip', '.rar'
      ],
      collapse: false,
      // Status labels shown by the uploader list.
      statusText: {
        success: '上传成功',
        error: '出错了',
        uploading: '上传中',
        paused: '暂停中',
        waiting: '等待中'
      }
    }
  },
  methods: {
    onFileAdded(file) {
      this.computeMD5(file)
    },
    onFileSuccess(rootFile, file, response, chunk) {
      response = JSON.parse(response)
      if (response.data.fileUrl) {
        this.$emit('uploadSuccess', response.data.fileUrl)
      }
    },
    /**
     * Compute the file's MD5 so the server can resume / instantly finish
     * uploads (断点续传及秒传). The upload is paused until hashing completes,
     * then the hash becomes the file's unique identifier.
     */
    computeMD5(file) {
      const fileReader = new FileReader()
      let md5 = ''
      file.pause()
      fileReader.readAsArrayBuffer(file.file)
      fileReader.onload = e => {
        if (file.size !== e.target.result.byteLength) {
          // NOTE(review): `this.error` is not defined in this component —
          // confirm it comes from a mixin/plugin, or replace with console.error.
          this.error('Browser reported success but could not read the file until the end.')
          return
        }
        md5 = SparkMD5.ArrayBuffer.hash(e.target.result)
        // The server groups chunks by this identifier.
        file.uniqueIdentifier = md5
        file.resume()
      }
      fileReader.onerror = () => {
        this.error('FileReader onerror was triggered, maybe the browser aborted due to high memory usage.')
      }
    }
  }
}
</script>
服务端
上传之前有一个分片检测,是 GET 请求;上传本身是 POST 请求。两者都在同一个方法里处理,通过判断请求是 GET 还是 POST 来区分。

服务端就不BB了 直接贴代码
/分片上传 func Upload(c echo.Context) error { var ( chunkNumber int //当前片数 chunkSize int64 //总分片总 currentChunkSize int64 //当前分片文件大小 totalSize int64 //文件总大小 identifier string //文件ID filename string totalChunks int //文件总大小 fileHeader *multipart.FileHeader file multipart.File dst *os.File err error currentPath string //上传目录 fileUrl string //保存文件完整路径 ) currentPath = utils.GetCurrentPath() + global.UPLOADDIR if len(c.QueryParams()) > 0 { //分片检测,有上传的片不上传,提高上传效率 identifier = c.QueryParams().Get("identifier") totalChunks, _ = strconv.Atoi(c.QueryParams().Get("totalChunks")) _, names := utils.GetDirList(fmt.Sprintf("%s/%s/", currentPath, identifier)) if totalChunks == len(names)-1 { filename = c.QueryParams().Get("filename") currentPath := utils.GetCurrentPath() + global.UPLOADDIR localPath := utils.CreateDateDir(currentPath) fileUrl = fmt.Sprintf("%s%s%s_%s", localPath, global.SEPARATOR, utils.GetRandomString(12), filename) //文件保存路径,对文件重命名 mergeFile(fileUrl, identifier) names = append(names[:0], names[0+1:]...) return utils.ResponseSuccess(c, map[string]interface{}{"uploaded": names, "fileUrl": strings.Replace(strings.Replace(fileUrl, currentPath, "", 1), "\\", "/", -1)}) } return utils.ResponseSuccess(c, map[string]interface{}{"uploaded": names}) } identifier = c.FormValue("identifier") if fileHeader, err = c.FormFile("file"); err != nil { return utils.ResponseError(c, err.Error()) } if chunkNumber, err = strconv.Atoi(c.FormValue("chunkNumber")); err != nil { return utils.ResponseError(c, err.Error()) } chunkSize, err = strconv.ParseInt(c.FormValue("chunkSize"), 10, 64) currentChunkSize, err = strconv.ParseInt(c.FormValue("currentChunkSize"), 10, 64) totalSize, err = strconv.ParseInt(c.FormValue("totalSize"), 10, 64) totalChunks, err = strconv.Atoi(c.FormValue("totalChunks")) if file, err = fileHeader.Open(); err != nil { return utils.ResponseError(c, err.Error()) } defer func() { file. 
Close () dst.Close() if chunkNumber == totalChunks { //上传完成,开始合并文件 if chunkSize*int64(chunkNumber-1)+currentChunkSize == totalSize { mergeFile(fileUrl, identifier) } } }() if dst, err = os.Create(fmt.Sprintf("%s/%d", utils.CreateDir(currentPath, identifier), chunkNumber)); err != nil { return utils.ResponseError(c, err.Error()) } if _, err = io.Copy(dst, file); err != nil { return utils.ResponseError(c, err.Error()) } if chunkNumber == totalChunks { filename = c.FormValue("filename") currentPath := utils.GetCurrentPath() + global.UPLOADDIR localPath := utils.CreateDateDir(currentPath) fileUrl = fmt.Sprintf("%s%s%s_%s", localPath, global.SEPARATOR, utils.GetRandomString(12), filename) //文件保存路径,对文件重命名 } return utils.ResponseSuccess(c, map[string]interface{}{"fileUrl": strings.Replace(strings.Replace(fileUrl, currentPath, "", 1), "\\", "/", -1)}) } /* 分片上传合并文件 */func mergeFile(fileUrl string, identifier string) { var ( body []byte localFile *os.File err error ) //文件合并 if localFile, err = os.OpenFile(fileUrl, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0755); err != nil { return } eachDir, _ := filepath.Split(fileUrl) eachDir += identifier filepath.Walk(eachDir, func(path string, f os.FileInfo, err error) error { if f == nil { return err } if f.IsDir() { return nil } if body, err = ioutil.ReadFile(path); err != nil { return err } localFile.Write(body) return nil }) localFile.Close() remPath := utils.GetCurrentPath() + global.UPLOADDIR + global.SEPARATOR + identifier err = os.RemoveAll(remPath) //合并完成删除临时文件 }
里面用到了一些封装的函数 GetCurrentPath 获取当前路径 CreateDateDir:根据日期创建路径 ResponseSuccess ,ResponseError 响应到客户端的数据