100字范文,内容丰富有趣,生活中的好帮手!
100字范文 > springboot+阿里云OSS分片上传 断点续传 秒传

springboot+阿里云OSS分片上传 断点续传 秒传

时间:2023-08-03 03:11:10

相关推荐

springboot+阿里云OSS分片上传 断点续传 秒传

最近工作中有使用到OSS的分片上传API,整体流程就是前端将大文件进行分割,每个分片大小是1MB,分片个数是:(文件总大小 / 单个分片大小),前端多线程处理上传分片到后端,后端接收到分片后调用OSS验证是否存在接口校验之前有没有传输过,如果分片在OSS上不存在则调用分片上传API进行上传,所有分片上传完成后调用OSS分片合并API,将所有分片在OSS上合并为我们最初的大文件,特此记录便于日后查阅。

目录

1、maven依赖

2、分片上传dto

3、OSS工具类

4、controller

5、前端js

1、maven依赖

<!-- Aliyun OSS SDK -->
<dependency>
    <groupId>com.aliyun.oss</groupId>
    <artifactId>aliyun-sdk-oss</artifactId>
    <version>3.11.0</version>
</dependency>

2、分片上传dto

/**
 * Parameters for one chunk of a multipart (resumable) OSS upload.
 *
 * @author Wxm
 */
@Data
public class UploadChunkFileParam {
    /** Upload task id: the MD5 digest of the whole file. */
    private String identifier;
    /** Full file name, e.g. {@code 123.png}. */
    private String filename;
    /** Business object type — project-specific field, may be ignored here. */
    private String objectType;
    /** Total number of chunks the file was split into. */
    private int totalChunks;
    /** Size of each chunk in bytes. */
    private long chunkSize;
    /** 1-based number of the current chunk. */
    private int chunkNumber;
    /** Actual size of the current chunk in bytes (the last one may be smaller). */
    private long currentChunkSize;
    /** The chunk payload itself. */
    private MultipartFile file;
    /** OSS multipart upload id, returned by the server on the first call. */
    private String uploadId;
    /** Object key on OSS. */
    private String key;
}

3、OSS工具类

/*** @author wxm*/@Slf4jpublic class AliOSSManager {private static volatile OSSClient ossClient = null;private static volatile RedisManager redisManager = null;/*** 上传文件** @param file* @return*/public static String upload(MultipartFile file) {try {return putFile(getKey(FileUploadTypeEnum.DEF_TYPE.getType(), null, FileUtil.getSuffix(file)), file.getInputStream());} catch (Exception e) {e.printStackTrace();throw new ApiException("打开文件失败:" + e.getMessage());}}/*** 上传文件** @param file* @param prefix* @return*/public static String upload(MultipartFile file, String prefix) {try {return putFile(getKey(prefix, file.getName(), FileUtil.getSuffix(file)), file.getInputStream());} catch (Exception e) {e.printStackTrace();throw new ApiException("打开文件失败:" + e.getMessage());}}/*** 上传文件** @param file* @param uploadType* @return*/public static String upload(MultipartFile file, FileUploadTypeEnum uploadType) {try {return putFile(getKey(uploadType.getType(), file.getName(), FileUtil.getSuffix(file)), file.getInputStream());} catch (Exception e) {e.printStackTrace();throw new ApiException("打开文件失败:" + e.getMessage());}}/*** 分片上传** @param param 上传参数* @return*/public static Map uploadChunk(UploadChunkFileParam param) {if (ObjectUtil.isEmpty(param.getKey())) {String key = getKey(null, param.getIdentifier(), param.getFilename());param.setKey(key);}return uploadChunk(param.getUploadId(), param.getKey(), param.getFile(), param.getChunkNumber(),param.getCurrentChunkSize(), param.getTotalChunks());}/*** 分片上传* 1、检查文件是否上传* 2、检查文件是否第一次上传,第一次上传创建上传id uploadId* 3、检查是否是断点续传,如果是返回已上传的分片* 4、分片上传到阿里云OSS上,并记录上传信息到Redis* 5、判断是否已上传完成,已完成:合并所有分片为源文件** @param uploadId 上传id* @param key 文件在OSS上的key* @param file 文件分片* @param chunkIndex 分片索引* @param chunkSize 分片大小* @param chunkCount 总分片数* @return*/public static Map uploadChunk(String uploadId, String key, MultipartFile file, Integer chunkIndex,long chunkSize, Integer chunkCount) {if (ObjectUtil.isEmpty(key)) {key = getKey(FileUploadTypeEnum.DEF_TYPE.getType(), 
null, FileUtil.getSuffix(file));}ossClient = initOSS();try {Map<String, Object> map = MapUtil.newHashMap();// 判断是否上传if (checkExist(key)) {map.put("skipUpload", true);map.put("url", getUrl(key));return map;}// 判断是否第一次上传if (StringUtils.isBlank(uploadId)) {uploadId = uploadChunkInit(file, key);map.put("skipUpload", false);map.put("uploadId", uploadId);map.put("uploaded", null);return map;}RedisManager redisManager = initRedisManager();// 检查分片是否已上传 实现断点续传if (file == null) {Map<String, String> uploadedCache = redisManager.hmget(SysConstant.REDIS_ALI_OSS_KEY + uploadId);List<Integer> uploaded = Lists.newArrayList();for (Map.Entry<String, String> entry : uploadedCache.entrySet()) {uploaded.add(JSONUtil.toBean(entry.getValue(), PartETag.class).getPartNumber());}map.put("skipUpload", false);map.put("uploadId", uploadId);map.put("uploaded", uploaded);return map;}// 上传分片PartETag partETag = uploadChunkPart(uploadId, key, file.getInputStream(), chunkIndex, chunkSize, chunkCount);// 分片上传完成缓存keyredisManager.hset(SysConstant.REDIS_ALI_OSS_KEY + uploadId, chunkIndex + ",", JSONUtil.toJsonStr(partETag));// 取出所有已上传的分片信息Map<String, String> dataMap = redisManager.hmget(SysConstant.REDIS_ALI_OSS_KEY + uploadId);List<PartETag> partETagList = Lists.newArrayList();for (Map.Entry<String, String> entry : dataMap.entrySet()) {partETagList.add(JSONUtil.toBean(entry.getValue(), PartETag.class));}// 判断是否上传完成if (dataMap.keySet().size() == chunkCount) {uploadChunkComplete(uploadId, key, partETagList);for (String mapKey : dataMap.keySet()) {redisManager.hdel(SysConstant.REDIS_ALI_OSS_KEY + uploadId, mapKey);}map.put("skipUpload", true);map.put("uploadId", uploadId);map.put("url", getUrl(key));} else {List<Integer> list = partETagList.stream().map(PartETag::getPartNumber).collect(Collectors.toList());map.put("uploaded", list);map.put("skipUpload", false);map.put("uploadId", uploadId);}return map;} catch (Exception e) {e.printStackTrace();throw new ApiException("上传失败:" + e.getMessage());}}/*** 
初始化分片上传** @param key* @return 分片上传的uploadId*/public static String uploadChunkInit(MultipartFile file, String key) {if (ObjectUtil.isEmpty(key)) {key = getKey(FileUploadTypeEnum.DEF_TYPE.getType(), null, FileUtil.getSuffix(file));}return uploadChunkInit(key);}/*** 初始化上传id uploadId** @param key* @return*/public static String uploadChunkInit(String key) {if (ObjectUtil.isEmpty(key)) {throw new ApiException("key不能为空");}ossClient = initOSS();try {// 创建分片上传对象InitiateMultipartUploadRequest uploadRequest = new InitiateMultipartUploadRequest(AliOSSProperties.BUCKET_NAME, key);// 初始化分片InitiateMultipartUploadResult result = ossClient.initiateMultipartUpload(uploadRequest);// 返回uploadId,它是分片上传事件的唯一标识,您可以根据这个uploadId发起相关的操作,如取消分片上传、查询分片上传等。return result.getUploadId();} catch (Exception e) {e.printStackTrace();throw new ApiException("初始化分片失败:" + e.getMessage());}}/*** 上传分片文件** @param uploadId 上传id* @param key key* @param instream 文件分片流* @param chunkIndex 分片索引* @param chunkSize 分片大小* @return*/public static PartETag uploadChunkPart(String uploadId, String key, InputStream instream,Integer chunkIndex, long chunkSize, Integer chunkCount) {ossClient = initOSS();try {UploadPartRequest partRequest = new UploadPartRequest();// 阿里云 oss 文件根目录partRequest.setBucketName(AliOSSProperties.BUCKET_NAME);// 文件keypartRequest.setKey(key);// 分片上传uploadIdpartRequest.setUploadId(uploadId);// 分片文件partRequest.setInputStream(instream);// 分片大小。除了最后一个分片没有大小限制,其他的分片最小为100 KB。partRequest.setPartSize(chunkSize);System.out.println(chunkSize + " " + chunkIndex + " " + uploadId);// 分片号。每一个上传的分片都有一个分片号,取值范围是1~10000,如果超出这个范围,OSS将返回InvalidArgument的错误码。partRequest.setPartNumber(chunkIndex);// 每个分片不需要按顺序上传,甚至可以在不同客户端上传,OSS会按照分片号排序组成完整的文件。UploadPartResult uploadPartResult = ossClient.uploadPart(partRequest);// 每次上传分片之后,OSS的返回结果包含PartETag。PartETag将被保存在redis中。return uploadPartResult.getPartETag();} catch (Exception e) {e.printStackTrace();throw new ApiException("分片上传失败:" + e.getMessage());}}/*** 文件合并** @param uploadId 
上传id* @param key key* @param chunkTags 分片上传信息* @return*/public static CompleteMultipartUploadResult uploadChunkComplete(String uploadId, String key, List<PartETag> chunkTags) {ossClient = initOSS();try {CompleteMultipartUploadRequest completeMultipartUploadRequest =new CompleteMultipartUploadRequest(AliOSSProperties.BUCKET_NAME, key, uploadId, chunkTags);CompleteMultipartUploadResult result = pleteMultipartUpload(completeMultipartUploadRequest);return result;} catch (Exception e) {e.printStackTrace();throw new ApiException("分片合并失败:" + e.getMessage());}}/*** 根据key生成文件的访问地址** @param key* @return*/public static String getUrl(String key) {// 拼接文件访问路径。由于拼接的字符串大多为String对象,而不是""的形式,所以直接用+拼接的方式没有优势StringBuffer url = new StringBuffer();url.append("http://").append(AliOSSProperties.BUCKET_NAME).append(".").append(AliOSSProperties.END_POINT).append("/").append(key);return url.toString();}/*** 根据key生成文件的访问地址(带过期时间)** @param key* @return*/public static String getUrlExpire(String key) {ossClient = initOSS();// 生成过期时间long expireEndTime = System.currentTimeMillis() + AliOSSProperties.URL_EXPIRE * 1000;Date expiration = new Date(expireEndTime);// 生成URLGeneratePresignedUrlRequest generatePresignedUrlRequest = new GeneratePresignedUrlRequest(AliOSSProperties.BUCKET_NAME, key);generatePresignedUrlRequest.setExpiration(expiration);URL url = ossClient.generatePresignedUrl(generatePresignedUrlRequest);return url.toString();}/*** 通过文件名获取文件流** @param key 要下载的文件名(OSS服务器上的)*/public static InputStream getInputStream(String key) {ossClient = initOSS();// 下载OSS文件到本地文件。如果指定的本地文件存在会覆盖,不存在则新建。return ossClient.getObject(new GetObjectRequest(AliOSSProperties.BUCKET_NAME, key)).getObjectContent();}/*** 根据key下载文件** @param key*/public static void download(String key) {ossClient = initOSS();GetObjectRequest request = new GetObjectRequest(AliOSSProperties.BUCKET_NAME, key);ossClient.getObject(request);}/*** 根据key下载文件** @param key*/public static void download(String key, String fileName) {ossClient = 
initOSS();GetObjectRequest request = new GetObjectRequest(AliOSSProperties.BUCKET_NAME, key);ossClient.getObject(request, new File(fileName));}/*** 删除** @param key*/public static void delete(String key) {if (StringUtils.isNotEmpty(key)) {ossClient = initOSS();GenericRequest request = new DeleteObjectsRequest(AliOSSProperties.BUCKET_NAME).withKey(key);ossClient.deleteBucket(request);}}/*** 删除** @param keys*/public static void delete(List<String> keys) {if (ObjectUtil.isNotEmpty(keys)) {ossClient = initOSS();GenericRequest request = new DeleteObjectsRequest(AliOSSProperties.BUCKET_NAME).withKeys(keys);ossClient.deleteBucket(request);}}/*** 上传文件<>基础方法</>** @param key 文件key* @param inputStream 输入流* @return*/private static String putFile(String key, InputStream inputStream) {ossClient = initOSS();if (inputStream == null) {throw new CustomException("文件不能为空");}// 上传文件最大值 MB->byteslong maxSize = AliOSSProperties.MAX_SIZE * 1024 * 1024;long size = FileUtil.getInputStreamSize(inputStream);if (size <= 0 || size > maxSize) {throw new CustomException("请检查文件大小");}PutObjectResult result = null;try {// 创建上传Object的MetadataObjectMetadata meta = new ObjectMetadata();//被下载时网页的缓存行为meta.setCacheControl("no-cache");PutObjectRequest request = new PutObjectRequest(AliOSSProperties.BUCKET_NAME, key, inputStream, meta);result = ossClient.putObject(request);} catch (Exception e) {e.printStackTrace();log.error(e.getMessage(), e);}if (result != null) {return getUrl(key);}return null;}public static Boolean checkExist(String key) {ossClient = initOSS();return ossClient.doesObjectExist(AliOSSProperties.BUCKET_NAME, key);}/*** 获取上传文件的key* 上传和删除时除了需要bucketName外还需要此值** @param prefix 前缀(非必传),可以用于区分是哪个模块或子项目上传的文件,默认 file 文件夹* @param fileName 文件名称(非必传),如果为空默认生成文件名,格式:yyyyMMdd-UUID* @param suffix 后缀 , 可以是 png jpg* @return*/private static String getKey(final String prefix, final String fileName, final String suffix) {StringBuffer keySb = new StringBuffer();// 前缀处理if (StringUtils.isNotEmpty(prefix)) 
{keySb.append(prefix);} else {keySb.append(FileUploadTypeEnum.DEF_TYPE.getType());}// 文件名处理if (StringUtils.isBlank(fileName)) {// 上传时间 因为后期可能会用 - 将key进行split,然后进行分类统计keySb.append(CommKit.Date2Str(LocalDateTime.now(), "yyyyMMdd"));keySb.append("-");// 生成uuidkeySb.append(CommKit.genUUID());} else {keySb.append(fileName);}// 后缀处理if (StringUtils.isBlank(suffix)) {throw new NullPointerException("文件后缀不能为空");}if (suffix.contains(".")) {keySb.append(suffix.substring(suffix.lastIndexOf(".")));} else {keySb.append("." + suffix);}return keySb.toString();}private static OSSClient initOSS() {if (ossClient == null) {synchronized (OSSClient.class) {if (ossClient == null) {ossClient = new OSSClient(AliOSSProperties.END_POINT,new DefaultCredentialProvider(AliOSSProperties.ACCESS_KEY_ID, AliOSSProperties.ACCESS_KEY_SECRET),new ClientConfiguration());}}}return ossClient;}private static RedisManager initRedisManager() {if (redisManager == null) {synchronized (RedisManager.class) {if (redisManager == null) {return SpringUtils.getBean(RedisManager.class);}}}return redisManager;}}

4、controller

/**
 * Common request handling: OSS upload endpoints.
 *
 * @author wxm
 */
@RestController
@RequestMapping("/common/")
public class CommonController {

    /**
     * Plain (single-request) OSS upload.
     *
     * @param file the file to store
     */
    @PostMapping("/oss")
    public ApiResult uploadOSS(MultipartFile file) {
        String url = AliOSSManager.upload(file);
        if (url == null) {
            return ApiResult.fail("上传失败");
        }
        HashMap<Object, Object> result = MapUtil.newHashMap();
        result.put("url", url);
        return ApiResult.success(result);
    }

    /**
     * Chunked (multipart) OSS upload.
     *
     * @param param chunk upload parameters
     * @return upload state for this chunk (skip / resume / progress info)
     */
    @PostMapping("/oss/chunk/upload")
    public ApiResult uploadOSS(UploadChunkFileParam param) {
        Map<String, Object> state = AliOSSManager.uploadChunk(param);
        return ApiResult.success(state);
    }
}

5、前端js

import md5 from 'js-md5' // MD5 hashing of the whole file
// NOTE(review): UpApi is retained for compatibility; the dead per-chunk upload
// path that used it has been removed (it was never invoked).
import UpApi from '@/api/common.js'
import axios from 'axios'
import { concurrentExecution } from '@/utils/jnxh'

/**
 * Chunked file upload with instant-upload (server-side skip) and resume.
 * @params file {File} the file to upload
 * @params pieceSize {Number} chunk size in MB, default 3
 * @params concurrent {Number} concurrency, default 3 (1 forces sequential mode)
 * @params process {Function} progress callback, receives 0..100
 * @params success {Function} success callback
 * @params error {Function} failure callback
 */
export const uploadByPieces = ({ file, pieceSize = 3, concurrent = 3, success, process, error }) => {
  if (!file || file.length < 1) {
    return error('文件不能为空')
  }
  let fileMD5 = ''
  const chunkSize = pieceSize * 1024 * 1024 // chunk size in bytes
  const chunkCount = Math.ceil(file.size / chunkSize) // total number of chunks
  const chunkList = [] // chunk indices still to upload (concurrent mode)
  let uploaded = [] // part numbers already stored on the server
  let fileType = '' // file extension
  let uploadId = '' // OSS multipart upload id

  // FIX: `process` was invoked unguarded while success/error were guarded;
  // a missing progress callback crashed the upload.
  const reportProgress = pct => {
    process && process(pct)
  }

  /**
   * Hash the whole file, then probe the server: skip if the object already
   * exists, resume from the reported parts, otherwise start uploading.
   */
  const readFileMD5 = () => {
    fileType = file.name.substring(file.name.lastIndexOf('.') + 1, file.name.length)
    console.log('获取文件的MD5值')
    const reader = new FileReader()
    // NOTE(review): readAsBinaryString is deprecated; readAsArrayBuffer is the
    // modern equivalent and js-md5 accepts ArrayBuffer — kept for compatibility.
    reader.readAsBinaryString(file)
    reader.addEventListener('load', e => {
      fileMD5 = md5(e.target.result)
      const form = new FormData()
      form.append('filename', file.name)
      form.append('identifier', fileMD5)
      form.append('objectType', fileType)
      form.append('chunkNumber', 1)
      form.append('uploadId', uploadId)
      uploadChunks(form)
        .then(res => {
          if (res.data.skipUpload) {
            // Instant upload: the finished object already exists on OSS.
            console.log('文件已被上传')
            success && success(res)
            return
          }
          uploadId = res.data.uploadId
          // Resume: the server reports part numbers that are already stored.
          if (res.data.uploaded && res.data.uploaded.length != 0) {
            uploaded = [].concat(res.data.uploaded)
          }
          console.log('已上传的分片:' + uploaded)
          if (concurrent == 1 || chunkCount == 1) {
            console.log('顺序上传')
            sequentialUpload(0)
          } else {
            console.log('并发上传')
            concurrentUpload()
          }
        })
        .catch(e => {
          // FIX: the initial probe failure was only logged; the caller was
          // never notified and the upload silently stalled.
          console.log('文件合并错误')
          console.log(e)
          error && error(e)
        })
    })
  }

  /** Slice one chunk out of the file. */
  const getChunkInfo = (file, currentChunk, chunkSize) => {
    const start = currentChunk * chunkSize
    const end = Math.min(file.size, start + chunkSize)
    const chunk = file.slice(start, end)
    return { start, end, chunk }
  }

  /** Upload chunks one at a time, skipping parts already on the server. */
  const sequentialUpload = currentChunk => {
    const { chunk } = getChunkInfo(file, currentChunk, chunkSize)
    const chunkInfo = { chunk, currentChunk, chunkCount, uploadId }
    reportProgress(parseInt((chunkInfo.currentChunk / chunkInfo.chunkCount) * 100))
    console.log('当前上传分片:' + currentChunk)
    const partNumber = chunkInfo.currentChunk + 1 // server part numbers are 1-based
    if (uploaded.indexOf(partNumber + '') > -1) {
      console.log('分片【' + currentChunk + '】已上传')
      sequentialUpload(currentChunk + 1)
      return
    }
    const uploadData = createUploadData(chunkInfo)
    const config = { headers: { 'Content-Type': 'application/json', 'Accept': '*/*' } }
    uploadChunks(uploadData, config)
      .then(res => {
        if (res.code == 200) {
          console.log('分片【' + currentChunk + '】上传成功')
          uploaded.push(chunkInfo.currentChunk + 1)
          if (uploaded.length == chunkInfo.chunkCount) {
            console.log('全部完成')
            success && success(res)
            reportProgress(100)
          } else {
            sequentialUpload(currentChunk + 1)
          }
        } else {
          console.log(res.msg)
        }
      })
      .catch(e => {
        error && error(e)
      })
  }

  /** Upload the remaining chunks with bounded concurrency. */
  const concurrentUpload = () => {
    for (let i = 0; i < chunkCount; i++) {
      if (uploaded.indexOf(i + 1) === -1) {
        chunkList.push(i)
      }
    }
    // FIX: removed a leftover `debugger` statement that froze the upload
    // whenever devtools were open.
    console.log('需要上传的分片索引:' + chunkList)
    concurrentExecution(chunkList, concurrent, curItem => {
      return new Promise((resolve, reject) => {
        const { chunk } = getChunkInfo(file, curItem, chunkSize)
        const chunkInfo = { chunk, currentChunk: curItem, chunkCount }
        reportProgress(parseInt((chunkInfo.currentChunk / chunkInfo.chunkCount) * 100))
        console.log('当前上传分片:' + curItem)
        const partNumber = chunkInfo.currentChunk + 1
        if (uploaded.indexOf(partNumber) > -1) {
          console.log('分片【' + chunkInfo.currentChunk + '】已上传')
          resolve()
          return
        }
        const uploadData = createUploadData(chunkInfo)
        const config = { headers: { 'Content-Type': 'application/json', 'Accept': '*/*' } }
        uploadChunks(uploadData, config)
          .then(res => {
            if (res.code == 200) {
              uploaded.push(chunkInfo.currentChunk + 1)
              console.log('已经上传完成的分片:' + uploaded)
              if (uploaded.length == chunkInfo.chunkCount) {
                success && success(res)
                reportProgress(100)
              }
              resolve()
            } else {
              console.log(res.msg)
              reject(res)
            }
          })
          .catch(e => {
            reject(e)
            error && error(e)
          })
      })
    }).then(res => {
      console.log('finish', res)
    })
  }

  /** Build the multipart form for one chunk (field names match the DTO). */
  const createUploadData = chunkInfo => {
    const fetchForm = new FormData()
    fetchForm.append('identifier', fileMD5)
    fetchForm.append('chunkNumber', chunkInfo.currentChunk + 1)
    fetchForm.append('chunkSize', chunkSize)
    fetchForm.append('currentChunkSize', chunkInfo.chunk.size)
    // Wrap the Blob in a File so the backend sees the original file name.
    const chunkfile = new File([chunkInfo.chunk], file.name)
    fetchForm.append('file', chunkfile)
    fetchForm.append('filename', file.name)
    fetchForm.append('relativePath', file.name)
    fetchForm.append('totalChunks', chunkInfo.chunkCount)
    fetchForm.append('totalSize', file.size)
    fetchForm.append('objectType', fileType)
    fetchForm.append('uploadId', uploadId)
    return fetchForm
  }

  const api = axios.create({ baseURL: 'http://localhost:8902/', timeout: 100000 })

  // NOTE(review): the optional second (config) argument passed by callers is
  // ignored here, exactly as in the original implementation.
  const uploadChunks = data =>
    api({ url: '/common/oss/chunk/upload', method: 'post', data: data }).then(res => {
      return res.data
    })

  readFileMD5() // kick off the flow
}

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。