Update sentry-rust monorepo to 0.35.0 #6
1 changed file with 83 additions and 77 deletions
@@ -97,94 +97,100 @@ impl CaStore {
         let string_id = lexicographic_base64::encode(id.to_be_bytes());
         let source_fname = format!("temp/{string_id}");

-        /*info!("Starting multipart upload {id}");
-        let multipart_result = self
-            .client
-            .create_multipart_upload()
-            .bucket(&*self.bucket)
-            .key(&source_fname)
-            .send()
-            .await
-            .with_context(|| format!("Creating multipart request for Request ID{id}"))?;
-
-        let mut buf = BytesMut::with_capacity(16 * 1024 * 1024); // 16MiB byte buffer for the file
-        let hasher = Arc::new(Mutex::new(Hasher::new()));
-
-        let mut i = 1;
-        let mut completed_multipart_upload_builder = CompletedMultipartUpload::builder();
-
-        loop {
-            buf.clear();
-            reader.read_buf(&mut buf).await.context("Reading chunk")?;
-            if buf.is_empty() {
-                break;
-            }
-
-            debug!("Uploading part {i} for multipart upload {id}");
-
-            let buf2 = buf.clone();
-            let hasher = Arc::clone(&hasher);
-            let hasher_job = spawn_blocking(move || {
-                hasher.blocking_lock().update_rayon(&buf2);
-            });
-
-            let part_upload_fut = self
-                .client
-                .upload_part()
-                .bucket(&*self.bucket)
-                .key(&source_fname)
-                .set_upload_id(multipart_result.upload_id.clone())
-                .body(ByteStream::from(buf.to_vec()))
-                .part_number(i)
-                .send();
-
-            let ((), part_upload_result) = try_join!(
-                async { hasher_job.await.context("Awaiting hasher job") },
-                async { part_upload_fut.await.context("Awaiting uploader job") }
-            )
-            .context("Awaiting job for chunk")?;
-            completed_multipart_upload_builder = completed_multipart_upload_builder.parts(
-                CompletedPart::builder()
-                    .e_tag(part_upload_result.e_tag.unwrap_or_default())
-                    .part_number(i)
-                    .build(),
-            );
-            i += 1;
-        }
-
-        debug!("Finalizing Multipart Upload {id}");
-        let hash = hasher.lock().await.finalize();
-        self.client
-            .complete_multipart_upload()
-            .bucket(&*self.bucket)
-            .key(&source_fname)
-            .multipart_upload(completed_multipart_upload_builder.build())
-            .set_upload_id(multipart_result.upload_id)
-            .send()
-            .await
-            .context("Completing multipart upload")?;*/
-        let hasher = Arc::new(Mutex::new(Hasher::new()));
-        let mut buf = Vec::new();
-        reader.read_to_end(&mut buf).await?;
-        let buf = Bytes::from(buf);
-        let buf2 = buf.clone();
-        let hasher2 = Arc::clone(&hasher);
-        spawn_blocking(move || {
-            hasher2.blocking_lock().update_rayon(&buf2);
-        })
-        .await?;
-        self.client
-            .put_object()
-            .bucket(&*self.bucket)
-            .key(&source_fname)
-            .body(ByteStream::from(buf.to_vec()))
-            .send()
-            .await
-            .context("Uploading file")?;
-
-        let hash = hasher.lock().await.finalize();
+        let mut buf = BytesMut::with_capacity(5_000_000);
+        reader.read_buf(&mut buf).await?;
+        let hasher = Arc::new(Mutex::new(Hasher::new()));
+        let hash = if buf.len() >= 5_000_000 {
+            info!("Starting multipart upload {id}");
+            let multipart_result = self
+                .client
+                .create_multipart_upload()
+                .bucket(&*self.bucket)
+                .key(&source_fname)
+                .send()
+                .await
+                .with_context(|| format!("Creating multipart request for Request ID{id}"))?;
+
+            let mut reader = buf.chain(reader);
+
+            let mut buf = BytesMut::with_capacity(16 * 1024 * 1024); // 16MiB byte buffer for the file
+
+            let mut i = 1;
+            let mut completed_multipart_upload_builder = CompletedMultipartUpload::builder();
+
+            loop {
+                buf.clear();
+                reader.read_buf(&mut buf).await.context("Reading chunk")?;
+                if buf.is_empty() {
+                    break;
+                }
+
+                debug!("Uploading part {i} for multipart upload {id}");
+
+                let buf2 = buf.clone();
+                let hasher = Arc::clone(&hasher);
+                let hasher_job = spawn_blocking(move || {
+                    hasher.blocking_lock().update_rayon(&buf2);
+                });
+
+                let part_upload_fut = self
+                    .client
+                    .upload_part()
+                    .bucket(&*self.bucket)
+                    .key(&source_fname)
+                    .set_upload_id(multipart_result.upload_id.clone())
+                    .body(ByteStream::from(buf.to_vec()))
+                    .part_number(i)
+                    .send();
+
+                let ((), part_upload_result) = try_join!(
+                    async { hasher_job.await.context("Awaiting hasher job") },
+                    async { part_upload_fut.await.context("Awaiting uploader job") }
+                )
+                .context("Awaiting job for chunk")?;
+                completed_multipart_upload_builder = completed_multipart_upload_builder.parts(
+                    CompletedPart::builder()
+                        .e_tag(part_upload_result.e_tag.unwrap_or_default())
+                        .part_number(i)
+                        .build(),
+                );
+                i += 1;
+            }
+
+            debug!("Finalizing Multipart Upload {id}");
+            let hash = hasher.lock().await.finalize();
+            self.client
+                .complete_multipart_upload()
+                .bucket(&*self.bucket)
+                .key(&source_fname)
+                .multipart_upload(completed_multipart_upload_builder.build())
+                .set_upload_id(multipart_result.upload_id)
+                .send()
+                .await
+                .context("Completing multipart upload")?;
+            hash
+        } else {
+            let buf = Bytes::from(buf);
+            let buf2 = buf.clone();
+            let hasher2 = Arc::clone(&hasher);
+            spawn_blocking(move || {
+                hasher2.blocking_lock().update_rayon(&buf2);
+            })
+            .await?;
+            self.client
+                .put_object()
+                .bucket(&*self.bucket)
+                .key(&source_fname)
+                .body(ByteStream::from(buf.to_vec()))
+                .send()
+                .await
+                .context("Uploading file")?;
+            hasher.lock().await.finalize()
+        };

         let target_fname = lexicographic_base64::encode(hash.as_bytes());
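The change gates multipart uploads on a size probe: one read_buf into a 5_000_000-byte buffer decides between a single PutObject and the multipart path, and buf.chain(reader) stitches the probed bytes back in front of the rest of the stream. Below is a minimal, self-contained sketch of that probe-and-dispatch shape with the aws-sdk-s3 calls replaced by stubs (store_stream, multipart_upload, put_object are illustrative names, not the crate's API). Two details worth noting: a single read_buf call can return fewer bytes than requested even when more are pending, so the sketch loops the probe until the buffer is full or EOF; and S3 rejects non-final multipart parts smaller than 5 MiB (5,242,880 bytes), slightly above the 5_000_000 threshold used in the diff.

use bytes::BytesMut;
use tokio::io::{AsyncRead, AsyncReadExt};

const THRESHOLD: usize = 5_000_000;

// Hypothetical dispatch wrapper: probe the stream, then pick an upload path.
async fn store_stream<R: AsyncRead + Unpin>(mut reader: R) -> std::io::Result<()> {
    // Probe: fill up to THRESHOLD bytes. A lone read_buf may return short,
    // so loop until the buffer is full or the reader reports EOF.
    let mut buf = BytesMut::with_capacity(THRESHOLD);
    while buf.len() < THRESHOLD {
        if reader.read_buf(&mut buf).await? == 0 {
            break; // EOF
        }
    }

    if buf.len() >= THRESHOLD {
        // Large payload: put the probe bytes back in front of the remaining
        // stream and feed the combined reader to the multipart path.
        let combined = std::io::Cursor::new(buf.freeze()).chain(reader);
        multipart_upload(combined).await
    } else {
        // Small payload: the whole object is already in memory; one PutObject.
        put_object(buf.freeze()).await
    }
}

// Stand-in for create_multipart_upload / upload_part / complete_multipart_upload.
async fn multipart_upload<R: AsyncRead + Unpin>(_reader: R) -> std::io::Result<()> {
    Ok(())
}

// Stand-in for a single put_object(...).send() call.
async fn put_object(_body: bytes::Bytes) -> std::io::Result<()> {
    Ok(())
}

With this split, small blobs cost one round trip instead of the three-request multipart handshake, while large streams are still uploaded 16 MiB at a time and never fully buffered.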