/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include "aws/s3/private/s3_auto_ranged_put.h"
#include "aws/s3/private/s3_checksums.h"
#include "aws/s3/private/s3_list_parts.h"
#include "aws/s3/private/s3_request_messages.h"
#include "aws/s3/private/s3_util.h"
#include <aws/common/clock.h>
#include <aws/common/encoding.h>
#include <aws/common/string.h>
#include <aws/io/stream.h>
/* TODO: better logging of steps */
static const size_t s_complete_multipart_upload_init_body_size_bytes = 512;
static const size_t s_abort_multipart_upload_init_body_size_bytes = 512;
/* For a body of unknown length we don't know the number of parts up front. To avoid
* resizing the etag/checksum arrays too often, those arrays start out with the
* capacity given by the constant below. Note: the value was picked arbitrarily to
* balance allocation count against memory use; it might change in the future.
*/
static const uint32_t s_unknown_length_default_num_parts = 32;
/* Max number of parts (per meta-request) that can be: "started, but not done reading from stream".
* Though reads are serial (only 1 part can be reading from stream at a time)
* we may queue up more to minimize delays between each read.
*
* If this number is too low, there could be an avoidable delay between each read
* (meta-request ready for more work, but client hasn't run update and given it more work yet)
*
* If this number is too high, early meta-requests could hog all the "work tokens"
* (1st meta-request has a queue of 100 "work tokens" that it needs to read
* the stream for, while later meta-requests sit idle waiting for work tokens)
*
* TODO: this value needs further benchmarking. */
static const uint32_t s_max_parts_pending_read = 5;
static const struct aws_byte_cursor s_create_multipart_upload_copy_headers[] = {
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"),
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"),
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"),
};
/* Data for aws_s3_auto_ranged_put's async vtable->prepare_request() job */
struct aws_s3_auto_ranged_put_prepare_request_job {
struct aws_allocator *allocator;
struct aws_s3_request *request;
/* async step: prepare type-specific message */
struct aws_future_http_message *asyncstep_prepare_message;
/* future to set when this job completes */
struct aws_future_void *on_complete;
};
/* Data for async preparation of an UploadPart request */
struct aws_s3_prepare_upload_part_job {
struct aws_allocator *allocator;
struct aws_s3_request *request;
/* async step: read this part from input stream */
struct aws_future_bool *asyncstep_read_part;
/* future to set when this job completes */
struct aws_future_http_message *on_complete;
};
/* Data for async preparation of a CompleteMultipartUpload request */
struct aws_s3_prepare_complete_multipart_upload_job {
struct aws_allocator *allocator;
struct aws_s3_request *request;
/* future to set when this job completes */
struct aws_future_http_message *on_complete;
};
static void s_s3_meta_request_auto_ranged_put_destroy(struct aws_s3_meta_request *meta_request);
static void s_s3_auto_ranged_put_send_request_finish(
struct aws_s3_connection *connection,
struct aws_http_stream *stream,
int error_code);
static bool s_s3_auto_ranged_put_update(
struct aws_s3_meta_request *meta_request,
uint32_t flags,
struct aws_s3_request **out_request);
static void s_s3_auto_ranged_put_schedule_prepare_request(
struct aws_s3_meta_request *meta_request,
struct aws_s3_request *request,
aws_s3_meta_request_prepare_request_callback_fn *callback,
void *user_data);
static struct aws_future_void *s_s3_auto_ranged_put_prepare_request(struct aws_s3_request *request);
static void s_s3_auto_ranged_put_prepare_request_finish(void *user_data);
static struct aws_future_http_message *s_s3_prepare_list_parts(struct aws_s3_request *request);
static struct aws_future_http_message *s_s3_prepare_create_multipart_upload(struct aws_s3_request *request);
static struct aws_future_http_message *s_s3_prepare_upload_part(struct aws_s3_request *request);
static void s_s3_prepare_upload_part_on_read_done(void *user_data);
static void s_s3_prepare_upload_part_finish(struct aws_s3_prepare_upload_part_job *part_prep, int error_code);
static struct aws_future_http_message *s_s3_prepare_complete_multipart_upload(struct aws_s3_request *request);
static struct aws_future_http_message *s_s3_prepare_abort_multipart_upload(struct aws_s3_request *request);
static void s_s3_auto_ranged_put_request_finished(
struct aws_s3_meta_request *meta_request,
struct aws_s3_request *request,
int error_code);
static int s_s3_auto_ranged_put_pause(
struct aws_s3_meta_request *meta_request,
struct aws_s3_meta_request_resume_token **resume_token);
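/* ListParts result callback, invoked once per previously uploaded part when resuming an upload.
* Records the part's size, etag, and (if a checksum algorithm is configured) base64 checksum
* into synced_data.part_list, so the part can later be skipped and stitched into
* CompleteMultipartUpload. Must be called with the synced-data lock held. */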
static int s_process_part_info_synced(const struct aws_s3_part_info *info, void *user_data) {
struct aws_s3_auto_ranged_put *auto_ranged_put = user_data;
struct aws_s3_meta_request *meta_request = &auto_ranged_put->base;
ASSERT_SYNCED_DATA_LOCK_HELD(&auto_ranged_put->base);
if (info->part_number == 0) {
AWS_LOGF_ERROR(
AWS_LS_S3_META_REQUEST, "id=%p: ListParts reported Part without valid PartNumber", (void *)meta_request);
return aws_raise_error(AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED);
}
struct aws_s3_mpu_part_info *part = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_s3_mpu_part_info));
part->size = info->size;
part->etag = aws_strip_quotes(meta_request->allocator, info->e_tag);
part->was_previously_uploaded = true;
const struct aws_byte_cursor *checksum_cur = NULL;
switch (auto_ranged_put->base.checksum_config.checksum_algorithm) {
case AWS_SCA_CRC32:
checksum_cur = &info->checksumCRC32;
break;
case AWS_SCA_CRC32C:
checksum_cur = &info->checksumCRC32C;
break;
case AWS_SCA_SHA1:
checksum_cur = &info->checksumSHA1;
break;
case AWS_SCA_SHA256:
checksum_cur = &info->checksumSHA256;
break;
case AWS_SCA_NONE:
break;
default:
AWS_ASSERT(false);
break;
}
if ((checksum_cur != NULL) && (checksum_cur->len > 0)) {
aws_byte_buf_init_copy_from_cursor(&part->checksum_base64, auto_ranged_put->base.allocator, *checksum_cur);
}
/* Parts might be out of order or have gaps in them.
* Resize array-list to be long enough to hold this part,
* filling any intermediate slots with NULL. */
aws_array_list_ensure_capacity(&auto_ranged_put->synced_data.part_list, info->part_number);
while (aws_array_list_length(&auto_ranged_put->synced_data.part_list) < info->part_number) {
struct aws_s3_mpu_part_info *null_part = NULL;
aws_array_list_push_back(&auto_ranged_put->synced_data.part_list, &null_part);
}
/* Add this part */
aws_array_list_set_at(&auto_ranged_put->synced_data.part_list, &part, info->part_number - 1);
return AWS_OP_SUCCESS;
}
/*
* Validates the resume token and updates part-size/part-count variables. No-op if the token is null.
*/
static int s_try_update_part_info_from_resume_token(
uint64_t content_length,
const struct aws_s3_meta_request_resume_token *resume_token,
size_t *out_part_size,
uint32_t *out_total_num_parts) {
if (!resume_token) {
return AWS_OP_SUCCESS;
}
if (resume_token->type != AWS_S3_META_REQUEST_TYPE_PUT_OBJECT) {
AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Could not load persisted state. Invalid token type.");
goto invalid_argument_cleanup;
}
if (resume_token->multipart_upload_id == NULL) {
AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Could not load persisted state. Multipart upload id missing.");
goto invalid_argument_cleanup;
}
if (resume_token->part_size < g_s3_min_upload_part_size) {
AWS_LOGF_ERROR(
AWS_LS_S3_META_REQUEST,
"Could not create resume auto-ranged-put meta request; part size of %" PRIu64
" specified in the token is below minimum threshold for multi-part.",
(uint64_t)resume_token->part_size);
goto invalid_argument_cleanup;
}
if ((uint32_t)resume_token->total_num_parts > g_s3_max_num_upload_parts) {
AWS_LOGF_ERROR(
AWS_LS_S3_META_REQUEST,
"Could not create resume auto-ranged-put meta request; total number of parts %" PRIu32
" specified in the token is too large for platform.",
(uint32_t)resume_token->total_num_parts);
goto invalid_argument_cleanup;
}
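/* Recompute the expected part count as ceil(content_length / part_size) and
* verify it matches what the token persisted. */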
uint32_t num_parts = (uint32_t)(content_length / resume_token->part_size);
if ((content_length % resume_token->part_size) > 0) {
++num_parts;
}
if (resume_token->total_num_parts != num_parts) {
AWS_LOGF_ERROR(
AWS_LS_S3_META_REQUEST,
"Could not create auto-ranged-put meta request; persisted number of parts %zu"
" does not match expected number of parts based on length of the body.",
resume_token->total_num_parts);
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
*out_part_size = resume_token->part_size;
*out_total_num_parts = (uint32_t)resume_token->total_num_parts;
return AWS_OP_SUCCESS;
invalid_argument_cleanup:
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
/**
* Initializes state necessary to resume an upload. No-op if the token is null.
*/
static int s_try_init_resume_state_from_persisted_data(
struct aws_allocator *allocator,
struct aws_s3_auto_ranged_put *auto_ranged_put,
const struct aws_s3_meta_request_resume_token *resume_token) {
if (resume_token == NULL) {
auto_ranged_put->synced_data.list_parts_operation = NULL;
auto_ranged_put->synced_data.list_parts_state.completed = true;
auto_ranged_put->synced_data.list_parts_state.started = true;
return AWS_OP_SUCCESS;
}
AWS_FATAL_ASSERT(auto_ranged_put->has_content_length);
struct aws_byte_cursor request_path;
if (aws_http_message_get_request_path(auto_ranged_put->base.initial_request_message, &request_path)) {
AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Could not load persisted state. Request path could not be read.");
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
auto_ranged_put->synced_data.num_parts_started = 0;
auto_ranged_put->synced_data.num_parts_completed = 0;
auto_ranged_put->synced_data.num_parts_noop = 0;
auto_ranged_put->synced_data.create_multipart_upload_sent = true;
auto_ranged_put->synced_data.create_multipart_upload_completed = true;
auto_ranged_put->upload_id = aws_string_clone_or_reuse(allocator, resume_token->multipart_upload_id);
struct aws_s3_list_parts_params list_parts_params = {
.key = request_path,
.upload_id = aws_byte_cursor_from_string(auto_ranged_put->upload_id),
.on_part = s_process_part_info_synced,
.user_data = auto_ranged_put,
};
auto_ranged_put->synced_data.list_parts_operation = aws_s3_list_parts_operation_new(allocator, &list_parts_params);
struct aws_http_headers *needed_response_headers = aws_http_headers_new(allocator);
const size_t copy_header_count = AWS_ARRAY_SIZE(s_create_multipart_upload_copy_headers);
const struct aws_http_headers *initial_headers =
aws_http_message_get_headers(auto_ranged_put->base.initial_request_message);
/* Copy the headers that CreateMultipartUpload would have used from the initial message,
* since CreateMultipartUpload is never sent in this resume flow */
for (size_t header_index = 0; header_index < copy_header_count; ++header_index) {
const struct aws_byte_cursor *header_name = &s_create_multipart_upload_copy_headers[header_index];
struct aws_byte_cursor header_value;
AWS_ZERO_STRUCT(header_value);
if (aws_http_headers_get(initial_headers, *header_name, &header_value) == AWS_OP_SUCCESS) {
aws_http_headers_set(needed_response_headers, *header_name, header_value);
}
}
auto_ranged_put->synced_data.needed_response_headers = needed_response_headers;
return AWS_OP_SUCCESS;
}
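/* vtable wiring this meta-request type into the client's generic state machine:
* update() hands out the next request to send, prepare_request() builds the HTTP
* message asynchronously, and finished_request() advances the synced state. */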
static struct aws_s3_meta_request_vtable s_s3_auto_ranged_put_vtable = {
.update = s_s3_auto_ranged_put_update,
.send_request_finish = s_s3_auto_ranged_put_send_request_finish,
.schedule_prepare_request = s_s3_auto_ranged_put_schedule_prepare_request,
.prepare_request = s_s3_auto_ranged_put_prepare_request,
.init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default,
.sign_request = aws_s3_meta_request_sign_request_default,
.finished_request = s_s3_auto_ranged_put_request_finished,
.destroy = s_s3_meta_request_auto_ranged_put_destroy,
.finish = aws_s3_meta_request_finish_default,
.pause = s_s3_auto_ranged_put_pause,
};
/* Allocate a new auto-ranged put meta request */
struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_put_new(
struct aws_allocator *allocator,
struct aws_s3_client *client,
size_t part_size,
bool has_content_length,
uint64_t content_length,
uint32_t num_parts,
const struct aws_s3_meta_request_options *options) {
/* These should already have been validated by the caller. */
AWS_PRECONDITION(allocator);
AWS_PRECONDITION(client);
AWS_PRECONDITION(options);
AWS_PRECONDITION(options->message);
if (s_try_update_part_info_from_resume_token(content_length, options->resume_token, &part_size, &num_parts)) {
return NULL;
}
struct aws_s3_auto_ranged_put *auto_ranged_put =
aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_auto_ranged_put));
if (aws_s3_meta_request_init_base(
allocator,
client,
part_size,
client->compute_content_md5 == AWS_MR_CONTENT_MD5_ENABLED ||
aws_http_headers_has(aws_http_message_get_headers(options->message), g_content_md5_header_name),
options,
auto_ranged_put,
&s_s3_auto_ranged_put_vtable,
&auto_ranged_put->base)) {
aws_mem_release(allocator, auto_ranged_put);
return NULL;
}
auto_ranged_put->has_content_length = has_content_length;
auto_ranged_put->content_length = has_content_length ? content_length : 0;
auto_ranged_put->total_num_parts_from_content_length = has_content_length ? num_parts : 0;
auto_ranged_put->upload_id = NULL;
auto_ranged_put->resume_token = options->resume_token;
aws_s3_meta_request_resume_token_acquire(auto_ranged_put->resume_token);
auto_ranged_put->threaded_update_data.next_part_number = 1;
auto_ranged_put->synced_data.is_body_stream_at_end = false;
uint32_t initial_num_parts = auto_ranged_put->has_content_length ? num_parts : s_unknown_length_default_num_parts;
aws_array_list_init_dynamic(
&auto_ranged_put->synced_data.part_list, allocator, initial_num_parts, sizeof(struct aws_s3_mpu_part_info *));
if (s_try_init_resume_state_from_persisted_data(allocator, auto_ranged_put, options->resume_token)) {
goto error_clean_up;
}
if (auto_ranged_put->base.checksum_config.full_object_checksum.len > 0) {
/* A full-object checksum was set; make sure part-level checksums are still calculated
* and sent by the client. */
auto_ranged_put->base.checksum_config.location = AWS_SCL_TRAILER;
}
AWS_LOGF_DEBUG(
AWS_LS_S3_META_REQUEST, "id=%p Created new Auto-Ranged Put Meta Request.", (void *)&auto_ranged_put->base);
return &auto_ranged_put->base;
error_clean_up:
aws_s3_meta_request_release(&auto_ranged_put->base);
return NULL;
}
/* Destroy our auto-ranged put meta request */
static void s_s3_meta_request_auto_ranged_put_destroy(struct aws_s3_meta_request *meta_request) {
AWS_PRECONDITION(meta_request);
AWS_PRECONDITION(meta_request->impl);
struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
aws_string_destroy(auto_ranged_put->upload_id);
auto_ranged_put->upload_id = NULL;
auto_ranged_put->resume_token = aws_s3_meta_request_resume_token_release(auto_ranged_put->resume_token);
aws_s3_paginated_operation_release(auto_ranged_put->synced_data.list_parts_operation);
for (size_t part_index = 0; part_index < aws_array_list_length(&auto_ranged_put->synced_data.part_list);
++part_index) {
struct aws_s3_mpu_part_info *part;
aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index);
if (part != NULL) {
aws_byte_buf_clean_up(&part->checksum_base64);
aws_string_destroy(part->etag);
aws_mem_release(auto_ranged_put->base.allocator, part);
}
}
aws_array_list_clean_up(&auto_ranged_put->synced_data.part_list);
aws_string_destroy(auto_ranged_put->synced_data.list_parts_continuation_token);
aws_http_headers_release(auto_ranged_put->synced_data.needed_response_headers);
aws_mem_release(meta_request->allocator, auto_ranged_put);
}
/* Check flags and corresponding conditions to see if any more parts can be
* scheduled during this pass. */
static bool s_should_skip_scheduling_more_parts_based_on_flags(
const struct aws_s3_auto_ranged_put *auto_ranged_put,
uint32_t flags) {
/* If the stream is actually async, only allow 1 pending-read.
* We need to wait for async read() to complete before calling it again. */
if (auto_ranged_put->base.request_body_async_stream != NULL) {
return auto_ranged_put->synced_data.num_parts_pending_read > 0;
}
/* If doing async-writes, only allow a new part if buffered data is ready to send
* and no pending-read is still copying that data. */
if (auto_ranged_put->base.request_body_using_async_writes == true) {
return (auto_ranged_put->base.synced_data.async_write.ready_to_send == false) ||
(auto_ranged_put->synced_data.num_parts_pending_read > 0);
}
/* If this is the conservative pass, only allow 1 pending-read.
* Reads are serial anyway, so queuing up a whole bunch isn't necessarily a speedup. */
if ((flags & AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE) != 0) {
return auto_ranged_put->synced_data.num_parts_pending_read > 0;
}
/* In all other cases, cap the number of pending-reads to something reasonable */
return auto_ranged_put->synced_data.num_parts_pending_read >= s_max_parts_pending_read;
}
static void s_s3_auto_ranged_put_send_request_finish(
struct aws_s3_connection *connection,
struct aws_http_stream *stream,
int error_code) {
struct aws_s3_request *request = connection->request;
if (request->request_tag == AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART) {
/* TODO: single-part uploads may also benefit from a timeout, as multipart parts do. */
aws_s3_client_update_upload_part_timeout(request->meta_request->client, request, error_code);
}
aws_s3_meta_request_send_request_finish_default(connection, stream, error_code);
}
static bool s_s3_auto_ranged_put_update(
struct aws_s3_meta_request *meta_request,
uint32_t flags,
struct aws_s3_request **out_request) {
AWS_PRECONDITION(meta_request);
AWS_PRECONDITION(out_request);
struct aws_s3_request *request = NULL;
bool work_remaining = false;
struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
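/* Overall request sequence: ListParts (resume only) -> CreateMultipartUpload ->
* N UploadPart requests -> CompleteMultipartUpload. On cancellation or error, drain
* the in-flight parts and then send AbortMultipartUpload (unless paused or resume failed). */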
/* BEGIN CRITICAL SECTION */
{
aws_s3_meta_request_lock_synced_data(meta_request);
if (!aws_s3_meta_request_has_finish_result_synced(meta_request)) {
/* If resuming and list part has not been sent, do it now. */
if (!auto_ranged_put->synced_data.list_parts_state.started) {
request = aws_s3_request_new(
meta_request,
AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS,
AWS_S3_REQUEST_TYPE_LIST_PARTS,
0 /*part_number*/,
AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
auto_ranged_put->synced_data.list_parts_state.started = true;
goto has_work_remaining;
}
if (auto_ranged_put->synced_data.list_parts_state.continues) {
/* If list parts need to continue, send another list parts request. */
AWS_ASSERT(auto_ranged_put->synced_data.list_parts_continuation_token != NULL);
request = aws_s3_request_new(
meta_request,
AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS,
AWS_S3_REQUEST_TYPE_LIST_PARTS,
0 /*part_number*/,
AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
auto_ranged_put->synced_data.list_parts_state.continues = false;
goto has_work_remaining;
}
if (!auto_ranged_put->synced_data.list_parts_state.completed) {
/* waiting on list parts to finish. */
goto has_work_remaining;
}
/* If we haven't already sent a create-multipart-upload message, do so now. */
if (!auto_ranged_put->synced_data.create_multipart_upload_sent) {
request = aws_s3_request_new(
meta_request,
AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD,
AWS_S3_REQUEST_TYPE_CREATE_MULTIPART_UPLOAD,
0 /*part_number*/,
AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
auto_ranged_put->synced_data.create_multipart_upload_sent = true;
goto has_work_remaining;
}
/* If the create-multipart-upload message hasn't been completed, then there is still additional work to do,
* but it can't be done yet. */
if (!auto_ranged_put->synced_data.create_multipart_upload_completed) {
goto has_work_remaining;
}
bool should_create_next_part_request = false;
bool request_previously_uploaded = false;
if (auto_ranged_put->has_content_length && (auto_ranged_put->synced_data.num_parts_started <
auto_ranged_put->total_num_parts_from_content_length)) {
/* Check if next part was previously uploaded (due to resume) */
size_t part_index = auto_ranged_put->threaded_update_data.next_part_number - 1;
struct aws_s3_mpu_part_info *part = NULL;
aws_array_list_get_at(&auto_ranged_put->synced_data.part_list, &part, part_index);
if (part != NULL) {
AWS_ASSERT(part->was_previously_uploaded == true);
/* This part has been uploaded. */
request_previously_uploaded = true;
}
if (s_should_skip_scheduling_more_parts_based_on_flags(auto_ranged_put, flags)) {
goto has_work_remaining;
}
should_create_next_part_request = true;
} else if (!auto_ranged_put->has_content_length && !auto_ranged_put->synced_data.is_body_stream_at_end) {
if (s_should_skip_scheduling_more_parts_based_on_flags(auto_ranged_put, flags)) {
goto has_work_remaining;
}
should_create_next_part_request = true;
}
if (should_create_next_part_request) {
struct aws_s3_buffer_pool_ticket *ticket = NULL;
if (meta_request->synced_data.async_write.ready_to_send) {
/* Async-write already has a ticket, take ownership */
AWS_FATAL_ASSERT(meta_request->synced_data.async_write.buffered_data_ticket);
ticket = meta_request->synced_data.async_write.buffered_data_ticket;
meta_request->synced_data.async_write.buffered_data_ticket = NULL;
} else {
/* Try to reserve a ticket */
ticket = aws_s3_buffer_pool_reserve(meta_request->client->buffer_pool, meta_request->part_size);
}
if (ticket != NULL) {
/* Allocate a request for another part. */
request = aws_s3_request_new(
meta_request,
AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART,
AWS_S3_REQUEST_TYPE_UPLOAD_PART,
0 /*part_number*/,
AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_REQUEST_BODY);
request->part_number = auto_ranged_put->threaded_update_data.next_part_number;
/* If request was previously uploaded, we prepare it to ensure checksums still match,
* but ultimately it gets marked no-op and we don't send it */
request->was_previously_uploaded = request_previously_uploaded;
request->ticket = ticket;
if (meta_request->synced_data.async_write.ready_to_send) {
/* Async-write already has a buffer */
request->request_body = meta_request->synced_data.async_write.buffered_data;
}
++auto_ranged_put->threaded_update_data.next_part_number;
++auto_ranged_put->synced_data.num_parts_started;
++auto_ranged_put->synced_data.num_parts_pending_read;
AWS_LOGF_DEBUG(
AWS_LS_S3_META_REQUEST,
"id=%p: Returning request %p for part %d",
(void *)meta_request,
(void *)request,
request->part_number);
}
goto has_work_remaining;
}
/* There is one more request to send after all the parts (the complete-multipart-upload) but it can't be
* done until all the parts have been completed. */
if (auto_ranged_put->has_content_length) {
if (auto_ranged_put->synced_data.num_parts_completed !=
auto_ranged_put->total_num_parts_from_content_length) {
goto has_work_remaining;
}
} else {
if ((!auto_ranged_put->synced_data.is_body_stream_at_end) ||
auto_ranged_put->synced_data.num_parts_completed !=
auto_ranged_put->synced_data.num_parts_started) {
goto has_work_remaining;
}
}
/* If the complete-multipart-upload request hasn't been set yet, then send it now. */
if (!auto_ranged_put->synced_data.complete_multipart_upload_sent) {
request = aws_s3_request_new(
meta_request,
AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD,
AWS_S3_REQUEST_TYPE_COMPLETE_MULTIPART_UPLOAD,
0 /*part_number*/,
AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
auto_ranged_put->synced_data.complete_multipart_upload_sent = true;
goto has_work_remaining;
}
/* Wait for the complete-multipart-upload request to finish. */
if (!auto_ranged_put->synced_data.complete_multipart_upload_completed) {
goto has_work_remaining;
}
goto no_work_remaining;
} else {
/* If the create multipart upload hasn't been sent, then there is nothing left to do when canceling. */
if (!auto_ranged_put->synced_data.create_multipart_upload_sent) {
goto no_work_remaining;
}
/* If the create-multipart-upload request is still in flight, wait for it to finish. */
if (!auto_ranged_put->synced_data.create_multipart_upload_completed) {
goto has_work_remaining;
}
/* If the number of parts completed is less than the number of parts sent, then we need to wait until all of
* those parts are done sending before aborting. */
if (auto_ranged_put->synced_data.num_parts_completed < auto_ranged_put->synced_data.num_parts_started) {
goto has_work_remaining;
}
/* If the complete-multipart-upload is already in flight, then we can't necessarily send an abort. */
if (auto_ranged_put->synced_data.complete_multipart_upload_sent &&
!auto_ranged_put->synced_data.complete_multipart_upload_completed) {
goto has_work_remaining;
}
/* If the upload was paused or resume failed, we don't abort the multipart upload. */
if (meta_request->synced_data.finish_result.error_code == AWS_ERROR_S3_PAUSED ||
meta_request->synced_data.finish_result.error_code == AWS_ERROR_S3_RESUME_FAILED) {
goto no_work_remaining;
}
/* If the complete-multipart-upload completed successfully, then there is nothing to abort since the
* transfer has already finished. */
if (auto_ranged_put->synced_data.complete_multipart_upload_completed &&
auto_ranged_put->synced_data.complete_multipart_upload_error_code == AWS_ERROR_SUCCESS) {
goto no_work_remaining;
}
/* If we made it here, and the abort-multipart-upload message hasn't been sent yet, then do so now. */
if (!auto_ranged_put->synced_data.abort_multipart_upload_sent) {
if (auto_ranged_put->upload_id == NULL) {
goto no_work_remaining;
}
if (auto_ranged_put->base.synced_data.finish_result.error_code == AWS_ERROR_SUCCESS) {
/* Not sending abort when success even if we haven't sent complete MPU, in case we resume after MPU
* already completed. */
goto no_work_remaining;
}
request = aws_s3_request_new(
meta_request,
AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD,
AWS_S3_REQUEST_TYPE_ABORT_MULTIPART_UPLOAD,
0 /*part_number*/,
AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_ALWAYS_SEND);
auto_ranged_put->synced_data.abort_multipart_upload_sent = true;
goto has_work_remaining;
}
/* Wait for the multipart upload to be completed. */
if (!auto_ranged_put->synced_data.abort_multipart_upload_completed) {
goto has_work_remaining;
}
goto no_work_remaining;
}
has_work_remaining:
work_remaining = true;
no_work_remaining:
/* If some events are still being delivered to caller, then wait for those to finish */
if (!work_remaining && aws_s3_meta_request_are_events_out_for_delivery_synced(meta_request)) {
work_remaining = true;
}
if (!work_remaining) {
aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK);
}
aws_s3_meta_request_unlock_synced_data(meta_request);
}
/* END CRITICAL SECTION */
if (work_remaining) {
*out_request = request;
} else {
AWS_ASSERT(request == NULL);
aws_s3_meta_request_finish(meta_request);
}
return work_remaining;
}
/**
* Helper to compute the request body size for a given part.
* Returns the configured part size, or, when the content length does not divide
* evenly into parts, the smaller size of the final part.
*/
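/* Worked example (hypothetical numbers): a 20 MiB body with an 8 MiB part size yields parts
* of 8, 8, and 4 MiB; part 3's offset is (3 - 1) * 8 MiB = 16 MiB. */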
static size_t s_compute_request_body_size(
const struct aws_s3_meta_request *meta_request,
uint32_t part_number,
uint64_t *offset_out) {
AWS_PRECONDITION(meta_request);
const struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
size_t request_body_size = meta_request->part_size;
/* Last part--adjust size to match remaining content length. */
if (auto_ranged_put->has_content_length && part_number == auto_ranged_put->total_num_parts_from_content_length) {
size_t content_remainder = (size_t)(auto_ranged_put->content_length % (uint64_t)meta_request->part_size);
if (content_remainder > 0) {
request_body_size = content_remainder;
}
}
/* The part_number starts at 1. Widen before multiplying so the byte offset cannot
* overflow on platforms where size_t is 32-bit. */
*offset_out = (uint64_t)(part_number - 1) * (uint64_t)meta_request->part_size;
return request_body_size;
}
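/* When resuming, previously uploaded parts are re-read from the body stream and verified:
* recompute the checksum over the part's bytes, base64-encode it, and compare it against the
* value ListParts reported. A mismatch fails the meta-request with
* AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH rather than silently reusing divergent data. */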
static int s_verify_part_matches_checksum(
struct aws_allocator *allocator,
struct aws_byte_cursor body_cur,
enum aws_s3_checksum_algorithm algorithm,
struct aws_byte_cursor part_checksum) {
AWS_PRECONDITION(allocator);
if (algorithm == AWS_SCA_NONE) {
return AWS_OP_SUCCESS;
}
struct aws_byte_buf checksum;
if (aws_byte_buf_init(&checksum, allocator, aws_get_digest_size_from_checksum_algorithm(algorithm))) {
return AWS_OP_ERR;
}
struct aws_byte_buf encoded_checksum = {0};
int return_status = AWS_OP_SUCCESS;
size_t encoded_len = 0;
if (aws_base64_compute_encoded_len(aws_get_digest_size_from_checksum_algorithm(algorithm), &encoded_len)) {
AWS_LOGF_ERROR(
AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to determine length of encoded checksum.");
return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED);
goto on_done;
}
if (aws_checksum_compute(allocator, algorithm, &body_cur, &checksum)) {
AWS_LOGF_ERROR(
AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to compute checksum for the skipped part.");
return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED);
goto on_done;
}
if (aws_byte_buf_init(&encoded_checksum, allocator, encoded_len)) {
AWS_LOGF_ERROR(
AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to allocate buffer for encoded checksum.");
return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED);
goto on_done;
}
struct aws_byte_cursor checksum_cur = aws_byte_cursor_from_buf(&checksum);
if (aws_base64_encode(&checksum_cur, &encoded_checksum)) {
AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to encode checksum.");
return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED);
goto on_done;
}
if (!aws_byte_cursor_eq_byte_buf(&part_checksum, &encoded_checksum)) {
AWS_LOGF_ERROR(
AWS_LS_S3_META_REQUEST, "Failed to resume upload. Checksum for previously uploaded part does not match");
return_status = aws_raise_error(AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH);
goto on_done;
}
on_done:
aws_byte_buf_clean_up(&checksum);
aws_byte_buf_clean_up(&encoded_checksum);
return return_status;
}
static void s_s3_auto_ranged_put_schedule_prepare_request(
struct aws_s3_meta_request *meta_request,
struct aws_s3_request *request,
aws_s3_meta_request_prepare_request_callback_fn *callback,
void *user_data) {
AWS_PRECONDITION(meta_request);
AWS_PRECONDITION(request);
/* When the body stream supports parallel reads and the request is an UploadPart,
* prepare parts in parallel to speed up reading. */
bool parallel_prepare =
(meta_request->request_body_parallel_stream && request->request_tag == AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART);
aws_s3_meta_request_schedule_prepare_request_default_impl(
meta_request, request, parallel_prepare /*parallel*/, callback, user_data);
}
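/* Request preparation is a chain of aws_future-based steps: the type-specific prepare
* function returns an aws_future_http_message (possibly completing asynchronously,
* e.g. after a body read), and s_s3_auto_ranged_put_prepare_request_finish then applies
* the steps common to every request type. */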
/* Given a request, prepare it for sending based on its description. */
static struct aws_future_void *s_s3_auto_ranged_put_prepare_request(struct aws_s3_request *request) {
struct aws_future_void *asyncstep_prepare_request = aws_future_void_new(request->allocator);
/* Store data for async job */
struct aws_s3_auto_ranged_put_prepare_request_job *request_prep =
aws_mem_calloc(request->allocator, 1, sizeof(struct aws_s3_auto_ranged_put_prepare_request_job));
request_prep->allocator = request->allocator;
request_prep->on_complete = aws_future_void_acquire(asyncstep_prepare_request);
request_prep->request = request;
/* Each type of request prepares an aws_http_message in its own way, which may require async substeps */
switch (request->request_tag) {
case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS:
request_prep->asyncstep_prepare_message = s_s3_prepare_list_parts(request);
break;
case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD:
request_prep->asyncstep_prepare_message = s_s3_prepare_create_multipart_upload(request);
break;
case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART:
request_prep->asyncstep_prepare_message = s_s3_prepare_upload_part(request);
break;
case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD:
request_prep->asyncstep_prepare_message = s_s3_prepare_complete_multipart_upload(request);
break;
case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD:
request_prep->asyncstep_prepare_message = s_s3_prepare_abort_multipart_upload(request);
break;
default:
AWS_FATAL_ASSERT(0);
break;
}
/* When the specific type of message is ready, finish common preparation steps */
aws_future_http_message_register_callback(
request_prep->asyncstep_prepare_message, s_s3_auto_ranged_put_prepare_request_finish, request_prep);
return asyncstep_prepare_request;
}
/* Prepare a ListParts request.
* Currently, this is actually synchronous. */
static struct aws_future_http_message *s_s3_prepare_list_parts(struct aws_s3_request *request) {
struct aws_s3_meta_request *meta_request = request->meta_request;
struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
struct aws_http_message *message = NULL;
int message_creation_result = AWS_OP_ERR;
/* BEGIN CRITICAL SECTION */
{
aws_s3_meta_request_lock_synced_data(meta_request);
if (auto_ranged_put->synced_data.list_parts_continuation_token) {
AWS_LOGF_DEBUG(
AWS_LS_S3_META_REQUEST,
"id=%p ListParts for Multi-part Upload, with ID:%s, continues with token:%s.",
(void *)meta_request,
aws_string_c_str(auto_ranged_put->upload_id),
aws_string_c_str(auto_ranged_put->synced_data.list_parts_continuation_token));
struct aws_byte_cursor continuation_cur =
aws_byte_cursor_from_string(auto_ranged_put->synced_data.list_parts_continuation_token);
message_creation_result = aws_s3_construct_next_paginated_request_http_message(
auto_ranged_put->synced_data.list_parts_operation, &continuation_cur, &message);
} else {
message_creation_result = aws_s3_construct_next_paginated_request_http_message(
auto_ranged_put->synced_data.list_parts_operation, NULL, &message);
}
aws_s3_meta_request_unlock_synced_data(meta_request);
}
/* END CRITICAL SECTION */
/* ListParts will not fail to construct the next message (see `s_construct_next_request_http_message`) */
AWS_FATAL_ASSERT(message_creation_result == AWS_OP_SUCCESS);
if (meta_request->checksum_config.checksum_algorithm == AWS_SCA_NONE) {
/* We don't need to worry about a user-precalculated checksum: for a multipart upload,
* the only way to compute checksums is to have the client calculate them per part. */
aws_s3_message_util_copy_headers(
meta_request->initial_request_message,
message,
g_s3_list_parts_excluded_headers,
g_s3_list_parts_excluded_headers_count,
true);
} else {
aws_s3_message_util_copy_headers(
meta_request->initial_request_message,
message,
g_s3_list_parts_with_checksum_excluded_headers,
g_s3_list_parts_with_checksum_excluded_headers_count,
true);
}
AWS_ASSERT(message);
struct aws_future_http_message *future = aws_future_http_message_new(request->allocator);
aws_future_http_message_set_result_by_move(future, &message);
return future;
}
/* Prepare a CreateMultipartUpload request.
* Currently, this is actually synchronous. */
static struct aws_future_http_message *s_s3_prepare_create_multipart_upload(struct aws_s3_request *request) {
struct aws_s3_meta_request *meta_request = request->meta_request;
/* Create the message to create a new multipart upload. */
struct aws_http_message *message = aws_s3_create_multipart_upload_message_new(
meta_request->allocator, meta_request->initial_request_message, &meta_request->checksum_config);
struct aws_future_http_message *future = aws_future_http_message_new(request->allocator);
if (message != NULL) {
aws_future_http_message_set_result_by_move(future, &message);
} else {
aws_future_http_message_set_error(future, aws_last_error_or_unknown());
}
return future;
}
/* Prepare an UploadPart request */
static struct aws_future_http_message *s_s3_prepare_upload_part(struct aws_s3_request *request) {
struct aws_s3_meta_request *meta_request = request->meta_request;
struct aws_allocator *allocator = request->allocator;
struct aws_future_http_message *message_future = aws_future_http_message_new(allocator);
struct aws_s3_prepare_upload_part_job *part_prep =
aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_prepare_upload_part_job));
part_prep->allocator = allocator;
part_prep->request = request;
part_prep->on_complete = aws_future_http_message_acquire(message_future);
if (request->num_times_prepared == 0) {
/* Preparing request for the first time.
* Next async step: read through the body stream until we've
* skipped over parts that were already uploaded (in case we're resuming
* from an upload that had been paused) */
/* Read the body */
uint64_t offset = 0;
size_t request_body_size = s_compute_request_body_size(meta_request, request->part_number, &offset);
if (request->request_body.capacity == 0) {
AWS_FATAL_ASSERT(request->ticket);
request->request_body =
aws_s3_buffer_pool_acquire_buffer(request->meta_request->client->buffer_pool, request->ticket);
request->request_body.capacity = request_body_size;
}
/* Kick off the async read of this part's bytes; the callback may fire synchronously
* if the read completes immediately. */
part_prep->asyncstep_read_part = aws_s3_meta_request_read_body(meta_request, offset, &request->request_body);
aws_future_bool_register_callback(
part_prep->asyncstep_read_part, s_s3_prepare_upload_part_on_read_done, part_prep);