88
struct loop_tgt_data {
	/* forward request payload via /dev/ublkcN (UBLK_F_USER_COPY) */
	bool user_copy;
	/* register request buffers as io_uring fixed buffers (UBLK_F_SUPPORT_ZERO_COPY) */
	bool zero_copy;
	/* NOTE(review): backing file appears to be a block device — confirm at setup site */
	bool block_device;
	/* offset (in sectors) added to each request's start sector */
	unsigned long offset;
};
@@ -102,13 +103,12 @@ static int loop_setup_tgt(struct ublksrv_dev *dev, int type, bool recovery)
102103 tgt->tgt_ring_depth = info->queue_depth ;
103104 tgt->nr_fds = 1 ;
104105 tgt->fds [1 ] = fd;
106+
107+ tgt_data->zero_copy = info->flags & UBLK_F_SUPPORT_ZERO_COPY;
105108 tgt_data->user_copy = info->flags & UBLK_F_USER_COPY;
106- if (tgt_data->user_copy )
109+ if (tgt_data->zero_copy || tgt_data-> user_copy )
107110 tgt->tgt_ring_depth *= 2 ;
108111
109- if (info->flags & UBLK_F_SUPPORT_ZERO_COPY)
110- return -EINVAL;
111-
112112 return 0 ;
113113}
114114
@@ -138,7 +138,8 @@ static int loop_init_tgt(struct ublksrv_dev *dev, int type, int argc, char
138138 char *file = NULL ;
139139 struct ublksrv_tgt_base_json tgt_json = { 0 };
140140 struct ublk_params p = {
141- .types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD,
141+ .types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD |
142+ UBLK_PARAM_TYPE_DMA_ALIGN,
142143 .basic = {
143144 .attrs = UBLK_ATTR_VOLATILE_CACHE | UBLK_ATTR_FUA,
144145 .logical_bs_shift = 9 ,
@@ -152,6 +153,9 @@ static int loop_init_tgt(struct ublksrv_dev *dev, int type, int argc, char
152153 .max_discard_sectors = UINT_MAX >> 9 ,
153154 .max_discard_segments = 1 ,
154155 },
156+ .dma = {
157+ .alignment = 511 ,
158+ },
155159 };
156160 bool can_discard = false ;
157161 unsigned long offset = 0 ;
@@ -327,7 +331,7 @@ static int lo_rw(const struct ublksrv_queue *q,
327331 const struct ublksrv_io_desc *iod, int tag,
328332 const struct loop_tgt_data *tgt_data)
329333{
330- enum io_uring_op uring_op = ublk_to_uring_fs_op (iod);
334+ enum io_uring_op uring_op = ublk_to_uring_fs_op (iod, false );
331335 void *buf = (void *)iod->addr ;
332336 struct io_uring_sqe *sqe[1 ];
333337
@@ -345,11 +349,51 @@ static int lo_rw(const struct ublksrv_queue *q,
345349 return 1 ;
346350}
347351
/*
 * Handle a read/write request on the zero-copy path: register the ublk
 * request buffer as an io_uring fixed buffer, issue the backing-file I/O
 * against that buffer, then unregister it.  The three SQEs are chained
 * with IOSQE_IO_LINK so they execute in order.
 *
 * Returns the number of CQEs the caller should expect (2: the register
 * CQE is suppressed on success via IOSQE_CQE_SKIP_SUCCESS).
 */
static int lo_rw_zero_copy(const struct ublksrv_queue *q,
		const struct ublksrv_io_desc *iod, int tag,
		const struct loop_tgt_data *tgt_data)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	/* 'true' selects the fixed-buffer variant of the fs opcode */
	enum io_uring_op uring_op = ublk_to_uring_fs_op(iod, true);
	struct io_uring_sqe *sqe[3];

	ublk_queue_alloc_sqes(q, sqe, 3);

	/* SQE 0: register the request buffer under fixed-buffer index 'tag' */
	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF),
			0,
			1);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_FIXED_FILE | IOSQE_IO_LINK;

	/* SQE 1: the actual read/write against the backing file (fds[1]) */
	io_uring_prep_rw(uring_op,
			sqe[1],
			1 /* fds[1]*/,
			0,
			iod->nr_sectors << 9,
			(iod->start_sector + tgt_data->offset) << 9);
	sqe[1]->buf_index = tag;
	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_LINK;
	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1);

	/* SQE 2: unregister the buffer once the linked I/O completes */
	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag);
	sqe[2]->flags |= IOSQE_FIXED_FILE;
	sqe[2]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF),
			0,
			1);

	// buf register is marked as IOSQE_CQE_SKIP_SUCCESS
	return 2;
}
389+
/*
 * Dispatch a read/write request to the best available data path:
 * zero copy first, then user copy, falling back to the classic
 * buffered path.  Returns the number of expected CQEs.
 */
static int loop_queue_tgt_rw(const struct ublksrv_queue *q,
		const struct ublksrv_io_desc *iod, int tag,
		const struct loop_tgt_data *data)
{
	/* zero_copy has top priority */
	if (data->zero_copy)
		return lo_rw_zero_copy(q, iod, tag, data);
	if (data->user_copy)
		return lo_rw_user_copy(q, iod, tag, data);
	return lo_rw(q, iod, tag, data);
}