1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_IO_READ_H
3 #define _BCACHEFS_IO_READ_H
13 * Reads will often have to be split, and if the extent being read from
14 * was checksummed or compressed we'll also have to allocate bounce
15 * buffers and copy the data back into the original bio.
17 * If we didn't have to split, we have to save and restore the original
18 * bi_end_io - @split below indicates which:
/* Parent rbio when this read was split off a larger one (NULL otherwise -- TODO confirm) */
21 struct bch_read_bio *parent;

26 * Saved copy of bio->bi_iter, from submission time - allows us to
27 * resubmit on IO error, and also to copy data back to the original bio
28 * when we're bouncing:
30 struct bvec_iter bvec_iter;

/* Offset into the extent being read -- units (bytes vs. sectors) not visible here, confirm */
32 unsigned offset_into_extent;

/* Devices already tried/held for this read -- presumably used to pick a different replica on retry; verify */
49 struct bch_devs_list devs_have;

/* The decoded extent pointer (replica) chosen to read from */
51 struct extent_ptr_decoded pick;

54 * pos we read from - different from data_pos for indirect extents:

60 * start pos of data we read (may not be pos of data we want) - for
61 * promote, narrow extents paths:
/* Btree the extent actually lives in (extents vs. reflink -- see bch2_read_indirect_extent()) */
63 enum btree_id data_btree;

/* Version of the key we read -- NOTE(review): consumer not visible in this chunk; confirm */
65 struct bversion version;

/* In-flight promote (cache) operation for this read, if any */
67 struct promote_op *promote;

/* IO options in effect for this read */
69 struct bch_io_opts opts;

/* For deferring completion/retry work to process context (workqueue) */
71 struct work_struct work;
/* Recover the enclosing bch_read_bio from its embedded struct bio member */
76 #define to_rbio(_bio) container_of((_bio), struct bch_read_bio, bio)
/* Forward declarations so this header need not pull in the full definitions */
79 struct cache_promote_op;
80 struct extent_ptr_decoded;

/* Slow path for resolving reflink pointers -- see bch2_read_indirect_extent() below */
82 int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
/*
 * Resolve an indirect extent: if @k is a reflink pointer, redirect the read at
 * the reflink btree and chase the pointer via __bch2_read_indirect_extent(),
 * adjusting *offset_into_extent along the way. Non-reflink keys are left
 * untouched (the early-return branch is truncated in this view -- presumably
 * returns 0; confirm against the full source).
 */
85 static inline int bch2_read_indirect_extent(struct btree_trans *trans,
86 enum btree_id *data_btree,
87 unsigned *offset_into_extent,
90 if (k->k->k.type != KEY_TYPE_reflink_p)
93 *data_btree = BTREE_ID_reflink;
94 return __bch2_read_indirect_extent(trans, offset_into_extent, k);
/* Retry if we raced with a pointer going stale -- TODO confirm exact trigger */
98 BCH_READ_RETRY_IF_STALE = 1 << 0,
/* Read is allowed to kick off a promote (cache) operation */
99 BCH_READ_MAY_PROMOTE = 1 << 1,
/* Presumably: bio targets user-mapped pages -- verify against callers */
100 BCH_READ_USER_MAPPED = 1 << 2,
/* Skip decode (decompress/checksum?) of the data -- NOTE(review): semantics not visible here */
101 BCH_READ_NODECODE = 1 << 3,
/* This is the last fragment of a split read -- TODO confirm */
102 BCH_READ_LAST_FRAGMENT = 1 << 4,

/* The following look internal to the read path (set by it, not by callers) -- confirm: */
105 BCH_READ_MUST_BOUNCE = 1 << 5,
106 BCH_READ_MUST_CLONE = 1 << 6,
107 BCH_READ_IN_RETRY = 1 << 7,
/*
 * Core per-extent read: issue a read of one extent (key @bkey_s_c from the
 * given btree) into the supplied bvec_iter, with an optional failure list for
 * retries and BCH_READ_* flags. Wrapped by bch2_read_extent() below.
 */
110 int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
111 struct bvec_iter, struct bpos, enum btree_id,
112 struct bkey_s_c, unsigned,
113 struct bch_io_failures *, unsigned);
/*
 * Convenience wrapper around __bch2_read_extent(): reads into the rbio's own
 * bi_iter and passes no failure list (NULL), i.e. the common single-attempt
 * case.
 */
115 static inline void bch2_read_extent(struct btree_trans *trans,
116 struct bch_read_bio *rbio, struct bpos read_pos,
117 enum btree_id data_btree, struct bkey_s_c k,
118 unsigned offset_into_extent, unsigned flags)
120 __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
121 data_btree, k, offset_into_extent, NULL, flags);
/* Full read path: walks extents for @subvol_inum and issues per-extent reads;
 * wrapped by bch2_read() below with the default flag set. */
124 void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
125 subvol_inum, struct bch_io_failures *, unsigned flags);
/*
 * High-level read entry point: records the start time and subvolume on the
 * rbio, then issues the read with a fresh (empty) failure list and the
 * default flag set (retry-if-stale, may-promote, user-mapped).
 *
 * The BUG_ON indicates @rbio must arrive with _state cleared, i.e. freshly
 * initialized (see rbio_init()).
 */
127 static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
130 struct bch_io_failures failed = { .nr = 0 };

132 BUG_ON(rbio->_state);

135 rbio->start_time = local_clock();
136 rbio->subvol = inum.subvol;

138 __bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
139 BCH_READ_RETRY_IF_STALE|
140 BCH_READ_MAY_PROMOTE|
141 BCH_READ_USER_MAPPED);
/*
 * Initialize the bch_read_bio embedded in @bio and return it. Clears the
 * promote pointer; presumably also clears _state and stores @opts in the
 * lines elided from this view -- confirm against the full source.
 */
144 static inline struct bch_read_bio *rbio_init(struct bio *bio,
145 struct bch_io_opts opts)
147 struct bch_read_bio *rbio = to_rbio(bio);

150 rbio->promote = NULL;
/* Filesystem-lifetime setup/teardown for the read path (init returns 0 on
 * success or a negative error -- kernel convention; confirm). */
155 void bch2_fs_io_read_exit(struct bch_fs *);
156 int bch2_fs_io_read_init(struct bch_fs *);
158 #endif /* _BCACHEFS_IO_READ_H */