1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include <linux/fdtable.h>
3 #include <linux/anon_inodes.h>
/*
 * ->release() handler for the on-demand anon fd handed to the daemon.
 * Marks the object's on-demand id as CLOSED, flushes pending READ
 * requests for this object out of cache->reqs (their completion depends
 * on the now-gone fd), frees the id, and drops the fd's object reference
 * and the cache unbind pincount.
 *
 * NOTE(review): this extract is elided — braces, the 'file' parameter
 * line, loop bodies and the return statement between the visible lines
 * are not shown here.
 */
7 static int cachefiles_ondemand_fd_release(struct inode *inode,
10 struct cachefiles_object *object = file->private_data;
11 struct cachefiles_cache *cache = object->volume->cache;
/* Snapshot the id before it is overwritten with the CLOSED sentinel. */
12 int object_id = object->ondemand_id;
13 struct cachefiles_req *req;
14 XA_STATE(xas, &cache->reqs, 0);
/* id update and request flush are done atomically under the xa lock. */
16 xa_lock(&cache->reqs);
17 object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
20 * Flush all pending READ requests since their completion depends on
/* Walk every queued request; only this object's READs are removed. */
23 xas_for_each(&xas, req, ULONG_MAX) {
24 if (req->msg.object_id == object_id &&
25 req->msg.opcode == CACHEFILES_OP_READ) {
/* Drop the entry from the xarray (store NULL == erase at this index). */
28 xas_store(&xas, NULL);
31 xa_unlock(&cache->reqs);
/* Return the on-demand id to the allocator now that the fd is gone. */
33 xa_erase(&cache->ondemand_ids, object_id);
34 trace_cachefiles_ondemand_fd_release(object, object_id);
/* Drop the reference taken when the anon fd was created. */
35 cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
36 cachefiles_put_unbind_pincount(cache);
/*
 * ->write_iter() for the on-demand anon fd: the daemon writes fetched
 * data into the object's backing file at kiocb->ki_pos.  Space is
 * reserved via __cachefiles_prepare_write() under the cache's security
 * credentials, then the actual write is issued synchronously (NULL
 * completion callback/context to __cachefiles_write()).
 *
 * NOTE(review): elided extract — error checks between the visible
 * lines (e.g. after prepare_write) and the return path are not shown.
 */
40 static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
41 struct iov_iter *iter)
43 struct cachefiles_object *object = kiocb->ki_filp->private_data;
44 struct cachefiles_cache *cache = object->volume->cache;
/* Backing file of the cache object, not the anon fd's file. */
45 struct file *file = object->file;
46 size_t len = iter->count;
47 loff_t pos = kiocb->ki_pos;
48 const struct cred *saved_cred;
/* Act with the cache's credentials while reserving space. */
54 cachefiles_begin_secure(cache, &saved_cred);
/* May adjust pos/len; 'true' flag's meaning not visible here — see
 * __cachefiles_prepare_write() definition. */
55 ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
56 cachefiles_end_secure(cache, saved_cred);
60 trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
61 ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
/*
 * ->llseek() for the on-demand anon fd: simply forwarded to the
 * object's backing file via vfs_llseek(), so the daemon can seek as if
 * operating on the cache file itself.
 *
 * NOTE(review): elided extract — the 'whence' parameter line and any
 * intervening checks are not shown.
 */
68 static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
71 struct cachefiles_object *object = filp->private_data;
72 struct file *file = object->file;
77 return vfs_llseek(file, pos, whence);
/*
 * ->unlocked_ioctl() for the on-demand anon fd.  Only
 * CACHEFILES_IOC_READ_COMPLETE is supported, and only while the cache
 * is in on-demand mode: the daemon signals that the READ request with
 * the given id has been served, so the request is removed from
 * cache->reqs and its waiter is woken via complete().
 *
 * NOTE(review): elided extract — 'id' is presumably derived from the
 * ioctl's arg (not visible here), as are the error returns for the two
 * guard checks and the NULL-req case.
 */
80 static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
83 struct cachefiles_object *object = filp->private_data;
84 struct cachefiles_cache *cache = object->volume->cache;
85 struct cachefiles_req *req;
/* Reject any ioctl other than READ completion. */
88 if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
/* Completion only makes sense while on-demand mode is active. */
91 if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
/* Atomically claim the request so it can be completed exactly once. */
95 req = xa_erase(&cache->reqs, id)
99 trace_cachefiles_ondemand_cread(object, id);
100 complete(&req->done);
/*
 * File operations backing the anon fd passed to the user-space daemon:
 * release tears down the on-demand id, write_iter stores fetched data,
 * llseek forwards to the backing file, and the ioctl acknowledges READ
 * completion.
 */
104 static const struct file_operations cachefiles_ondemand_fd_fops = {
105 .owner = THIS_MODULE,
106 .release = cachefiles_ondemand_fd_release,
107 .write_iter = cachefiles_ondemand_fd_write_iter,
108 .llseek = cachefiles_ondemand_fd_llseek,
109 .unlocked_ioctl = cachefiles_ondemand_fd_ioctl,
113 * OPEN request Completion (copen)
114 * - command: "copen <id>,<cache_size>"
115 * <cache_size> indicates the object size if >=0, error code if negative
/*
 * Handle the daemon's "copen <id>,<cache_size>" reply to an OPEN
 * request: look up and claim the request by id, parse the size, record
 * it in the cookie (a negative size is the daemon reporting an error),
 * maintain the cookie's NO_DATA_TO_READ hint, and complete the waiter.
 *
 * NOTE(review): elided extract — the declarations of pid/psize/size/
 * id/ret, string splitting around the ',', and the error-return paths
 * between the visible lines are not shown.
 */
117 int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
119 struct cachefiles_req *req;
120 struct fscache_cookie *cookie;
/* copen is only valid while on-demand mode is enabled. */
126 if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
130 pr_err("Empty id specified\n");
/* Split "<id>,<cache_size>" at the comma. */
135 psize = strchr(args, ',');
137 pr_err("Cache size is not specified\n");
144 ret = kstrtoul(pid, 0, &id);
/* Atomically claim the OPEN request so it is completed exactly once. */
148 req = xa_erase(&cache->reqs, id);
152 /* fail OPEN request if copen format is invalid */
153 ret = kstrtol(psize, 0, &size);
159 /* fail OPEN request if daemon reports an error */
/* A non-error size is the object size reported by the daemon. */
161 if (!IS_ERR_VALUE(size))
167 cookie = req->object->cookie;
168 cookie->object_size = size;
/* Non-empty object: data may be readable from the cache. */
170 clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
172 set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
173 trace_cachefiles_ondemand_copen(req->object, id, size);
/* Wake whoever is blocked in cachefiles_ondemand_send_req(). */
176 complete(&req->done);
/*
 * Build the anon fd for an OPEN request before it is handed to the
 * daemon: take a ref on the object, allocate a cyclic on-demand id,
 * reserve an fd, create the anon inode file bound to
 * cachefiles_ondemand_fd_fops, and publish fd/id into the request
 * message.  The fd ref is what fd_release() later drops.
 *
 * NOTE(review): elided extract — declarations of object_id/fd/file/ret,
 * the error-label structure between the visible lines (the two trailing
 * statements look like unwind labels), and storing 'fd' into 'load'
 * are not shown.
 */
180 static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
182 struct cachefiles_object *object;
183 struct cachefiles_cache *cache;
184 struct cachefiles_open *load;
/* Reference held on behalf of the anon fd; dropped in fd_release(). */
189 object = cachefiles_grab_object(req->object,
190 cachefiles_obj_get_ondemand_fd);
191 cache = object->volume->cache;
/* Ids start at 1: 0 is reserved as "never opened". */
193 ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
194 XA_LIMIT(1, INT_MAX),
195 &cache->ondemand_id_next, GFP_KERNEL);
/* Daemon only ever writes through this fd. */
199 fd = get_unused_fd_flags(O_WRONLY);
205 file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
/* anon inodes don't get these by default; needed for pwrite/llseek. */
212 file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
213 fd_install(fd, file);
/* Opcode-specific payload area of the message. */
215 load = (void *)req->msg.data;
217 req->msg.object_id = object_id;
218 object->ondemand_id = object_id;
/* Keep the cache pinned while a daemon fd is outstanding. */
220 cachefiles_get_unbind_pincount(cache);
221 trace_cachefiles_ondemand_open(object, &req->msg, load);
/* Unwind: free the id and drop the fd's object reference. */
227 xa_erase(&cache->ondemand_ids, object_id);
229 cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
/*
 * Feed the next pending request to the daemon reading /dev/cachefiles:
 * find the first request still marked CACHEFILES_REQ_NEW, clear the
 * mark so it is delivered once, attach an anon fd for OPEN requests,
 * and copy the message to user space.  CLOSE requests expect no reply
 * and are completed immediately; on copy/fd failure the request is
 * torn down (fd closed for OPEN) and completed with the error.
 *
 * NOTE(review): elided extract — declarations of n/ret, the early
 * returns after the unlocks, msg/id initialisation from 'req', and the
 * error-label layout are not shown.
 */
233 ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
234 char __user *_buffer, size_t buflen)
236 struct cachefiles_req *req;
237 struct cachefiles_msg *msg;
238 unsigned long id = 0;
241 XA_STATE(xas, &cache->reqs, 0);
244 * Search for a request that has not ever been processed, to prevent
245 * requests from being processed repeatedly.
247 xa_lock(&cache->reqs);
248 req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
/* Nothing new queued (or message too big — not visible here). */
250 xa_unlock(&cache->reqs);
258 xa_unlock(&cache->reqs);
/* Clearing the NEW mark claims the request for this read. */
262 xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
263 xa_unlock(&cache->reqs);
/* OPEN requests carry an anon fd the daemon will write through. */
268 if (msg->opcode == CACHEFILES_OP_OPEN) {
269 ret = cachefiles_ondemand_get_fd(req);
274 if (copy_to_user(_buffer, msg, n) != 0) {
279 /* CLOSE request has no reply */
280 if (msg->opcode == CACHEFILES_OP_CLOSE) {
281 xa_erase(&cache->reqs, id);
282 complete(&req->done);
/* Error unwind: undo the fd we just installed for an OPEN. */
288 if (msg->opcode == CACHEFILES_OP_OPEN)
289 close_fd(((struct cachefiles_open *)msg->data)->fd);
291 xa_erase(&cache->reqs, id);
293 complete(&req->done);
/*
 * Per-opcode callback that fills in the opcode-specific payload of a
 * request; 'private' carries the caller's context (e.g. read range).
 * Returns 0 on success, negative errno otherwise (convention inferred
 * from the visible callees — confirm against callers).
 */
297 typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
/*
 * Allocate a request with 'data_len' bytes of opcode payload, let
 * 'init_req' fill it in, enqueue it in cache->reqs marked
 * CACHEFILES_REQ_NEW, wake the daemon's poll waiters, and block until
 * the request is completed (by copen, the READ-complete ioctl, or the
 * teardown paths).  The DEAD-flag check and the enqueue must be atomic
 * so no request is queued after the xarray has been flushed.
 *
 * NOTE(review): elided extract — the parameter lines for data_len and
 * 'private', ret declaration, error labels, xas locking around the
 * insert loop, and the final return are not shown.
 */
299 static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
300 enum cachefiles_opcode opcode,
302 init_req_fn init_req,
305 struct cachefiles_cache *cache = object->volume->cache;
306 struct cachefiles_req *req;
307 XA_STATE(xas, &cache->reqs, 0);
/* No daemon protocol unless on-demand mode is active. */
310 if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
/* Cheap early-out; re-checked under the lock below. */
313 if (test_bit(CACHEFILES_DEAD, &cache->flags))
/* Message payload is allocated inline after the request header. */
316 req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
320 req->object = object;
321 init_completion(&req->done);
322 req->msg.opcode = opcode;
323 req->msg.len = sizeof(struct cachefiles_msg) + data_len;
/* Opcode-specific setup (open/close/read payloads). */
325 ret = init_req(req, private);
331 * Stop enqueuing the request when daemon is dying. The
332 * following two operations need to be atomic as a whole.
333 * 1) check cache state, and
334 * 2) enqueue request if cache is alive.
335 * Otherwise the request may be enqueued after xarray has been
336 * flushed, leaving the orphan request never being completed.
340 * test CACHEFILES_DEAD bit
341 * set CACHEFILES_DEAD bit
342 * flush requests in the xarray
343 * enqueue the request
/* Authoritative DEAD check, paired with the flush in unbind. */
347 if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
353 /* coupled with the barrier in cachefiles_flush_reqs() */
/* Non-OPEN ops need a live anon fd (id > 0); id 0 means the OPEN
 * was never sent, CLOSED means the daemon shut the fd. */
356 if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) {
357 WARN_ON_ONCE(object->ondemand_id == 0);
/* Find a free slot; XAS_RESTART here means none available. */
364 xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
365 if (xas.xa_node == XAS_RESTART)
366 xas_set_err(&xas, -EBUSY);
367 xas_store(&xas, req);
368 xas_clear_mark(&xas, XA_FREE_MARK);
/* NEW mark makes it visible to daemon_read(). */
369 xas_set_mark(&xas, CACHEFILES_REQ_NEW);
/* Retry with memory preallocated if the store needed allocation. */
371 } while (xas_nomem(&xas, GFP_KERNEL));
373 ret = xas_error(&xas);
/* Let the daemon's poll() see the new request, then wait. */
377 wake_up_all(&cache->daemon_pollwq);
378 wait_for_completion(&req->done);
/*
 * init_req_fn for OPEN: pack the volume key (length-prefixed string)
 * and the netfs-specific binary cookie key into the message payload so
 * the daemon can identify the object.  Requires the cookie to have
 * FSCACHE_ADV_WANT_CACHE_SIZE, since on-demand mode reports the size
 * back via copen.
 *
 * NOTE(review): elided extract — the 'private' parameter line and the
 * error return after the pr_err are not shown.
 */
385 static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
388 struct cachefiles_object *object = req->object;
389 struct fscache_cookie *cookie = object->cookie;
390 struct fscache_volume *volume = object->volume->vcookie;
391 struct cachefiles_open *load = (void *)req->msg.data;
392 size_t volume_key_size, cookie_key_size;
393 void *volume_key, *cookie_key;
396 * Volume key is a NUL-terminated string. key[0] stores strlen() of the
397 * string, followed by the content of the string (excluding '\0').
/* +1 to carry the NUL terminator to user space. */
399 volume_key_size = volume->key[0] + 1;
400 volume_key = volume->key + 1;
402 /* Cookie key is binary data, which is netfs specific. */
403 cookie_key_size = cookie->key_len;
404 cookie_key = fscache_get_key(cookie);
406 if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
407 pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
411 load->volume_key_size = volume_key_size;
412 load->cookie_key_size = cookie_key_size;
/* Payload layout: [volume key][cookie key], back to back. */
413 memcpy(load->data, volume_key, volume_key_size);
414 memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);
/*
 * init_req_fn for CLOSE: record the object's on-demand id in the
 * message.  Skips (per the comment below) objects whose id is still 0
 * (OPEN never sent) or CLOSED (daemon already released the fd) — the
 * actual guard statement sits in the elided lines.
 *
 * NOTE(review): elided extract — the 'private' parameter line, the
 * id guard and the return are not shown.
 */
419 static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
422 struct cachefiles_object *object = req->object;
423 int object_id = object->ondemand_id;
426 * It's possible that object id is still 0 if the cookie looking up
427 * phase failed before OPEN request has ever been sent. Also avoid
428 * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means
429 * anon_fd has already been closed.
434 req->msg.object_id = object_id;
435 trace_cachefiles_ondemand_close(object, &req->msg);
/*
 * Context passed as 'private' to the READ request initializer; carries
 * the requested (off, len) range — field declarations are in the
 * elided lines (see the {pos, len} initializer in
 * cachefiles_ondemand_read()).
 */
439 struct cachefiles_read_ctx {
/*
 * init_req_fn for READ: refuse to queue if the daemon has closed the
 * anon fd (id <= 0; 0 would be a logic error, hence the WARN), then
 * record the object id and the requested byte range from the caller's
 * read context.
 *
 * NOTE(review): elided extract — the 'private' parameter line, the
 * error return inside the guard and the final return are not shown.
 */
444 static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
447 struct cachefiles_object *object = req->object;
448 struct cachefiles_read *load = (void *)req->msg.data;
449 struct cachefiles_read_ctx *read_ctx = private;
450 int object_id = object->ondemand_id;
452 /* Stop enqueuing requests when daemon has closed anon_fd. */
453 if (object_id <= 0) {
/* id 0 means READ before OPEN ever succeeded — shouldn't happen. */
454 WARN_ON_ONCE(object_id == 0);
455 pr_info_once("READ: anonymous fd closed prematurely.\n");
459 req->msg.object_id = object_id;
460 load->off = read_ctx->off;
461 load->len = read_ctx->len;
462 trace_cachefiles_ondemand_read(object, &req->msg, load);
/*
 * Send the OPEN request for an object when it is first set up.  The
 * payload must hold the length-prefixed volume key plus the binary
 * cookie key (mirrors the layout built in
 * cachefiles_ondemand_init_open_req()).  If the object already holds a
 * positive on-demand id (re-check after a coherency failure fell back
 * to a tmpfile), the existing id is reused and no request is sent.
 *
 * NOTE(review): elided extract — the early 'return 0' for the id-reuse
 * case is in the lines not shown.
 */
466 int cachefiles_ondemand_init_object(struct cachefiles_object *object)
468 struct fscache_cookie *cookie = object->cookie;
469 struct fscache_volume *volume = object->volume->vcookie;
470 size_t volume_key_size, cookie_key_size, data_len;
473 * CacheFiles will firstly check the cache file under the root cache
474 * directory. If the coherency check failed, it will fallback to
475 * creating a new tmpfile as the cache file. Reuse the previously
476 * allocated object ID if any.
478 if (object->ondemand_id > 0)
/* +1 for the volume key's NUL, matching init_open_req(). */
481 volume_key_size = volume->key[0] + 1;
482 cookie_key_size = cookie->key_len;
483 data_len = sizeof(struct cachefiles_open) +
484 volume_key_size + cookie_key_size;
486 return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
487 data_len, cachefiles_ondemand_init_open_req, NULL);
/*
 * Tell the daemon the object is going away: fire a CLOSE request (no
 * payload) so it can release its anon fd.  Best-effort — the return
 * value of send_req is deliberately ignored here.
 */
490 void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
492 cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
493 cachefiles_ondemand_init_close_req, NULL);
/*
 * Ask the daemon to fetch [pos, pos + len) into the cache: wraps the
 * range in a cachefiles_read_ctx and issues a READ request, blocking
 * in send_req until the daemon acks via the READ_COMPLETE ioctl.
 * Returns the result of cachefiles_ondemand_send_req().
 */
496 int cachefiles_ondemand_read(struct cachefiles_object *object,
497 loff_t pos, size_t len)
499 struct cachefiles_read_ctx read_ctx = {pos, len};
501 return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
502 sizeof(struct cachefiles_read),
503 cachefiles_ondemand_init_read_req, &read_ctx);