4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2012, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/llite/llite_close.c
34 * Lustre Lite routines to issue a secondary close after writeback
37 #include <linux/module.h>
39 #define DEBUG_SUBSYSTEM S_LLITE
41 #include "llite_internal.h"
/** Records that a write is in flight.
 *
 * Marks the inode SOM-dirty and, when @page is non-NULL, links the page
 * onto the object's pending-write list so vvp_write_complete() can tell
 * when the last outstanding write has finished.  Both the flag and the
 * list are protected by lli_lock.
 */
void vvp_write_pending(struct vvp_object *club, struct vvp_page *page)
{
	struct ll_inode_info *lli = ll_i2info(club->vob_inode);

	spin_lock(&lli->lli_lock);
	lli->lli_flags |= LLIF_SOM_DIRTY;
	/* Only link the page if it is not already on the pending list. */
	if (page && list_empty(&page->vpg_pending_linkage))
		list_add(&page->vpg_pending_linkage, &club->vob_pending_list);
	spin_unlock(&lli->lli_lock);
}
/** Records that a write has completed.
 *
 * Unlinks @page from the object's pending-write list under lli_lock.
 * If the page was actually linked, re-checks whether a DONE_WRITING rpc
 * can now be queued -- outside lli_lock, because
 * ll_queue_done_writing() takes the same lock again.
 */
void vvp_write_complete(struct vvp_object *club, struct vvp_page *page)
{
	struct ll_inode_info *lli = ll_i2info(club->vob_inode);
	int rc = 0;

	spin_lock(&lli->lli_lock);
	if (page && !list_empty(&page->vpg_pending_linkage)) {
		list_del_init(&page->vpg_pending_linkage);
		rc = 1;
	}
	spin_unlock(&lli->lli_lock);

	if (rc)
		ll_queue_done_writing(club->vob_inode, 0);
}
71 /** Queues DONE_WRITING if
72 * - done writing is allowed;
73 * - inode has no no dirty pages;
75 void ll_queue_done_writing(struct inode *inode, unsigned long flags)
77 struct ll_inode_info *lli = ll_i2info(inode);
78 struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
80 spin_lock(&lli->lli_lock);
81 lli->lli_flags |= flags;
83 if ((lli->lli_flags & LLIF_DONE_WRITING) &&
84 list_empty(&club->vob_pending_list)) {
85 struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
87 if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
88 CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, done writing allowed and no diry pages\n",
89 ll_get_fsname(inode->i_sb, NULL, 0),
90 PFID(ll_inode2fid(inode)), lli->lli_flags);
91 /* DONE_WRITING is allowed and inode has no dirty page. */
92 spin_lock(&lcq->lcq_lock);
94 LASSERT(list_empty(&lli->lli_close_list));
95 CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
96 PFID(ll_inode2fid(inode)));
97 list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
99 /* Avoid a concurrent insertion into the close thread queue:
100 * an inode is already in the close thread, open(), write(),
101 * close() happen, epoch is closed as the inode is marked as
102 * LLIF_EPOCH_PENDING. When pages are written inode should not
103 * be inserted into the queue again, clear this flag to avoid
106 lli->lli_flags &= ~LLIF_DONE_WRITING;
108 wake_up(&lcq->lcq_waitq);
109 spin_unlock(&lcq->lcq_lock);
111 spin_unlock(&lli->lli_lock);
/** Packs SOM attributes into @op_data for a CLOSE or DONE_WRITING rpc.
 *
 * Always sets MF_SOM_CHANGE; the size/time attribute bits are added
 * only when cl_local_size() returns 0.
 */
void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
{
	struct ll_inode_info *lli = ll_i2info(inode);

	op_data->op_flags |= MF_SOM_CHANGE;
	/* Check if Size-on-MDS attributes are valid. */
	if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
		CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
		       ll_get_fsname(inode->i_sb, NULL, 0),
		       PFID(ll_inode2fid(inode)), lli->lli_flags);

	/* NOTE(review): presumably cl_local_size() returns 0 when the
	 * client-local size is valid -- confirm against its definition.
	 */
	if (!cl_local_size(inode)) {
		/* Send Size-on-MDS Attributes if valid. */
		op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
				ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
	}
}
/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data.
 *
 * @inode:   inode whose epoch is being closed
 * @op_data: rpc data to fill (MF_EPOCH_CLOSE, SOM attributes)
 * @och:     in: open handle for the close; out (DONE_WRITING path): the
 *           handle saved earlier in lli_pending_och, or untouched
 * @flags:   LLIF_DONE_WRITING when called from the DONE_WRITING path,
 *           0 when called from close
 *
 * When dirty pages are still pending, the epoch cannot be closed yet:
 * the och (close path) or the DONE_WRITING intent (D_W path) is parked
 * in lli and an extra inode reference is taken so the inode survives
 * until the deferred DONE_WRITING runs.
 */
void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
		      struct obd_client_handle **och, unsigned long flags)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);

	spin_lock(&lli->lli_lock);
	if (!(list_empty(&club->vob_pending_list))) {
		if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
			LASSERT(*och);
			LASSERT(!lli->lli_pending_och);
			/* Inode is dirty and there is no pending write done
			 * request yet, DONE_WRITE is to be sent later.
			 */
			lli->lli_flags |= LLIF_EPOCH_PENDING;
			lli->lli_pending_och = *och;
			spin_unlock(&lli->lli_lock);

			/* Hold the inode until the deferred DONE_WRITING. */
			inode = igrab(inode);
			LASSERT(inode);
			goto out;
		}
		if (flags & LLIF_DONE_WRITING) {
			/* Some pages are still dirty, it is early to send
			 * DONE_WRITE. Wait until all pages will be flushed
			 * and try DONE_WRITE again later.
			 */
			LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
			lli->lli_flags |= LLIF_DONE_WRITING;
			spin_unlock(&lli->lli_lock);

			inode = igrab(inode);
			LASSERT(inode);
			goto out;
		}
	}
	CDEBUG(D_INODE, "Epoch %llu closed on "DFID"\n",
	       ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
	op_data->op_flags |= MF_EPOCH_CLOSE;

	if (flags & LLIF_DONE_WRITING) {
		LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
		LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
		/* Hand the parked handle back to the caller. */
		*och = lli->lli_pending_och;
		lli->lli_pending_och = NULL;
		lli->lli_flags &= ~LLIF_EPOCH_PENDING;
	} else {
		/* Pack Size-on-MDS inode attributes only if they have
		 * changed.
		 */
		if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
			spin_unlock(&lli->lli_lock);
			goto out;
		}

		/* There is a pending DONE_WRITE -- close epoch with no
		 * attribute change.
		 */
		if (lli->lli_flags & LLIF_EPOCH_PENDING) {
			spin_unlock(&lli->lli_lock);
			goto out;
		}
	}

	LASSERT(list_empty(&club->vob_pending_list));
	lli->lli_flags &= ~LLIF_SOM_DIRTY;
	spin_unlock(&lli->lli_lock);
	ll_done_writing_attr(inode, op_data);

out:
	return;
}
/**
 * Client updates SOM attributes on MDS (including llog cookies):
 * obd_getattr with no lock and md_setattr.
 *
 * Returns 0 on success or a negative errno from the setattr rpc
 * (-ENOMEM if the obdo cannot be allocated).
 */
int ll_som_update(struct inode *inode, struct md_op_data *op_data)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ptlrpc_request *request = NULL;
	__u32 old_flags;
	struct obdo *oa;
	int rc;

	LASSERT(op_data);
	if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
		CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
		       ll_get_fsname(inode->i_sb, NULL, 0),
		       PFID(ll_inode2fid(inode)), lli->lli_flags);

	oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
	if (!oa) {
		CERROR("can't allocate memory for Size-on-MDS update.\n");
		return -ENOMEM;
	}

	old_flags = op_data->op_flags;
	op_data->op_flags = MF_SOM_CHANGE;

	/* If inode is already in another epoch, skip getattr from OSTs. */
	if (lli->lli_ioepoch == op_data->op_ioepoch) {
		rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
				      old_flags & MF_GETATTR_LOCK);
		if (rc) {
			/* Send the setattr anyway, but with no valid
			 * attribute bits.
			 */
			oa->o_valid = 0;
			if (rc != -ENOENT)
				CERROR("%s: inode_getattr failed - unable to send a Size-on-MDS attribute update for inode "DFID": rc = %d\n",
				       ll_get_fsname(inode->i_sb, NULL, 0),
				       PFID(ll_inode2fid(inode)), rc);
		} else {
			CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
			       PFID(&lli->lli_fid));
		}
		/* Install attributes into op_data. */
		md_from_obdo(op_data, oa, oa->o_valid);
	}

	rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
			NULL, 0, NULL, 0, &request, NULL);
	ptlrpc_req_finished(request);

	kmem_cache_free(obdo_cachep, oa);
	return rc;
}
/** Closes the ioepoch and packs all the attributes into @op_data for
 * DONE_WRITING rpc.
 *
 * On return *och is non-NULL only when the epoch could actually be
 * closed; otherwise the caller must skip the rpc.
 */
static void ll_prepare_done_writing(struct inode *inode,
				    struct md_op_data *op_data,
				    struct obd_client_handle **och)
{
	ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
	/* If there is no @och, we do not do D_W yet. */
	if (!*och)
		return;

	ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
	ll_prep_md_op_data(op_data, inode, NULL, NULL,
			   0, 0, LUSTRE_OPC_ANY, NULL);
}
/** Sends a DONE_WRITING rpc for @inode.
 *
 * Called from the close thread for inodes queued by
 * ll_queue_done_writing().  If the MDS answers -EAGAIN it wants the
 * Size-on-MDS attributes, which are then fetched from the OSTs and
 * pushed back via ll_som_update().
 */
static void ll_done_writing(struct inode *inode)
{
	struct obd_client_handle *och = NULL;
	struct md_op_data *op_data;
	int rc;

	/* Only reached when the server supports Size-on-MDS. */
	LASSERT(exp_connect_som(ll_i2mdexp(inode)));

	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
	if (!op_data)
		return;

	ll_prepare_done_writing(inode, op_data, &och);
	/* If there is no @och, we do not do D_W yet. */
	if (!och)
		goto out;

	rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
	if (rc == -EAGAIN)
		/* MDS has instructed us to obtain Size-on-MDS attribute from
		 * OSTs and send setattr to back to MDS.
		 */
		rc = ll_som_update(inode, op_data);
	else if (rc)
		CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n",
		       ll_get_fsname(inode->i_sb, NULL, 0),
		       PFID(ll_inode2fid(inode)), rc);
out:
	ll_finish_md_op_data(op_data);
	if (och) {
		md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
		kfree(och);
	}
}
313 static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
315 struct ll_inode_info *lli = NULL;
317 spin_lock(&lcq->lcq_lock);
319 if (!list_empty(&lcq->lcq_head)) {
320 lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
322 list_del_init(&lli->lli_close_list);
323 } else if (atomic_read(&lcq->lcq_stop)) {
324 lli = ERR_PTR(-EALREADY);
327 spin_unlock(&lcq->lcq_lock);
/** Close-thread main loop: waits for inodes queued by
 * ll_queue_done_writing(), sends DONE_WRITING for each, and drops the
 * reference taken in ll_ioepoch_close().  Exits when
 * ll_close_next_lli() returns an ERR_PTR (shutdown requested).
 */
static int ll_close_thread(void *arg)
{
	struct ll_close_queue *lcq = arg;

	/* Signal ll_close_thread_start() that the thread is running. */
	complete(&lcq->lcq_comp);

	while (1) {
		struct l_wait_info lwi = { 0 };
		struct ll_inode_info *lli;
		struct inode *inode;

		l_wait_event_exclusive(lcq->lcq_waitq,
				       (lli = ll_close_next_lli(lcq)) != NULL,
				       &lwi);
		if (IS_ERR(lli))
			break;

		inode = ll_info2i(lli);
		CDEBUG(D_INFO, "done_writing for inode "DFID"\n",
		       PFID(ll_inode2fid(inode)));
		ll_done_writing(inode);
		/* Drop the reference held since ll_ioepoch_close(). */
		iput(inode);
	}

	CDEBUG(D_INFO, "ll_close exiting\n");
	/* Signal ll_close_thread_shutdown() that the thread is gone. */
	complete(&lcq->lcq_comp);
	return 0;
}
/** Allocates a close queue and starts its close thread.
 *
 * @lcq_ret: on success, receives the new queue.
 *
 * Returns 0 on success, -EINTR under fault injection, -ENOMEM on
 * allocation failure, or the kthread_run() error.  Blocks until the
 * thread has actually started (lcq_comp).
 */
int ll_close_thread_start(struct ll_close_queue **lcq_ret)
{
	struct ll_close_queue *lcq;
	struct task_struct *task;

	/* Fault-injection hook for testing. */
	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
		return -EINTR;

	lcq = kzalloc(sizeof(*lcq), GFP_NOFS);
	if (!lcq)
		return -ENOMEM;

	spin_lock_init(&lcq->lcq_lock);
	INIT_LIST_HEAD(&lcq->lcq_head);
	init_waitqueue_head(&lcq->lcq_waitq);
	init_completion(&lcq->lcq_comp);

	task = kthread_run(ll_close_thread, lcq, "ll_close");
	if (IS_ERR(task)) {
		kfree(lcq);
		return PTR_ERR(task);
	}

	/* Wait until the thread signals it is up before publishing lcq. */
	wait_for_completion(&lcq->lcq_comp);
	*lcq_ret = lcq;
	return 0;
}
/** Stops the close thread and frees the queue.
 *
 * Re-arms lcq_comp, raises lcq_stop so ll_close_next_lli() returns an
 * ERR_PTR, wakes the thread, and waits for its exit completion before
 * freeing @lcq.
 */
void ll_close_thread_shutdown(struct ll_close_queue *lcq)
{
	/* Reuse the completion the start path already consumed. */
	init_completion(&lcq->lcq_comp);
	atomic_inc(&lcq->lcq_stop);
	wake_up(&lcq->lcq_waitq);
	wait_for_completion(&lcq->lcq_comp);
	kfree(lcq);
}