/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre_lib.h
 *
 * Basic Lustre library routines.
 */

#ifndef _LUSTRE_LIB_H
#define _LUSTRE_LIB_H
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/libcfs/libcfs.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <uapi/linux/lustre/lustre_ver.h>
#include <uapi/linux/lustre/lustre_cfg.h>

struct ptlrpc_request;

#include <lustre_ha.h>
#include <lustre_net.h>
#define LI_POISON 0x5a5a5a5a
#if BITS_PER_LONG > 32
# define LL_POISON 0x5a5a5a5a5a5a5a5aL
#else
# define LL_POISON 0x5a5a5a5aL
#endif
#define LP_POISON ((void *)LL_POISON)
int target_pack_pool_reply(struct ptlrpc_request *req);
int do_set_info_async(struct obd_import *imp,
		      int opcode, int version,
		      u32 keylen, void *key,
		      u32 vallen, void *val,
		      struct ptlrpc_request_set *set);

void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
/*
 * l_wait_event is a flexible sleeping function, permitting simple caller
 * configuration of interrupt and timeout sensitivity along with actions to
 * be performed in the event of either exception.
 *
 * The first form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
 *					     intr_handler, callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * l_wait_event() makes the current process wait on 'waitq' until 'condition'
 * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending. It
 * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
 * 'condition' becomes true, it optionally calls the specified 'intr_handler'
 * if not NULL, and returns -EINTR.
 *
 * If a non-zero timeout is specified, signals are ignored until the timeout
 * has expired. At that point, if 'timeout_handler' is not NULL it is called.
 * If it returns FALSE l_wait_event() continues to wait as described above
 * with signals enabled. Otherwise it returns -ETIMEDOUT.
 *
 * LWI_INTR(intr_handler, callback_data) is shorthand for
 * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
 *
 * The second form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * This form is the same as the first except that it COMPLETELY IGNORES
 * SIGNALS. The caller must therefore beware that if 'timeout' is zero, or if
 * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
 * can unblock the current process is 'condition' becoming TRUE.
 *
 * Another form of usage is:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
 *						 timeout_handler);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * This is the same as the previous case, but 'condition' is checked once
 * every 'interval' jiffies (if non-zero).
 *
 * Subtle synchronization point: this macro does *not* necessarily take the
 * wait-queue spin-lock before returning and, hence, the following idiom is
 * safe ONLY when the caller provides some external locking:
 *
 *	Thread1					Thread2
 *
 *	l_wait_event(&obj->wq, ....);				(1)
 *
 *					wake_up(&obj->wq):	(2)
 *					  spin_lock(&q->lock);	(2.1)
 *					  __wake_up_common(q, ...);	(2.2)
 *					  spin_unlock(&q->lock);	(2.3)
 *
 *	kfree(obj);						(3)
 *
 * As l_wait_event() may "short-cut" execution and return without taking the
 * wait-queue spin-lock, some additional synchronization is necessary to
 * guarantee that step (3) can begin only after (2.3) finishes.
 *
 * XXX nikita: some ptlrpc daemon threads have races of that sort.
 */
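/*
 * Illustrative sketch (not part of the original header) of the external
 * locking that makes the (1)..(3) sequence above safe.  'struct my_obj'
 * and both thread functions are hypothetical.  Once thread1 acquires
 * obj->lock, thread2 must already have dropped it, i.e. its wake_up()
 * has completed, so kfree() cannot race with __wake_up_common()
 * touching obj->wq.
 */
#if 0	/* example only, not compiled */
struct my_obj {
	wait_queue_head_t wq;
	spinlock_t lock;
	int done;
};

static void thread2_complete(struct my_obj *obj)
{
	spin_lock(&obj->lock);
	obj->done = 1;
	wake_up(&obj->wq);		/* steps (2)..(2.3) */
	spin_unlock(&obj->lock);
}

static void thread1_wait_and_free(struct my_obj *obj)
{
	struct l_wait_info lwi = { 0 };

	l_wait_event(obj->wq, obj->done, &lwi);		/* step (1) */
	spin_lock(&obj->lock);	/* blocks until thread2 leaves wake_up() */
	spin_unlock(&obj->lock);
	kfree(obj);					/* step (3) */
}
#endif
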
/* A timeout handler that returns FALSE, i.e. "keep waiting" (see above). */
static inline int back_to_sleep(void *arg)
{
	return 0;
}
#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))

struct l_wait_info {
	long lwi_timeout;	/* timeout in jiffies, 0 means wait forever */
	long lwi_interval;	/* condition poll interval, 0 means none */
	int lwi_allow_intr;	/* allow fatal signals before the timeout */
	int (*lwi_on_timeout)(void *);
	void (*lwi_on_signal)(void *);
	void *lwi_cb_data;
};

/* NB: LWI_TIMEOUT ignores signals completely */
#define LWI_TIMEOUT(time, cb, data)			\
((struct l_wait_info) {					\
	.lwi_timeout	= time,				\
	.lwi_on_timeout	= cb,				\
	.lwi_cb_data	= data,				\
	.lwi_interval	= 0,				\
	.lwi_allow_intr	= 0				\
})

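/*
 * Illustrative sketch (not part of the original header): the second usage
 * form from the big comment above.  Pairing LWI_TIMEOUT with
 * back_to_sleep() (which returns 0, i.e. FALSE) makes l_wait_event()
 * keep waiting after 'timeout' expires; since no signal handler is set,
 * signals stay ignored throughout (see the NB above).  'my_waitq' and
 * 'my_flag' are hypothetical names.
 */
#if 0	/* example only, not compiled */
static int my_wait_uninterruptible(wait_queue_head_t *my_waitq, int *my_flag)
{
	struct l_wait_info lwi = LWI_TIMEOUT(5 * HZ, back_to_sleep, NULL);

	/* returns 0 once *my_flag becomes non-zero */
	return l_wait_event(*my_waitq, *my_flag != 0, &lwi);
}
#endif
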
#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data)	\
((struct l_wait_info) {					\
	.lwi_timeout	= time,				\
	.lwi_on_timeout	= cb,				\
	.lwi_cb_data	= data,				\
	.lwi_interval	= interval,			\
	.lwi_allow_intr	= 0				\
})

#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data)	\
((struct l_wait_info) {					\
	.lwi_timeout	= time,				\
	.lwi_on_timeout	= time_cb,			\
	.lwi_on_signal	= sig_cb,			\
	.lwi_cb_data	= data,				\
	.lwi_interval	= 0,				\
	.lwi_allow_intr	= 0				\
})

#define LWI_TIMEOUT_INTR_ALL(time, time_cb, sig_cb, data)	\
((struct l_wait_info) {						\
	.lwi_timeout	= time,					\
	.lwi_on_timeout	= time_cb,				\
	.lwi_on_signal	= sig_cb,				\
	.lwi_cb_data	= data,					\
	.lwi_interval	= 0,					\
	.lwi_allow_intr	= 1					\
})

#define LWI_INTR(cb, data) LWI_TIMEOUT_INTR(0, NULL, cb, data)

#define LUSTRE_FATAL_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) |	\
			   sigmask(SIGTERM) | sigmask(SIGQUIT) |	\
			   sigmask(SIGALRM))

/**
 * wait_queue_entry_t of Linux (version < 2.6.34) is a FIFO list for
 * exclusively waiting threads, which is not always desirable because all
 * threads are woken up again and again, even if the user only needs a few
 * of them to be active most of the time. This is bad for performance
 * because the cache can be polluted by different threads.
 *
 * A LIFO list resolves this problem: by default we always wake up the most
 * recently active thread.
 *
 * NB: please don't call non-exclusive & exclusive wait on the same
 * waitq if add_wait_queue_exclusive_head is used.
 */
#define add_wait_queue_exclusive_head(waitq, link)		\
{								\
	unsigned long flags;					\
								\
	spin_lock_irqsave(&((waitq)->lock), flags);		\
	__add_wait_queue_exclusive(waitq, link);		\
	spin_unlock_irqrestore(&((waitq)->lock), flags);	\
}

/**
 * wait for @condition to become true, but no longer than timeout, specified
 * by @info.
 */
#define __l_wait_event(wq, condition, info, ret, l_add_wait)		\
do {									\
	wait_queue_entry_t __wait;					\
	long __timeout = info->lwi_timeout;				\
	sigset_t __blocked;						\
	int __allow_intr = info->lwi_allow_intr;			\
									\
	ret = 0;							\
	if (condition)							\
		break;							\
									\
	init_waitqueue_entry(&__wait, current);				\
	l_add_wait(&wq, &__wait);					\
									\
	/* Block all signals (just the non-fatal ones if no timeout). */ \
	if (info->lwi_on_signal && (__timeout == 0 || __allow_intr))	\
		__blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);	\
	else								\
		__blocked = cfs_block_sigsinv(0);			\
									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
									\
		if (__timeout == 0) {					\
			schedule();					\
		} else {						\
			long interval = info->lwi_interval ?		\
					min_t(long,			\
					      info->lwi_interval,	\
					      __timeout) : __timeout;	\
			long remaining = schedule_timeout(interval);	\
									\
			__timeout = cfs_time_sub(__timeout,		\
				cfs_time_sub(interval, remaining));	\
			if (__timeout == 0) {				\
				if (!info->lwi_on_timeout ||		\
				    info->lwi_on_timeout(info->lwi_cb_data)) { \
					ret = -ETIMEDOUT;		\
					break;				\
				}					\
				/* Take signals after the timeout expires. */ \
				if (info->lwi_on_signal)		\
					(void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
			}						\
		}							\
									\
		set_current_state(TASK_RUNNING);			\
									\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			if (info->lwi_on_signal &&			\
			    (__timeout == 0 || __allow_intr)) {		\
				if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
					info->lwi_on_signal(info->lwi_cb_data); \
				ret = -EINTR;				\
				break;					\
			}						\
			/* We have to do this here because some signals */ \
			/* are not blockable - ie from strace(1).	*/ \
			/* In these cases we want to schedule_timeout()	*/ \
			/* again, because we don't want that to return	*/ \
			/* -EINTR when the RPC actually succeeded.	*/ \
			/* the recalc_sigpending() below will deliver	*/ \
			/* the signal properly.				*/ \
			cfs_clear_sigpending();				\
		}							\
	}								\
									\
	cfs_restore_sigs(__blocked);					\
									\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define l_wait_event(wq, condition, info)			\
({								\
	int __ret;						\
	struct l_wait_info *__info = (info);			\
								\
	__l_wait_event(wq, condition, __info,			\
		       __ret, add_wait_queue);			\
	__ret;							\
})

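/*
 * Illustrative sketch (not part of the original header): the first usage
 * form from the big comment above.  All names (my_timeout_cb, my_intr_cb,
 * my_wait) are hypothetical.  Signals are ignored for the first 10
 * seconds; my_timeout_cb then returns FALSE, so the wait continues with
 * fatal signals enabled and can finish with -EINTR.
 */
#if 0	/* example only, not compiled */
static int my_timeout_cb(void *data)
{
	/* FALSE: don't fail with -ETIMEDOUT, keep waiting with signals on */
	return 0;
}

static void my_intr_cb(void *data)
{
	/* called when a fatal signal ends the wait; rc will be -EINTR */
}

static int my_wait(wait_queue_head_t *waitq, int *flag)
{
	struct l_wait_info lwi = LWI_TIMEOUT_INTR(10 * HZ, my_timeout_cb,
						  my_intr_cb, NULL);

	/* 0 once *flag is non-zero, or -EINTR on a post-timeout signal */
	return l_wait_event(*waitq, *flag != 0, &lwi);
}
#endif
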
#define l_wait_event_exclusive(wq, condition, info)		\
({								\
	int __ret;						\
	struct l_wait_info *__info = (info);			\
								\
	__l_wait_event(wq, condition, __info,			\
		       __ret, add_wait_queue_exclusive);	\
	__ret;							\
})

#define l_wait_event_exclusive_head(wq, condition, info)	\
({								\
	int __ret;						\
	struct l_wait_info *__info = (info);			\
								\
	__l_wait_event(wq, condition, __info,			\
		       __ret, add_wait_queue_exclusive_head);	\
	__ret;							\
})

#define l_wait_condition(wq, condition)				\
({								\
	struct l_wait_info lwi = { 0 };				\
	l_wait_event(wq, condition, &lwi);			\
})

#define l_wait_condition_exclusive(wq, condition)		\
({								\
	struct l_wait_info lwi = { 0 };				\
	l_wait_event_exclusive(wq, condition, &lwi);		\
})

#define l_wait_condition_exclusive_head(wq, condition)		\
({								\
	struct l_wait_info lwi = { 0 };				\
	l_wait_event_exclusive_head(wq, condition, &lwi);	\
})

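/*
 * Illustrative sketch (not part of the original header): a pool of service
 * threads sharing one wait queue.  The exclusive-head (LIFO) variant wakes
 * the most recently active thread first, as explained above
 * add_wait_queue_exclusive_head().  'my_svc_waitq' and 'my_has_work' are
 * hypothetical names.
 */
#if 0	/* example only, not compiled */
static int my_service_wait(wait_queue_head_t *my_svc_waitq, int *my_has_work)
{
	/* sleeps until *my_has_work is non-zero; ignores signals */
	return l_wait_condition_exclusive_head(*my_svc_waitq,
					       *my_has_work != 0);
}
#endif
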
#endif /* _LUSTRE_LIB_H */