4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lnet/selftest/framework.c
34 * Author: Isaac Huang <isaac@clusterfs.com>
35 * Author: Liang Zhen <liangzhen@clusterfs.com>
38 #define DEBUG_SUBSYSTEM S_LNET
/* Sentinel session id used to mean "no session" in replies. */
42 struct lst_sid LST_INVALID_SID = {LNET_NID_ANY, -1};
/* Session expiry timeout in seconds; 0 disables expiry. Read-only parameter. */
44 static int session_timeout = 100;
45 module_param(session_timeout, int, 0444);
46 MODULE_PARM_DESC(session_timeout, "test session timeout in seconds (100 by default, 0 == never)");
/* Per-RPC timeout in seconds applied to test RPCs; 0 disables. Writable at runtime. */
48 static int rpc_timeout = 64;
49 module_param(rpc_timeout, int, 0644);
50 MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)");
/*
 * Endianness-fixup macros: byte-swap the fields of wire structures received
 * from a peer with opposite endianness.  Callers decide whether swapping is
 * needed (by checking the message magic); these macros swap unconditionally.
 * NOTE(review): the original line numbering has gaps here — the usual
 * do { ... } while (0) wrappers appear to have been elided from this extract.
 */
52 #define sfw_unpack_id(id) \
54 __swab64s(&(id).nid); \
55 __swab32s(&(id).pid); \
58 #define sfw_unpack_sid(sid) \
60 __swab64s(&(sid).ses_nid); \
61 __swab64s(&(sid).ses_stamp); \
64 #define sfw_unpack_fw_counters(fc) \
66 __swab32s(&(fc).running_ms); \
67 __swab32s(&(fc).active_batches); \
68 __swab32s(&(fc).zombie_sessions); \
69 __swab32s(&(fc).brw_errors); \
70 __swab32s(&(fc).ping_errors); \
73 #define sfw_unpack_rpc_counters(rc) \
75 __swab32s(&(rc).errors); \
76 __swab32s(&(rc).rpcs_sent); \
77 __swab32s(&(rc).rpcs_rcvd); \
78 __swab32s(&(rc).rpcs_dropped); \
79 __swab32s(&(rc).rpcs_expired); \
80 __swab64s(&(rc).bulk_get); \
81 __swab64s(&(rc).bulk_put); \
84 #define sfw_unpack_lnet_counters(lc) \
86 __swab32s(&(lc).errors); \
87 __swab32s(&(lc).msgs_max); \
88 __swab32s(&(lc).msgs_alloc); \
89 __swab32s(&(lc).send_count); \
90 __swab32s(&(lc).recv_count); \
91 __swab32s(&(lc).drop_count); \
92 __swab32s(&(lc).route_count); \
93 __swab64s(&(lc).send_length); \
94 __swab64s(&(lc).recv_length); \
95 __swab64s(&(lc).drop_length); \
96 __swab64s(&(lc).route_length); \
/* Non-zero while a test instance / batch still has active units. */
99 #define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive))
100 #define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive))
/*
 * Global state of the selftest framework.  All mutable fields are protected
 * by fw_lock; there is at most one active session (fw_session) at a time.
 */
102 static struct smoketest_framework {
103 struct list_head fw_zombie_rpcs; /* RPCs to be recycled */
104 struct list_head fw_zombie_sessions; /* stopping sessions */
105 struct list_head fw_tests; /* registered test cases */
106 atomic_t fw_nzombies; /* # zombie sessions */
107 spinlock_t fw_lock; /* serialise */
108 struct sfw_session *fw_session; /* _the_ session */
109 int fw_shuttingdown; /* shutdown in progress */
110 struct srpc_server_rpc *fw_active_srpc;/* running RPC */
/* Forward declarations for routines used before their definition. */
114 int sfw_stop_batch(struct sfw_batch *tsb, int force);
115 void sfw_destroy_session(struct sfw_session *sn);
/*
 * Look up a registered test case by its service id.  The id must lie in the
 * test-service range (above the framework services, at most the maximum).
 * Scans sfw_data.fw_tests for a matching tsc_srv_service->sv_id.
 */
117 static inline struct sfw_test_case *
118 sfw_find_test_case(int id)
120 struct sfw_test_case *tsc;
122 LASSERT(id <= SRPC_SERVICE_MAX_ID);
123 LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
125 list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
126 if (tsc->tsc_srv_service->sv_id == id)
/*
 * Register a test case: pair a server-side service with its client-side ops
 * and append it to the global test list.  Fails if a test with the same
 * service id is already registered.
 */
134 sfw_register_test(struct srpc_service *service,
135 struct sfw_test_client_ops *cliops)
137 struct sfw_test_case *tsc;
/* Reject duplicate registration for this service id. */
139 if (sfw_find_test_case(service->sv_id)) {
140 CERROR("Failed to register test %s (%d)\n",
141 service->sv_name, service->sv_id);
145 LIBCFS_ALLOC(tsc, sizeof(struct sfw_test_case));
149 tsc->tsc_cli_ops = cliops;
150 tsc->tsc_srv_service = service;
152 list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
/*
 * Arm the expiry timer of the current session.  No-op if there is no session
 * or its timeout is 0 (== never expire).  Must not race with shutdown.
 */
157 sfw_add_session_timer(void)
159 struct sfw_session *sn = sfw_data.fw_session;
160 struct stt_timer *timer = &sn->sn_timer;
162 LASSERT(!sfw_data.fw_shuttingdown);
164 if (!sn || !sn->sn_timeout)
167 LASSERT(!sn->sn_timer_active);
/* Mark active before queueing so the expiry callback sees a consistent flag. */
169 sn->sn_timer_active = 1;
170 timer->stt_expires = ktime_get_real_seconds() + sn->sn_timeout;
171 stt_add_timer(timer);
/*
 * Try to disarm the current session's expiry timer.  Returns EBUSY (positive
 * errno, this module's convention) when the timer has already fired and we
 * are racing with sfw_session_expired().
 */
175 sfw_del_session_timer(void)
177 struct sfw_session *sn = sfw_data.fw_session;
179 if (!sn || !sn->sn_timer_active)
182 LASSERT(sn->sn_timeout);
184 if (stt_del_timer(&sn->sn_timer)) { /* timer defused */
185 sn->sn_timer_active = 0;
189 return EBUSY; /* racing with sfw_session_expired() */
/*
 * Retire the current session: detach it from sfw_data, park it on the zombie
 * list, abort all registered test services and stop any still-active batches.
 * Caller must hold fw_lock; the lock is dropped and re-taken internally
 * around the service aborts and the final destroy.
 */
193 sfw_deactivate_session(void)
194 __must_hold(&sfw_data.fw_lock)
196 struct sfw_session *sn = sfw_data.fw_session;
198 struct sfw_batch *tsb;
199 struct sfw_test_case *tsc;
204 LASSERT(!sn->sn_timer_active);
/* Detach session and account it as a zombie until fully destroyed. */
206 sfw_data.fw_session = NULL;
207 atomic_inc(&sfw_data.fw_nzombies);
208 list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
210 spin_unlock(&sfw_data.fw_lock);
212 list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
213 srpc_abort_service(tsc->tsc_srv_service);
216 spin_lock(&sfw_data.fw_lock);
/* Force-stop every batch that is still running. */
218 list_for_each_entry(tsb, &sn->sn_batches, bat_list) {
219 if (sfw_batch_active(tsb)) {
221 sfw_stop_batch(tsb, 1);
226 return; /* wait for active batches to stop */
228 list_del_init(&sn->sn_list);
229 spin_unlock(&sfw_data.fw_lock);
231 sfw_destroy_session(sn);
233 spin_lock(&sfw_data.fw_lock);
/*
 * Timer callback fired when the session timeout elapses: logs the expiry and
 * deactivates the (still-current) session.  `data` is the sfw_session set up
 * by sfw_init_session().
 */
237 sfw_session_expired(void *data)
239 struct sfw_session *sn = data;
241 spin_lock(&sfw_data.fw_lock);
243 LASSERT(sn->sn_timer_active);
244 LASSERT(sn == sfw_data.fw_session);
246 CWARN("Session expired! sid: %s-%llu, name: %s\n",
247 libcfs_nid2str(sn->sn_id.ses_nid),
248 sn->sn_id.ses_stamp, &sn->sn_name[0]);
250 sn->sn_timer_active = 0;
251 sfw_deactivate_session();
253 spin_unlock(&sfw_data.fw_lock);
/*
 * Initialise a freshly allocated session object: zero it, set up its lists,
 * refcount (one reference for the caller), name, feature bits, timeout taken
 * from the module parameter, and prime (but do not arm) its expiry timer.
 */
257 sfw_init_session(struct sfw_session *sn, struct lst_sid sid,
258 unsigned int features, const char *name)
260 struct stt_timer *timer = &sn->sn_timer;
262 memset(sn, 0, sizeof(struct sfw_session));
263 INIT_LIST_HEAD(&sn->sn_list);
264 INIT_LIST_HEAD(&sn->sn_batches);
265 atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
266 atomic_set(&sn->sn_brw_errors, 0);
267 atomic_set(&sn->sn_ping_errors, 0);
268 strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name));
270 sn->sn_timer_active = 0;
272 sn->sn_features = features;
273 sn->sn_timeout = session_timeout;
274 sn->sn_started = cfs_time_current();
/* Timer is armed later by sfw_add_session_timer(). */
276 timer->stt_data = sn;
277 timer->stt_func = sfw_session_expired;
278 INIT_LIST_HEAD(&timer->stt_list);
281 /* completion handler for incoming framework RPCs */
283 sfw_server_rpc_done(struct srpc_server_rpc *rpc)
285 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
286 int status = rpc->srpc_status;
288 CDEBUG(D_NET, "Incoming framework RPC done: service %s, peer %s, status %s:%d\n",
289 sv->sv_name, libcfs_id2str(rpc->srpc_peer),
290 swi_state2str(rpc->srpc_wi.swi_state),
/*
 * Finaliser for outgoing framework RPCs: instead of freeing the RPC, park it
 * on fw_zombie_rpcs so sfw_create_rpc() can recycle it.  Only valid while
 * the framework is not shutting down.
 */
298 sfw_client_rpc_fini(struct srpc_client_rpc *rpc)
300 LASSERT(!rpc->crpc_bulk.bk_niov);
301 LASSERT(list_empty(&rpc->crpc_list));
302 LASSERT(!atomic_read(&rpc->crpc_refcount));
304 CDEBUG(D_NET, "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n",
305 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
306 swi_state2str(rpc->crpc_wi.swi_state),
307 rpc->crpc_aborted, rpc->crpc_status);
309 spin_lock(&sfw_data.fw_lock);
311 /* my callers must finish all RPCs before shutting me down */
312 LASSERT(!sfw_data.fw_shuttingdown);
313 list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
315 spin_unlock(&sfw_data.fw_lock);
/*
 * Find a batch of the current session by batch id; scans sn_batches for a
 * matching bat_id.
 */
318 static struct sfw_batch *
319 sfw_find_batch(struct lst_bid bid)
321 struct sfw_session *sn = sfw_data.fw_session;
322 struct sfw_batch *bat;
326 list_for_each_entry(bat, &sn->sn_batches, bat_list) {
327 if (bat->bat_id.bat_id == bid.bat_id)
/*
 * Find-or-create: return the session batch with the given id, allocating and
 * linking a new empty batch into the current session when none exists yet.
 */
334 static struct sfw_batch *
335 sfw_bid2batch(struct lst_bid bid)
337 struct sfw_session *sn = sfw_data.fw_session;
338 struct sfw_batch *bat;
342 bat = sfw_find_batch(bid);
346 LIBCFS_ALLOC(bat, sizeof(struct sfw_batch));
351 bat->bat_session = sn;
353 atomic_set(&bat->bat_nactive, 0);
354 INIT_LIST_HEAD(&bat->bat_tests);
356 list_add_tail(&bat->bat_list, &sn->sn_batches);
/*
 * Handler for SRPC_SERVICE_QUERY_STAT requests: fill the reply with LNet,
 * RPC and framework counters for the current session.  Uses positive errno
 * values in str_status (EINVAL: bad sid in request, ESRCH: no/other session).
 */
361 sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply)
363 struct sfw_session *sn = sfw_data.fw_session;
364 struct sfw_counters *cnt = &reply->str_fw;
365 struct sfw_batch *bat;
367 reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id;
369 if (request->str_sid.ses_nid == LNET_NID_ANY) {
370 reply->str_status = EINVAL;
374 if (!sn || !sfw_sid_equal(request->str_sid, sn->sn_id)) {
375 reply->str_status = ESRCH;
379 lnet_counters_get(&reply->str_lnet);
380 srpc_get_counters(&reply->str_rpc);
383 * send over the msecs since the session was started
384 * with 32 bits to send, this is ~49 days
386 cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started);
387 cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
388 cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
389 cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
/* Count batches that still have active test instances. */
391 cnt->active_batches = 0;
392 list_for_each_entry(bat, &sn->sn_batches, bat_list) {
393 if (atomic_read(&bat->bat_nactive) > 0)
394 cnt->active_batches++;
397 reply->str_status = 0;
/*
 * Handler for SRPC_SERVICE_MAKE_SESSION: create a new session or attach to
 * the existing one.  Same sid => just take another reference; different sid
 * without mksn_force => EBUSY (reporting the current session's name);
 * unknown feature bits => EPROTO; otherwise replace the current session.
 * Status values are positive errnos per this module's convention.
 */
402 sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
404 struct sfw_session *sn = sfw_data.fw_session;
405 struct srpc_msg *msg = container_of(request, struct srpc_msg,
406 msg_body.mksn_reqst);
409 if (request->mksn_sid.ses_nid == LNET_NID_ANY) {
410 reply->mksn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
411 reply->mksn_status = EINVAL;
416 reply->mksn_status = 0;
417 reply->mksn_sid = sn->sn_id;
418 reply->mksn_timeout = sn->sn_timeout;
/* Same session requested again: share it via refcount. */
420 if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
421 atomic_inc(&sn->sn_refcount);
425 if (!request->mksn_force) {
426 reply->mksn_status = EBUSY;
427 cplen = strlcpy(&reply->mksn_name[0], &sn->sn_name[0],
428 sizeof(reply->mksn_name));
429 if (cplen >= sizeof(reply->mksn_name))
436 * reject the request if it requires unknown features
437 * NB: old version will always accept all features because it's not
438 * aware of srpc_msg::msg_ses_feats, it's a defect but it's also
439 * harmless because it will return zero feature to console, and it's
440 * console's responsibility to make sure all nodes in a session have
443 if (msg->msg_ses_feats & ~LST_FEATS_MASK) {
444 reply->mksn_status = EPROTO;
448 /* brand new or create by force */
449 LIBCFS_ALLOC(sn, sizeof(struct sfw_session));
451 CERROR("dropping RPC mksn under memory pressure\n");
455 sfw_init_session(sn, request->mksn_sid,
456 msg->msg_ses_feats, &request->mksn_name[0]);
458 spin_lock(&sfw_data.fw_lock);
/* Retire any previous session before installing the new one. */
460 sfw_deactivate_session();
461 LASSERT(!sfw_data.fw_session);
462 sfw_data.fw_session = sn;
464 spin_unlock(&sfw_data.fw_lock);
466 reply->mksn_status = 0;
467 reply->mksn_sid = sn->sn_id;
468 reply->mksn_timeout = sn->sn_timeout;
/*
 * Handler for SRPC_SERVICE_REMOVE_SESSION: drop one reference to the current
 * session; only the last reference actually deactivates it.  ESRCH when no
 * session exists, EBUSY when the sid does not match the current session.
 */
473 sfw_remove_session(struct srpc_rmsn_reqst *request,
474 struct srpc_rmsn_reply *reply)
476 struct sfw_session *sn = sfw_data.fw_session;
478 reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
480 if (request->rmsn_sid.ses_nid == LNET_NID_ANY) {
481 reply->rmsn_status = EINVAL;
485 if (!sn || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) {
486 reply->rmsn_status = !sn ? ESRCH : EBUSY;
/* Other holders remain: success, but session stays alive. */
490 if (!atomic_dec_and_test(&sn->sn_refcount)) {
491 reply->rmsn_status = 0;
495 spin_lock(&sfw_data.fw_lock);
496 sfw_deactivate_session();
497 spin_unlock(&sfw_data.fw_lock);
499 reply->rmsn_status = 0;
500 reply->rmsn_sid = LST_INVALID_SID;
501 LASSERT(!sfw_data.fw_session);
/*
 * Handler for SRPC_SERVICE_DEBUG: report the current session's id, timeout
 * and name.  ESRCH when there is no session; the strlcpy result is checked
 * to detect name truncation.
 */
506 sfw_debug_session(struct srpc_debug_reqst *request,
507 struct srpc_debug_reply *reply)
509 struct sfw_session *sn = sfw_data.fw_session;
512 reply->dbg_status = ESRCH;
513 reply->dbg_sid = LST_INVALID_SID;
517 reply->dbg_status = 0;
518 reply->dbg_sid = sn->sn_id;
519 reply->dbg_timeout = sn->sn_timeout;
520 if (strlcpy(reply->dbg_name, &sn->sn_name[0], sizeof(reply->dbg_name))
521 >= sizeof(reply->dbg_name))
/*
 * Finaliser for test RPCs: return the RPC to its test instance's free list
 * for reuse rather than freeing it.
 */
528 sfw_test_rpc_fini(struct srpc_client_rpc *rpc)
530 struct sfw_test_unit *tsu = rpc->crpc_priv;
531 struct sfw_test_instance *tsi = tsu->tsu_instance;
533 /* Called with hold of tsi->tsi_lock */
534 LASSERT(list_empty(&rpc->crpc_list));
535 list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
/*
 * Compute how many server buffers to reserve for a test instance: bounded by
 * the service's total work items and the test loop count, split per CPU
 * partition, with a floor of SFW_TEST_WI_MIN plus SFW_TEST_WI_EXTRA slack.
 */
539 sfw_test_buffers(struct sfw_test_instance *tsi)
541 struct sfw_test_case *tsc;
542 struct srpc_service *svc;
546 tsc = sfw_find_test_case(tsi->tsi_service);
548 svc = tsc->tsc_srv_service;
551 nbuf = min(svc->sv_wi_total, tsi->tsi_loop) / svc->sv_ncpts;
552 return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA);
/*
 * Prepare a test instance for running: for a client instance attach the
 * registered client ops; for a server instance reserve RPC buffers on the
 * test's service, rolling the reservation back on failure.
 */
556 sfw_load_test(struct sfw_test_instance *tsi)
558 struct sfw_test_case *tsc;
559 struct srpc_service *svc;
564 tsc = sfw_find_test_case(tsi->tsi_service);
565 nbuf = sfw_test_buffers(tsi);
567 svc = tsc->tsc_srv_service;
569 if (tsi->tsi_is_client) {
570 tsi->tsi_ops = tsc->tsc_cli_ops;
574 rc = srpc_service_add_buffers(svc, nbuf);
576 CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
577 svc->sv_name, nbuf, rc);
579 * NB: this error handler is not strictly correct, because
580 * it may release more buffers than already allocated,
581 * but it doesn't matter because request portal should
582 * be lazy portal and will grow buffers if necessary.
584 srpc_service_remove_buffers(svc, nbuf);
588 CDEBUG(D_NET, "Reserved %d buffers for test %s\n",
589 nbuf * (srpc_serv_is_framework(svc) ?
590 2 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name);
/*
 * Undo sfw_load_test() for a server instance by releasing the buffers it
 * reserved; client instances have nothing to release.
 */
595 sfw_unload_test(struct sfw_test_instance *tsi)
597 struct sfw_test_case *tsc;
600 tsc = sfw_find_test_case(tsi->tsi_service);
603 if (tsi->tsi_is_client)
607 * shrink buffers, because request portal is lazy portal
608 * which can grow buffers at runtime so we may leave
609 * some buffers behind, but never mind...
611 srpc_service_remove_buffers(tsc->tsc_srv_service,
612 sfw_test_buffers(tsi));
/*
 * Tear down a fully quiesced test instance: run the client-side fini hook,
 * free all test units and cached RPCs, release server buffers, then free the
 * instance itself.  Must only be called once the instance has no active
 * units or RPCs.
 */
616 sfw_destroy_test_instance(struct sfw_test_instance *tsi)
618 struct srpc_client_rpc *rpc;
619 struct sfw_test_unit *tsu;
621 if (!tsi->tsi_is_client)
624 tsi->tsi_ops->tso_fini(tsi);
626 LASSERT(!tsi->tsi_stopping);
627 LASSERT(list_empty(&tsi->tsi_active_rpcs));
628 LASSERT(!sfw_test_active(tsi));
/* Free every test unit still on the list. */
630 while (!list_empty(&tsi->tsi_units)) {
631 tsu = list_entry(tsi->tsi_units.next,
632 struct sfw_test_unit, tsu_list);
633 list_del(&tsu->tsu_list);
634 LIBCFS_FREE(tsu, sizeof(*tsu));
/* Free every recycled RPC on the free list. */
637 while (!list_empty(&tsi->tsi_free_rpcs)) {
638 rpc = list_entry(tsi->tsi_free_rpcs.next,
639 struct srpc_client_rpc, crpc_list);
640 list_del(&rpc->crpc_list);
641 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
645 sfw_unload_test(tsi);
646 LIBCFS_FREE(tsi, sizeof(*tsi));
/*
 * Free an inactive, unlinked batch together with all of its test instances.
 */
650 sfw_destroy_batch(struct sfw_batch *tsb)
652 struct sfw_test_instance *tsi;
654 LASSERT(!sfw_batch_active(tsb));
655 LASSERT(list_empty(&tsb->bat_list));
657 while (!list_empty(&tsb->bat_tests)) {
658 tsi = list_entry(tsb->bat_tests.next,
659 struct sfw_test_instance, tsi_list);
660 list_del_init(&tsi->tsi_list);
661 sfw_destroy_test_instance(tsi);
664 LIBCFS_FREE(tsb, sizeof(struct sfw_batch));
/*
 * Free a zombie session (already detached from sfw_data and off all lists):
 * destroy every batch it owns, free the session, and drop the zombie count.
 */
668 sfw_destroy_session(struct sfw_session *sn)
670 struct sfw_batch *batch;
672 LASSERT(list_empty(&sn->sn_list));
673 LASSERT(sn != sfw_data.fw_session);
675 while (!list_empty(&sn->sn_batches)) {
676 batch = list_entry(sn->sn_batches.next,
677 struct sfw_batch, bat_list);
678 list_del_init(&batch->bat_list);
679 sfw_destroy_batch(batch);
682 LIBCFS_FREE(sn, sizeof(*sn));
683 atomic_dec(&sfw_data.fw_nzombies);
/*
 * Byte-swap the body of an add-test request received from an opposite-endian
 * peer.  No-op when the magic matches (same endianness).  The union member
 * to swap depends on the requested service (BRW bulk v0/v1 or ping).
 */
687 sfw_unpack_addtest_req(struct srpc_msg *msg)
689 struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
691 LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST);
692 LASSERT(req->tsr_is_client);
694 if (msg->msg_magic == SRPC_MSG_MAGIC)
695 return; /* no flipping needed */
697 LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
699 if (req->tsr_service == SRPC_SERVICE_BRW) {
/* Feature bit selects between the v0 and v1 bulk request layout. */
700 if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
701 struct test_bulk_req *bulk = &req->tsr_u.bulk_v0;
703 __swab32s(&bulk->blk_opc);
704 __swab32s(&bulk->blk_npg);
705 __swab32s(&bulk->blk_flags);
708 struct test_bulk_req_v1 *bulk = &req->tsr_u.bulk_v1;
710 __swab16s(&bulk->blk_opc);
711 __swab16s(&bulk->blk_flags);
712 __swab32s(&bulk->blk_offset);
713 __swab32s(&bulk->blk_len);
719 if (req->tsr_service == SRPC_SERVICE_PING) {
720 struct test_ping_req *ping = &req->tsr_u.ping;
722 __swab32s(&ping->png_size);
723 __swab32s(&ping->png_flags);
/*
 * Build a test instance from an add-test request and attach it to a batch.
 * Server instances are simply linked in; client instances additionally read
 * the destination id list from the RPC's bulk pages and create tsr_concur
 * test units per destination.  Cleans up via sfw_destroy_test_instance() on
 * allocation failure.
 */
731 sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
733 struct srpc_msg *msg = &rpc->srpc_reqstbuf->buf_msg;
734 struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
735 struct srpc_bulk *bk = rpc->srpc_bulk;
736 int ndest = req->tsr_ndest;
737 struct sfw_test_unit *tsu;
738 struct sfw_test_instance *tsi;
742 LIBCFS_ALLOC(tsi, sizeof(*tsi));
744 CERROR("Can't allocate test instance for batch: %llu\n",
749 spin_lock_init(&tsi->tsi_lock);
750 atomic_set(&tsi->tsi_nactive, 0);
751 INIT_LIST_HEAD(&tsi->tsi_units);
752 INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
753 INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
/* Copy test parameters out of the request. */
755 tsi->tsi_stopping = 0;
756 tsi->tsi_batch = tsb;
757 tsi->tsi_loop = req->tsr_loop;
758 tsi->tsi_concur = req->tsr_concur;
759 tsi->tsi_service = req->tsr_service;
760 tsi->tsi_is_client = !!(req->tsr_is_client);
761 tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr);
763 rc = sfw_load_test(tsi);
765 LIBCFS_FREE(tsi, sizeof(*tsi));
769 LASSERT(!sfw_batch_active(tsb));
771 if (!tsi->tsi_is_client) {
772 /* it's test server, just add it to tsb */
773 list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
/* Client path: destination ids were shipped in the bulk pages. */
778 LASSERT(bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest);
779 LASSERT((unsigned int)bk->bk_len >=
780 sizeof(struct lnet_process_id_packed) * ndest);
782 sfw_unpack_addtest_req(msg);
783 memcpy(&tsi->tsi_u, &req->tsr_u, sizeof(tsi->tsi_u));
785 for (i = 0; i < ndest; i++) {
786 struct lnet_process_id_packed *dests;
787 struct lnet_process_id_packed id;
790 dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].bv_page);
791 LASSERT(dests); /* my pages are within KVM always */
792 id = dests[i % SFW_ID_PER_PAGE];
793 if (msg->msg_magic != SRPC_MSG_MAGIC)
/* One test unit per (destination, concurrency slot) pair. */
796 for (j = 0; j < tsi->tsi_concur; j++) {
797 LIBCFS_ALLOC(tsu, sizeof(struct sfw_test_unit));
800 CERROR("Can't allocate tsu for %d\n",
805 tsu->tsu_dest.nid = id.nid;
806 tsu->tsu_dest.pid = id.pid;
807 tsu->tsu_instance = tsi;
808 tsu->tsu_private = NULL;
809 list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
813 rc = tsi->tsi_ops->tso_init(tsi);
815 list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
821 sfw_destroy_test_instance(tsi);
/*
 * Called when one test unit finishes.  When it is the instance's last active
 * unit, clear the stopping flag; when it is also the batch's last active
 * instance AND the owning session has been retired (is a zombie), destroy
 * the session once no other batch of it is still active.
 */
826 sfw_test_unit_done(struct sfw_test_unit *tsu)
828 struct sfw_test_instance *tsi = tsu->tsu_instance;
829 struct sfw_batch *tsb = tsi->tsi_batch;
830 struct sfw_session *sn = tsb->bat_session;
832 LASSERT(sfw_test_active(tsi));
834 if (!atomic_dec_and_test(&tsi->tsi_nactive))
837 /* the test instance is done */
838 spin_lock(&tsi->tsi_lock);
840 tsi->tsi_stopping = 0;
842 spin_unlock(&tsi->tsi_lock);
844 spin_lock(&sfw_data.fw_lock);
846 if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */
847 sn == sfw_data.fw_session) { /* sn also active */
848 spin_unlock(&sfw_data.fw_lock);
852 LASSERT(!list_empty(&sn->sn_list)); /* I'm a zombie! */
/* Any sibling batch still running keeps the zombie session alive. */
854 list_for_each_entry(tsb, &sn->sn_batches, bat_list) {
855 if (sfw_batch_active(tsb)) {
856 spin_unlock(&sfw_data.fw_lock);
861 list_del_init(&sn->sn_list);
862 spin_unlock(&sfw_data.fw_lock);
864 sfw_destroy_session(sn);
/*
 * Completion callback for a test RPC: hand the result to the client ops,
 * unlink the RPC from the active list, then either finish the test unit
 * (stopping / loop exhausted / error with stop-on-error) or reschedule its
 * worker for the next iteration.
 */
868 sfw_test_rpc_done(struct srpc_client_rpc *rpc)
870 struct sfw_test_unit *tsu = rpc->crpc_priv;
871 struct sfw_test_instance *tsi = tsu->tsu_instance;
874 tsi->tsi_ops->tso_done_rpc(tsu, rpc);
876 spin_lock(&tsi->tsi_lock);
878 LASSERT(sfw_test_active(tsi));
879 LASSERT(!list_empty(&rpc->crpc_list));
881 list_del_init(&rpc->crpc_list);
883 /* batch is stopping or loop is done or get error */
884 if (tsi->tsi_stopping || !tsu->tsu_loop ||
885 (rpc->crpc_status && tsi->tsi_stoptsu_onerr))
888 /* dec ref for poster */
889 srpc_client_rpc_decref(rpc);
891 spin_unlock(&tsi->tsi_lock);
/* Kick the worker to launch the next RPC of this unit. */
894 swi_schedule_workitem(&tsu->tsu_worker);
898 sfw_test_unit_done(tsu);
/*
 * Obtain a client RPC for a test unit: reuse one from the instance's free
 * list when available (re-initialising it in place), otherwise allocate a
 * new one.  On success *rpcpp receives the RPC with the session feature bits
 * stamped into its request message.
 */
902 sfw_create_test_rpc(struct sfw_test_unit *tsu, struct lnet_process_id peer,
903 unsigned int features, int nblk, int blklen,
904 struct srpc_client_rpc **rpcpp)
906 struct srpc_client_rpc *rpc = NULL;
907 struct sfw_test_instance *tsi = tsu->tsu_instance;
909 spin_lock(&tsi->tsi_lock);
911 LASSERT(sfw_test_active(tsi));
912 /* pick request from buffer */
913 rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs,
914 struct srpc_client_rpc, crpc_list);
/* Recycled RPCs must have been created with the same bulk geometry. */
916 LASSERT(nblk == rpc->crpc_bulk.bk_niov);
917 list_del_init(&rpc->crpc_list);
920 spin_unlock(&tsi->tsi_lock);
923 rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
924 blklen, sfw_test_rpc_done,
925 sfw_test_rpc_fini, tsu);
927 srpc_init_client_rpc(rpc, peer, tsi->tsi_service, nblk,
928 blklen, sfw_test_rpc_done,
929 sfw_test_rpc_fini, tsu);
933 CERROR("Can't create rpc for test %d\n", tsi->tsi_service);
937 rpc->crpc_reqstmsg.msg_ses_feats = features;
/*
 * Workitem body of a test unit: ask the client ops to prepare the next RPC,
 * bail out if the instance is stopping (returning the RPC to the free list),
 * otherwise move it to the active list, set the configured rpc_timeout and
 * post it.  Falls through to sfw_test_unit_done() when the unit is finished.
 */
944 sfw_run_test(struct swi_workitem *wi)
946 struct sfw_test_unit *tsu = wi->swi_workitem.wi_data;
947 struct sfw_test_instance *tsi = tsu->tsu_instance;
948 struct srpc_client_rpc *rpc = NULL;
950 LASSERT(wi == &tsu->tsu_worker);
952 if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc)) {
959 spin_lock(&tsi->tsi_lock);
961 if (tsi->tsi_stopping) {
962 list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
963 spin_unlock(&tsi->tsi_lock);
/* Consume one loop iteration for this unit. */
967 if (tsu->tsu_loop > 0)
970 list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
971 spin_unlock(&tsi->tsi_lock);
973 spin_lock(&rpc->crpc_lock);
974 rpc->crpc_timeout = rpc_timeout;
976 spin_unlock(&rpc->crpc_lock);
981 * No one can schedule me now since:
982 * - previous RPC, if any, has done and
983 * - no new RPC is initiated.
984 * - my batch is still active; no one can run it again now.
985 * Cancel pending schedules and prevent future schedule attempts:
987 swi_exit_workitem(wi);
988 sfw_test_unit_done(tsu);
/*
 * Start every client test instance of a batch: mark instance and batch
 * active and schedule one workitem per test unit on the scheduler matching
 * the destination NID's CPU partition.  No-op when the batch already runs.
 */
993 sfw_run_batch(struct sfw_batch *tsb)
995 struct swi_workitem *wi;
996 struct sfw_test_unit *tsu;
997 struct sfw_test_instance *tsi;
999 if (sfw_batch_active(tsb)) {
1000 CDEBUG(D_NET, "Batch already active: %llu (%d)\n",
1001 tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive));
1005 list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
1006 if (!tsi->tsi_is_client) /* skip server instances */
1009 LASSERT(!tsi->tsi_stopping);
1010 LASSERT(!sfw_test_active(tsi));
1012 atomic_inc(&tsb->bat_nactive);
1014 list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
1015 atomic_inc(&tsi->tsi_nactive);
1016 tsu->tsu_loop = tsi->tsi_loop;
1017 wi = &tsu->tsu_worker;
1018 swi_init_workitem(wi, tsu, sfw_run_test,
1019 lst_sched_test[lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
1020 swi_schedule_workitem(wi);
/*
 * Request an active batch to stop: flag each running client instance as
 * stopping; with `force`, additionally abort (-EINTR) every RPC currently
 * in flight so the stop takes effect immediately.
 */
1028 sfw_stop_batch(struct sfw_batch *tsb, int force)
1030 struct sfw_test_instance *tsi;
1031 struct srpc_client_rpc *rpc;
1033 if (!sfw_batch_active(tsb)) {
1034 CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id);
1038 list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
1039 spin_lock(&tsi->tsi_lock);
/* Skip server instances, idle instances, and ones already stopping. */
1041 if (!tsi->tsi_is_client ||
1042 !sfw_test_active(tsi) || tsi->tsi_stopping) {
1043 spin_unlock(&tsi->tsi_lock);
1047 tsi->tsi_stopping = 1;
1050 spin_unlock(&tsi->tsi_lock);
1054 /* abort launched rpcs in the test */
1055 list_for_each_entry(rpc, &tsi->tsi_active_rpcs, crpc_list) {
1056 spin_lock(&rpc->crpc_lock);
1058 srpc_abort_rpc(rpc, -EINTR);
1060 spin_unlock(&rpc->crpc_lock);
1063 spin_unlock(&tsi->tsi_lock);
/*
 * Report activity of a batch (testidx selects a specific test instance;
 * the batch-wide count is used otherwise) into the batch reply.
 */
1070 sfw_query_batch(struct sfw_batch *tsb, int testidx,
1071 struct srpc_batch_reply *reply)
1073 struct sfw_test_instance *tsi;
1079 reply->bar_active = atomic_read(&tsb->bat_nactive);
1083 list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
1087 reply->bar_active = atomic_read(&tsi->tsi_nactive);
/* Release the bulk descriptor of a server RPC and clear the pointer. */
1095 sfw_free_pages(struct srpc_server_rpc *rpc)
1097 srpc_free_bulk(rpc->srpc_bulk);
1098 rpc->srpc_bulk = NULL;
/*
 * Attach a freshly allocated bulk descriptor (npages pages, len bytes, sink
 * or source per `sink`) to a server RPC that has none yet.
 */
1102 sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
1105 LASSERT(!rpc->srpc_bulk);
1106 LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
1108 rpc->srpc_bulk = srpc_alloc_bulk(cpt, 0, npages, len, sink);
1109 if (!rpc->srpc_bulk)
/*
 * Handler for SRPC_SERVICE_TEST requests: validate the request parameters,
 * locate/create the target batch, and add the test instance to it.  For a
 * client test whose destination-id bulk has not arrived yet, allocate sink
 * pages and return so the RPC resumes later in sfw_bulk_ready().  Reply
 * status uses positive errnos (EINVAL/ENOENT/EBUSY).
 */
1116 sfw_add_test(struct srpc_server_rpc *rpc)
1118 struct sfw_session *sn = sfw_data.fw_session;
1119 struct srpc_test_reply *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
1120 struct srpc_test_reqst *request;
1122 struct sfw_batch *bat;
1124 request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
1125 reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id;
/* Sanity-check every request field against its allowed range. */
1127 if (!request->tsr_loop ||
1128 !request->tsr_concur ||
1129 request->tsr_sid.ses_nid == LNET_NID_ANY ||
1130 request->tsr_ndest > SFW_MAX_NDESTS ||
1131 (request->tsr_is_client && !request->tsr_ndest) ||
1132 request->tsr_concur > SFW_MAX_CONCUR ||
1133 request->tsr_service > SRPC_SERVICE_MAX_ID ||
1134 request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) {
1135 reply->tsr_status = EINVAL;
1139 if (!sn || !sfw_sid_equal(request->tsr_sid, sn->sn_id) ||
1140 !sfw_find_test_case(request->tsr_service)) {
1141 reply->tsr_status = ENOENT;
1145 bat = sfw_bid2batch(request->tsr_bid);
1147 CERROR("dropping RPC %s from %s under memory pressure\n",
1148 rpc->srpc_scd->scd_svc->sv_name,
1149 libcfs_id2str(rpc->srpc_peer));
1153 if (sfw_batch_active(bat)) {
1154 reply->tsr_status = EBUSY;
1158 if (request->tsr_is_client && !rpc->srpc_bulk) {
1159 /* rpc will be resumed later in sfw_bulk_ready */
1160 int npg = sfw_id_pages(request->tsr_ndest);
/* Bulk length depends on whether the peer supports LST_FEAT_BULK_LEN. */
1163 if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
1164 len = npg * PAGE_SIZE;
1167 len = sizeof(struct lnet_process_id_packed) *
1171 return sfw_alloc_pages(rpc, CFS_CPT_ANY, npg, len, 1);
1174 rc = sfw_add_test_instance(bat, rpc);
1175 CDEBUG(!rc ? D_NET : D_WARNING,
1176 "%s test: sv %d %s, loop %d, concur %d, ndest %d\n",
1177 !rc ? "Added" : "Failed to add", request->tsr_service,
1178 request->tsr_is_client ? "client" : "server",
1179 request->tsr_loop, request->tsr_concur, request->tsr_ndest);
/* Convert negative internal rc to positive wire status. */
1181 reply->tsr_status = (rc < 0) ? -rc : rc;
/*
 * Handler for SRPC_SERVICE_BATCH requests: dispatch RUN/STOP/QUERY on the
 * batch named in the request.  ESRCH for sid mismatch, ENOENT for unknown
 * batch id; an unknown opcode drops the RPC with -EINVAL.
 */
1186 sfw_control_batch(struct srpc_batch_reqst *request,
1187 struct srpc_batch_reply *reply)
1189 struct sfw_session *sn = sfw_data.fw_session;
1191 struct sfw_batch *bat;
1193 reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id;
1195 if (!sn || !sfw_sid_equal(request->bar_sid, sn->sn_id)) {
1196 reply->bar_status = ESRCH;
1200 bat = sfw_find_batch(request->bar_bid);
1202 reply->bar_status = ENOENT;
1206 switch (request->bar_opc) {
1207 case SRPC_BATCH_OPC_RUN:
1208 rc = sfw_run_batch(bat);
1211 case SRPC_BATCH_OPC_STOP:
1212 rc = sfw_stop_batch(bat, request->bar_arg);
1215 case SRPC_BATCH_OPC_QUERY:
1216 rc = sfw_query_batch(bat, request->bar_testidx, reply);
1220 return -EINVAL; /* drop it */
/* Convert negative internal rc to positive wire status. */
1223 reply->bar_status = (rc < 0) ? -rc : rc;
/*
 * Top-level dispatcher for incoming framework RPCs.  Disarms the session
 * expiry timer for the duration of the handling (dropping the RPC if it
 * races with expiry), unpacks the message, enforces session feature
 * compatibility, dispatches on service id, then stamps the session features
 * on the reply and re-arms the timer.
 */
1228 sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
1230 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
1231 struct srpc_msg *reply = &rpc->srpc_replymsg;
1232 struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
1233 unsigned int features = LST_FEATS_MASK;
1236 LASSERT(!sfw_data.fw_active_srpc);
1237 LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
1239 spin_lock(&sfw_data.fw_lock);
1241 if (sfw_data.fw_shuttingdown) {
1242 spin_unlock(&sfw_data.fw_lock);
1246 /* Remove timer to avoid racing with it or expiring active session */
1247 if (sfw_del_session_timer()) {
1248 CERROR("dropping RPC %s from %s: racing with expiry timer\n",
1249 sv->sv_name, libcfs_id2str(rpc->srpc_peer));
1250 spin_unlock(&sfw_data.fw_lock);
1254 sfw_data.fw_active_srpc = rpc;
1255 spin_unlock(&sfw_data.fw_lock);
1257 sfw_unpack_message(request);
1258 LASSERT(request->msg_type == srpc_service2request(sv->sv_id));
1260 /* rpc module should have checked this */
1261 LASSERT(request->msg_version == SRPC_MSG_VERSION);
/* Feature check applies to all services except MAKE_SESSION and DEBUG. */
1263 if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
1264 sv->sv_id != SRPC_SERVICE_DEBUG) {
1265 struct sfw_session *sn = sfw_data.fw_session;
1268 sn->sn_features != request->msg_ses_feats) {
1269 CNETERR("Features of framework RPC don't match features of current session: %x/%x\n",
1270 request->msg_ses_feats, sn->sn_features);
1271 reply->msg_body.reply.status = EPROTO;
1272 reply->msg_body.reply.sid = sn->sn_id;
1276 } else if (request->msg_ses_feats & ~LST_FEATS_MASK) {
1278 * NB: at this point, old version will ignore features and
1279 * create new session anyway, so console should be able
1282 reply->msg_body.reply.status = EPROTO;
1286 switch (sv->sv_id) {
1289 case SRPC_SERVICE_TEST:
1290 rc = sfw_add_test(rpc);
1293 case SRPC_SERVICE_BATCH:
1294 rc = sfw_control_batch(&request->msg_body.bat_reqst,
1295 &reply->msg_body.bat_reply);
1298 case SRPC_SERVICE_QUERY_STAT:
1299 rc = sfw_get_stats(&request->msg_body.stat_reqst,
1300 &reply->msg_body.stat_reply);
1303 case SRPC_SERVICE_DEBUG:
1304 rc = sfw_debug_session(&request->msg_body.dbg_reqst,
1305 &reply->msg_body.dbg_reply);
1308 case SRPC_SERVICE_MAKE_SESSION:
1309 rc = sfw_make_session(&request->msg_body.mksn_reqst,
1310 &reply->msg_body.mksn_reply);
1313 case SRPC_SERVICE_REMOVE_SESSION:
1314 rc = sfw_remove_session(&request->msg_body.rmsn_reqst,
1315 &reply->msg_body.rmsn_reply);
1319 if (sfw_data.fw_session)
1320 features = sfw_data.fw_session->sn_features;
/* Echo current session features and re-arm the expiry timer. */
1322 reply->msg_ses_feats = features;
1323 rpc->srpc_done = sfw_server_rpc_done;
1324 spin_lock(&sfw_data.fw_lock);
1326 if (!sfw_data.fw_shuttingdown)
1327 sfw_add_session_timer();
1329 sfw_data.fw_active_srpc = NULL;
1330 spin_unlock(&sfw_data.fw_lock);
/*
 * Continuation of an add-test RPC once its destination-id bulk transfer
 * completes: mirrors sfw_handle_server_rpc()'s timer/shutdown handling, then
 * retries sfw_add_test() now that the bulk pages are populated.
 */
1335 sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
1337 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
1340 LASSERT(rpc->srpc_bulk);
1341 LASSERT(sv->sv_id == SRPC_SERVICE_TEST);
1342 LASSERT(!sfw_data.fw_active_srpc);
1343 LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
1345 spin_lock(&sfw_data.fw_lock);
1348 CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
1349 sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
1350 spin_unlock(&sfw_data.fw_lock);
1354 if (sfw_data.fw_shuttingdown) {
1355 spin_unlock(&sfw_data.fw_lock);
1359 if (sfw_del_session_timer()) {
1360 CERROR("dropping RPC %s from %s: racing with expiry timer\n",
1361 sv->sv_name, libcfs_id2str(rpc->srpc_peer));
1362 spin_unlock(&sfw_data.fw_lock);
1366 sfw_data.fw_active_srpc = rpc;
1367 spin_unlock(&sfw_data.fw_lock);
1369 rc = sfw_add_test(rpc);
1371 spin_lock(&sfw_data.fw_lock);
1373 if (!sfw_data.fw_shuttingdown)
1374 sfw_add_session_timer();
1376 sfw_data.fw_active_srpc = NULL;
1377 spin_unlock(&sfw_data.fw_lock);
/*
 * Create an outgoing framework RPC (service id in the framework range).
 * RPCs without bulk are recycled from fw_zombie_rpcs when possible and
 * re-initialised in place; otherwise a new one is allocated.  The session
 * feature bits are stamped on the request message.
 */
1381 struct srpc_client_rpc *
1382 sfw_create_rpc(struct lnet_process_id peer, int service,
1383 unsigned int features, int nbulkiov, int bulklen,
1384 void (*done)(struct srpc_client_rpc *), void *priv)
1386 struct srpc_client_rpc *rpc = NULL;
1388 spin_lock(&sfw_data.fw_lock);
1390 LASSERT(!sfw_data.fw_shuttingdown);
1391 LASSERT(service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
/* Recycle a zombie RPC; only bulk-less RPCs are kept on that list. */
1393 if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) {
1394 rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
1395 struct srpc_client_rpc, crpc_list);
1396 list_del(&rpc->crpc_list);
1398 srpc_init_client_rpc(rpc, peer, service, 0, 0,
1399 done, sfw_client_rpc_fini, priv);
1402 spin_unlock(&sfw_data.fw_lock);
1405 rpc = srpc_create_client_rpc(peer, service,
1406 nbulkiov, bulklen, done,
1408 sfw_client_rpc_fini,
1412 if (rpc) /* "session" is concept in framework */
1413 rpc->crpc_reqstmsg.msg_ses_feats = features;
1419 sfw_unpack_message(struct srpc_msg *msg)
1421 if (msg->msg_magic == SRPC_MSG_MAGIC)
1422 return; /* no flipping needed */
1424 /* srpc module should guarantee I wouldn't get crap */
1425 LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
1427 if (msg->msg_type == SRPC_MSG_STAT_REQST) {
1428 struct srpc_stat_reqst *req = &msg->msg_body.stat_reqst;
1430 __swab32s(&req->str_type);
1431 __swab64s(&req->str_rpyid);
1432 sfw_unpack_sid(req->str_sid);
1436 if (msg->msg_type == SRPC_MSG_STAT_REPLY) {
1437 struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
1439 __swab32s(&rep->str_status);
1440 sfw_unpack_sid(rep->str_sid);
1441 sfw_unpack_fw_counters(rep->str_fw);
1442 sfw_unpack_rpc_counters(rep->str_rpc);
1443 sfw_unpack_lnet_counters(rep->str_lnet);
1447 if (msg->msg_type == SRPC_MSG_MKSN_REQST) {
1448 struct srpc_mksn_reqst *req = &msg->msg_body.mksn_reqst;
1450 __swab64s(&req->mksn_rpyid);
1451 __swab32s(&req->mksn_force);
1452 sfw_unpack_sid(req->mksn_sid);
1456 if (msg->msg_type == SRPC_MSG_MKSN_REPLY) {
1457 struct srpc_mksn_reply *rep = &msg->msg_body.mksn_reply;
1459 __swab32s(&rep->mksn_status);
1460 __swab32s(&rep->mksn_timeout);
1461 sfw_unpack_sid(rep->mksn_sid);
1465 if (msg->msg_type == SRPC_MSG_RMSN_REQST) {
1466 struct srpc_rmsn_reqst *req = &msg->msg_body.rmsn_reqst;
1468 __swab64s(&req->rmsn_rpyid);
1469 sfw_unpack_sid(req->rmsn_sid);
1473 if (msg->msg_type == SRPC_MSG_RMSN_REPLY) {
1474 struct srpc_rmsn_reply *rep = &msg->msg_body.rmsn_reply;
1476 __swab32s(&rep->rmsn_status);
1477 sfw_unpack_sid(rep->rmsn_sid);
1481 if (msg->msg_type == SRPC_MSG_DEBUG_REQST) {
1482 struct srpc_debug_reqst *req = &msg->msg_body.dbg_reqst;
1484 __swab64s(&req->dbg_rpyid);
1485 __swab32s(&req->dbg_flags);
1486 sfw_unpack_sid(req->dbg_sid);
1490 if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) {
1491 struct srpc_debug_reply *rep = &msg->msg_body.dbg_reply;
1493 __swab32s(&rep->dbg_nbatch);
1494 __swab32s(&rep->dbg_timeout);
1495 sfw_unpack_sid(rep->dbg_sid);
1499 if (msg->msg_type == SRPC_MSG_BATCH_REQST) {
1500 struct srpc_batch_reqst *req = &msg->msg_body.bat_reqst;
1502 __swab32s(&req->bar_opc);
1503 __swab64s(&req->bar_rpyid);
1504 __swab32s(&req->bar_testidx);
1505 __swab32s(&req->bar_arg);
1506 sfw_unpack_sid(req->bar_sid);
1507 __swab64s(&req->bar_bid.bat_id);
1511 if (msg->msg_type == SRPC_MSG_BATCH_REPLY) {
1512 struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
1514 __swab32s(&rep->bar_status);
1515 sfw_unpack_sid(rep->bar_sid);
1519 if (msg->msg_type == SRPC_MSG_TEST_REQST) {
1520 struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
1522 __swab64s(&req->tsr_rpyid);
1523 __swab64s(&req->tsr_bulkid);
1524 __swab32s(&req->tsr_loop);
1525 __swab32s(&req->tsr_ndest);
1526 __swab32s(&req->tsr_concur);
1527 __swab32s(&req->tsr_service);
1528 sfw_unpack_sid(req->tsr_sid);
1529 __swab64s(&req->tsr_bid.bat_id);
1533 if (msg->msg_type == SRPC_MSG_TEST_REPLY) {
1534 struct srpc_test_reply *rep = &msg->msg_body.tes_reply;
1536 __swab32s(&rep->tsr_status);
1537 sfw_unpack_sid(rep->tsr_sid);
1541 if (msg->msg_type == SRPC_MSG_JOIN_REQST) {
1542 struct srpc_join_reqst *req = &msg->msg_body.join_reqst;
1544 __swab64s(&req->join_rpyid);
1545 sfw_unpack_sid(req->join_sid);
1549 if (msg->msg_type == SRPC_MSG_JOIN_REPLY) {
1550 struct srpc_join_reply *rep = &msg->msg_body.join_reply;
1552 __swab32s(&rep->join_status);
1553 __swab32s(&rep->join_timeout);
1554 sfw_unpack_sid(rep->join_sid);
1562 sfw_abort_rpc(struct srpc_client_rpc *rpc)
1564 LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
1565 LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
1567 spin_lock(&rpc->crpc_lock);
1568 srpc_abort_rpc(rpc, -EINTR);
1569 spin_unlock(&rpc->crpc_lock);
1573 sfw_post_rpc(struct srpc_client_rpc *rpc)
1575 spin_lock(&rpc->crpc_lock);
1577 LASSERT(!rpc->crpc_closed);
1578 LASSERT(!rpc->crpc_aborted);
1579 LASSERT(list_empty(&rpc->crpc_list));
1580 LASSERT(!sfw_data.fw_shuttingdown);
1582 rpc->crpc_timeout = rpc_timeout;
1585 spin_unlock(&rpc->crpc_lock);
1588 static struct srpc_service sfw_services[] = {
1590 /* sv_id */ SRPC_SERVICE_DEBUG,
1591 /* sv_name */ "debug",
1595 /* sv_id */ SRPC_SERVICE_QUERY_STAT,
1596 /* sv_name */ "query stats",
1600 /* sv_id */ SRPC_SERVICE_MAKE_SESSION,
1601 /* sv_name */ "make session",
1605 /* sv_id */ SRPC_SERVICE_REMOVE_SESSION,
1606 /* sv_name */ "remove session",
1610 /* sv_id */ SRPC_SERVICE_BATCH,
1611 /* sv_name */ "batch service",
1615 /* sv_id */ SRPC_SERVICE_TEST,
1616 /* sv_name */ "test service",
1632 struct srpc_service *sv;
1633 struct sfw_test_case *tsc;
1635 if (session_timeout < 0) {
1636 CERROR("Session timeout must be non-negative: %d\n",
1641 if (rpc_timeout < 0) {
1642 CERROR("RPC timeout must be non-negative: %d\n",
1647 if (!session_timeout)
1648 CWARN("Zero session_timeout specified - test sessions never expire.\n");
1651 CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
1653 memset(&sfw_data, 0, sizeof(struct smoketest_framework));
1655 sfw_data.fw_session = NULL;
1656 sfw_data.fw_active_srpc = NULL;
1657 spin_lock_init(&sfw_data.fw_lock);
1658 atomic_set(&sfw_data.fw_nzombies, 0);
1659 INIT_LIST_HEAD(&sfw_data.fw_tests);
1660 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
1661 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
1663 brw_init_test_client();
1664 brw_init_test_service();
1665 rc = sfw_register_test(&brw_test_service, &brw_test_client);
1668 ping_init_test_client();
1669 ping_init_test_service();
1670 rc = sfw_register_test(&ping_test_service, &ping_test_client);
1674 list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
1675 sv = tsc->tsc_srv_service;
1677 rc = srpc_add_service(sv);
1678 LASSERT(rc != -EBUSY);
1680 CWARN("Failed to add %s service: %d\n",
1686 for (i = 0; ; i++) {
1687 sv = &sfw_services[i];
1691 sv->sv_bulk_ready = NULL;
1692 sv->sv_handler = sfw_handle_server_rpc;
1693 sv->sv_wi_total = SFW_FRWK_WI_MAX;
1694 if (sv->sv_id == SRPC_SERVICE_TEST)
1695 sv->sv_bulk_ready = sfw_bulk_ready;
1697 rc = srpc_add_service(sv);
1698 LASSERT(rc != -EBUSY);
1700 CWARN("Failed to add %s service: %d\n",
1705 /* about to sfw_shutdown, no need to add buffer */
1709 rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
1711 CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
1712 sv->sv_name, sv->sv_wi_total, rc);
1725 struct srpc_service *sv;
1726 struct sfw_test_case *tsc;
1729 spin_lock(&sfw_data.fw_lock);
1731 sfw_data.fw_shuttingdown = 1;
1732 lst_wait_until(!sfw_data.fw_active_srpc, sfw_data.fw_lock,
1733 "waiting for active RPC to finish.\n");
1735 if (sfw_del_session_timer())
1736 lst_wait_until(!sfw_data.fw_session, sfw_data.fw_lock,
1737 "waiting for session timer to explode.\n");
1739 sfw_deactivate_session();
1740 lst_wait_until(!atomic_read(&sfw_data.fw_nzombies),
1742 "waiting for %d zombie sessions to die.\n",
1743 atomic_read(&sfw_data.fw_nzombies));
1745 spin_unlock(&sfw_data.fw_lock);
1747 for (i = 0; ; i++) {
1748 sv = &sfw_services[i];
1752 srpc_shutdown_service(sv);
1753 srpc_remove_service(sv);
1756 list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
1757 sv = tsc->tsc_srv_service;
1758 srpc_shutdown_service(sv);
1759 srpc_remove_service(sv);
1762 while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
1763 struct srpc_client_rpc *rpc;
1765 rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
1766 struct srpc_client_rpc, crpc_list);
1767 list_del(&rpc->crpc_list);
1769 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
1772 for (i = 0; ; i++) {
1773 sv = &sfw_services[i];
1777 srpc_wait_service_shutdown(sv);
1780 while (!list_empty(&sfw_data.fw_tests)) {
1781 tsc = list_entry(sfw_data.fw_tests.next,
1782 struct sfw_test_case, tsc_list);
1784 srpc_wait_service_shutdown(tsc->tsc_srv_service);
1786 list_del(&tsc->tsc_list);
1787 LIBCFS_FREE(tsc, sizeof(*tsc));