// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2020
 */
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"
static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
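/*
 * React to a QDIO error reported by one of the queue handlers: log a
 * warning on the ccw device and kick adapter recovery. SLSB state errors
 * additionally trigger hardware logging and shut the adapter down; all
 * other errors lead to a reopen of the adapter.
 */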
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
				    unsigned int qdio_err)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
		return;
	}
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}
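/* Zero a range of SBALs, wrapping around the circular queue as needed. */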
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}
/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}
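/*
 * Completion handler for the request (output) queue, called by the qdio
 * layer: zero the SBALs that were transferred, update the queue
 * utilization statistics, return the buffers to the free count and wake
 * up waiters in zfcp_qdio_sbal_get().
 */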
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	if (unlikely(qdio_err)) {
		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);

	spin_lock_irq(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock_irq(&qdio->stat_lock);
	atomic_add(count, &qdio->req_q_free);
	wake_up(&qdio->req_q_wq);
}
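/*
 * Handler for the response (input) queue, called by the qdio layer for
 * newly arrived SBALs: on error, optionally collect the affected buffers
 * for debug tracing and start recovery; otherwise pass each returned SBAL
 * to zfcp_fsf_reqid_check() for request completion and hand the buffers
 * back to the input queue.
 */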
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			memset(pl, 0,
			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
			sbale = qdio->res_q[idx]->element;
			req_id = sbale->addr;
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
				     /* incl. signaling SBAL */

			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put SBALs back to response queue
	 */
	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}
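/*
 * Close the current SBAL and chain the request into the next one: mark
 * the current SBALE as last entry, flag the SBAL as continued, advance
 * sbal_last and reset the SBALE cursor. Returns the first SBALE of the
 * new SBAL, or NULL if the request would exceed its SBAL limit.
 */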
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}
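/*
 * Advance to the next SBALE of the request, chaining into a new SBAL
 * once the current one is full.
 */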
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return zfcp_qdio_sbal_chain(qdio, q_req);

	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 *
 * Returns: zero or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = sg_phys(sg);
		sbale->length = sg->length;
	}
	return 0;
}
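/*
 * Wait condition for zfcp_qdio_sbal_get(): true if a free SBAL is
 * available or if QDIO is down (so the waiter can fail fast).
 */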
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	return 0;
}
/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}
/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	atomic_sub(sbal_number, &qdio->req_q_free);

	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
			 q_req->sbal_first, sbal_number);

	if (unlikely(retval)) {
		/* Failed to submit the IO, roll back our modifications. */
		atomic_add(sbal_number, &qdio->req_q_free);
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	/* account for transferred buffers */
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}
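/*
 * Note on usage (informational, not taken verbatim from the callers):
 * request submission in zfcp_fsf.c roughly follows the pattern of taking
 * qdio->req_q_lock, reserving an SBAL via zfcp_qdio_sbal_get(), filling
 * the request (e.g. through zfcp_qdio_sbals_from_sg()) and finally
 * issuing it with zfcp_qdio_send().
 */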
/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
	if (ret)
		goto free_res_q;

	return 0;

free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}
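/*
 * Propagate the QDIO limits of this adapter to the SCSI host: the number
 * of usable SBALEs per request determines the scatter-gather table size,
 * and max_sectors follows from it (8 sectors of 512 bytes, i.e. one 4 kB
 * page, per SBALE).
 */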
void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
			    const struct zfcp_qdio *const qdio)
{
	struct Scsi_Host *const shost = adapter->scsi_host;

	if (shost == NULL)
		return;

	shost->sg_tablesize = qdio->max_sbale_per_req;
	shost->max_sectors = qdio->max_sbale_per_req * 8;
}
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	init_data.q_format = QDIO_ZFCP_QFMT;
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;
	init_data.scan_threshold =
		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	if (!qdio)
		return;

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	kfree(qdio);
}
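/*
 * Allocate and initialize the zfcp_qdio object for an adapter, including
 * the queue buffers and locks; the result is stored in adapter->qdio.
 */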
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		kfree(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);

	adapter->qdio = qdio;
	return 0;
}
/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging. This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &adapter->status);
}