2 * S/390 common I/O routines -- channel subsystem call
4 * Copyright IBM Corp. 1999,2012
5 * Author(s): Ingo Adlung (adlung@de.ibm.com)
6 * Cornelia Huck (cornelia.huck@de.ibm.com)
7 * Arnd Bergmann (arndb@de.ibm.com)
10 #define KMSG_COMPONENT "cio"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/device.h>
17 #include <linux/mutex.h>
18 #include <linux/pci.h>
21 #include <asm/chpid.h>
25 #include <asm/ebcdic.h>
29 #include "cio_debug.h"
34 static void *sei_page;
35 static void *chsc_page;
36 static DEFINE_SPINLOCK(chsc_page_lock);
39 * chsc_error_from_response() - convert a chsc response to an error
40 * @response: chsc response code
42 * Returns an appropriate Linux error code for @response.
44 int chsc_error_from_response(int response)
60 case 0x0107: /* "Channel busy" for the op 0x003d */
69 EXPORT_SYMBOL_GPL(chsc_error_from_response);
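/*
 * Usage sketch (illustrative, not part of the original source): callers in
 * this file issue the CHSC instruction first and consult
 * chsc_error_from_response() only when the condition code indicates that a
 * response block was stored; "area" stands for any request/response block
 * that carries struct chsc_header request and response members.
 *
 *	cc = chsc(area);
 *	if (cc > 0)
 *		return (cc == 3) ? -ENODEV : -EBUSY;
 *	return chsc_error_from_response(area->response.code);
 */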
71 struct chsc_ssd_area {
72 struct chsc_header request;
76 u16 f_sch; /* first subchannel */
78 u16 l_sch; /* last subchannel */
80 struct chsc_header response;
84 u8 st : 3; /* subchannel type */
86 u8 unit_addr; /* unit address */
87 u16 devno; /* device number */
90 u16 sch; /* subchannel */
91 u8 chpid[8]; /* chpids 0-7 */
92 u16 fla[8]; /* full link addresses 0-7 */
93 } __attribute__ ((packed));
95 int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
97 struct chsc_ssd_area *ssd_area;
104 spin_lock_irqsave(&chsc_page_lock, flags);
105 memset(chsc_page, 0, PAGE_SIZE);
106 ssd_area = chsc_page;
107 ssd_area->request.length = 0x0010;
108 ssd_area->request.code = 0x0004;
109 ssd_area->ssid = schid.ssid;
110 ssd_area->f_sch = schid.sch_no;
111 ssd_area->l_sch = schid.sch_no;
113 ccode = chsc(ssd_area);
114 /* Check response. */
116 ret = (ccode == 3) ? -ENODEV : -EBUSY;
119 ret = chsc_error_from_response(ssd_area->response.code);
121 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
122 schid.ssid, schid.sch_no,
123 ssd_area->response.code);
126 if (!ssd_area->sch_valid) {
132 memset(ssd, 0, sizeof(struct chsc_ssd_info));
133 if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
134 (ssd_area->st != SUBCHANNEL_TYPE_MSG))
136 ssd->path_mask = ssd_area->path_mask;
137 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
138 for (i = 0; i < 8; i++) {
140 if (ssd_area->path_mask & mask) {
141 chp_id_init(&ssd->chpid[i]);
142 ssd->chpid[i].id = ssd_area->chpid[i];
144 if (ssd_area->fla_valid_mask & mask)
145 ssd->fla[i] = ssd_area->fla[i];
148 spin_unlock_irqrestore(&chsc_page_lock, flags);
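/*
 * Usage sketch (illustrative): the subchannel recognition code is the
 * assumed caller and keeps its own chsc_ssd_info copy; "sch" is a
 * placeholder struct subchannel pointer.
 *
 *	struct chsc_ssd_info ssd;
 *
 *	if (chsc_get_ssd_info(sch->schid, &ssd) == 0)
 *		memcpy(&sch->ssd_info, &ssd, sizeof(ssd));
 */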
153 * chsc_ssqd() - store subchannel QDIO data (SSQD)
154 * @schid: id of the subchannel on which SSQD is performed
155 * @ssqd: request and response block for SSQD
157 * Returns 0 on success.
159 int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
161 memset(ssqd, 0, sizeof(*ssqd));
162 ssqd->request.length = 0x0010;
163 ssqd->request.code = 0x0024;
164 ssqd->first_sch = schid.sch_no;
165 ssqd->last_sch = schid.sch_no;
166 ssqd->ssid = schid.ssid;
171 return chsc_error_from_response(ssqd->response.code);
173 EXPORT_SYMBOL_GPL(chsc_ssqd);
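/*
 * Usage sketch (illustrative): the qdio layer is the assumed caller; the
 * request/response block must be a zeroed, DMA-capable page owned by the
 * caller, and "schid" identifies the subchannel of interest.
 *
 *	struct chsc_ssqd_area *ssqd;
 *	int rc;
 *
 *	ssqd = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *	if (!ssqd)
 *		return -ENOMEM;
 *	rc = chsc_ssqd(schid, ssqd);
 *	if (!rc) {
 *		/* consume the subchannel-QDIO description in the response ... */
 *	}
 *	free_page((unsigned long)ssqd);
 */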
176 * chsc_sadc() - set adapter device controls (SADC)
177 * @schid: id of the subchannel on which SADC is performed
178 * @scssc: request and response block for SADC
179 * @summary_indicator_addr: summary indicator address
180 * @subchannel_indicator_addr: subchannel indicator address
182 * Returns 0 on success.
184 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
185 u64 summary_indicator_addr, u64 subchannel_indicator_addr)
187 memset(scssc, 0, sizeof(*scssc));
188 scssc->request.length = 0x0fe0;
189 scssc->request.code = 0x0021;
190 scssc->operation_code = 0;
192 scssc->summary_indicator_addr = summary_indicator_addr;
193 scssc->subchannel_indicator_addr = subchannel_indicator_addr;
195 scssc->ks = PAGE_DEFAULT_KEY >> 4;
196 scssc->kc = PAGE_DEFAULT_KEY >> 4;
197 scssc->isc = QDIO_AIRQ_ISC;
198 scssc->schid = schid;
200 /* enable the time delay disablement facility */
201 if (css_general_characteristics.aif_tdd)
202 scssc->word_with_d_bit = 0x10000000;
207 return chsc_error_from_response(scssc->response.code);
209 EXPORT_SYMBOL_GPL(chsc_sadc);
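/*
 * Usage sketch (illustrative): the assumed caller is the qdio
 * adapter-interrupt setup; it passes the addresses of its summary and
 * subchannel indicator bytes and passes zero for both to release the
 * controls again.  "scssc" is a caller-provided page, the address
 * variables are placeholders.
 *
 *	rc = chsc_sadc(schid, scssc, summary_addr, subch_ind_addr);
 *	...
 *	rc = chsc_sadc(schid, scssc, 0, 0);	/* tear down */
 */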
211 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
213 spin_lock_irq(sch->lock);
214 if (sch->driver && sch->driver->chp_event)
215 if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
217 spin_unlock_irq(sch->lock);
222 spin_unlock_irq(sch->lock);
223 css_schedule_eval(sch->schid);
227 void chsc_chp_offline(struct chp_id chpid)
229 struct channel_path *chp = chpid_to_chp(chpid);
230 struct chp_link link;
233 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
234 CIO_TRACE_EVENT(2, dbf_txt);
236 if (chp_get_status(chpid) <= 0)
238 memset(&link, 0, sizeof(struct chp_link));
240 /* Wait until previous actions have settled. */
241 css_wait_for_slow_path();
243 mutex_lock(&chp->lock);
244 chp_update_desc(chp);
245 mutex_unlock(&chp->lock);
247 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
250 static int __s390_process_res_acc(struct subchannel *sch, void *data)
252 spin_lock_irq(sch->lock);
253 if (sch->driver && sch->driver->chp_event)
254 sch->driver->chp_event(sch, data, CHP_ONLINE);
255 spin_unlock_irq(sch->lock);
260 static void s390_process_res_acc(struct chp_link *link)
264 sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
266 CIO_TRACE_EVENT(2, dbf_txt);
267 if (link->fla != 0) {
268 sprintf(dbf_txt, "fla%x", link->fla);
269 CIO_TRACE_EVENT(2, dbf_txt);
271 /* Wait until previous actions have settled. */
272 css_wait_for_slow_path();
274 * I/O resources may have become accessible.
275 * Scan through all subchannels that may be concerned and
276 * do a validation on those.
277 * The more information we have (info), the less scanning
278 * we will have to do.
280 for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
281 css_schedule_reprobe();
284 struct chsc_sei_nt0_area {
286 u8 vf; /* validity flags */
287 u8 rs; /* reporting source */
288 u8 cc; /* content code */
289 u16 fla; /* full link address */
290 u16 rsid; /* reporting source id */
293 /* ccdf has to be big enough for a link-incident record */
294 u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
297 struct chsc_sei_nt2_area {
298 u8 flags; /* p and v bit */
301 u8 cc; /* content code */
303 u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
306 #define CHSC_SEI_NT0 (1ULL << 63)
307 #define CHSC_SEI_NT2 (1ULL << 61)
310 struct chsc_header request;
312 u64 ntsm; /* notification type mask */
313 struct chsc_header response;
317 struct chsc_sei_nt0_area nt0_area;
318 struct chsc_sei_nt2_area nt2_area;
319 u8 nt_area[PAGE_SIZE - 24];
324 * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
327 #define ND_VALIDITY_VALID 0
328 #define ND_VALIDITY_OUTDATED 1
329 #define ND_VALIDITY_INVALID 2
331 struct node_descriptor {
341 /* Node parameters. */
347 char manufacturer[3];
354 * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
357 #define LIR_IQ_CLASS_INFO 0
358 #define LIR_IQ_CLASS_DEGRADED 1
359 #define LIR_IQ_CLASS_NOT_OPERATIONAL 2
370 struct node_descriptor incident_node;
371 struct node_descriptor attached_node;
375 #define PARAMS_LEN 10 /* PARAMS=xx,xxxxxx */
376 #define NODEID_LEN 35 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
378 /* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */
379 static char *store_ebcdic(char *dest, const char *src, unsigned long len,
382 memcpy(dest, src, len);
391 /* Format node ID and parameters for output in LIR log message. */
392 static void format_node_data(char *params, char *id, struct node_descriptor *nd)
394 memset(params, 0, PARAMS_LEN);
395 memset(id, 0, NODEID_LEN);
397 if (nd->validity != ND_VALIDITY_VALID) {
398 strncpy(params, "n/a", PARAMS_LEN - 1);
399 strncpy(id, "n/a", NODEID_LEN - 1);
403 /* PARAMS=xx,xxxxxx */
404 snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
405 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
406 id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
407 id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
408 id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
409 id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
410 id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
411 sprintf(id, "%04X", nd->tag);
414 static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
416 struct lir *lir = (struct lir *) &sei_area->ccdf;
417 char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN],
418 aunodeid[NODEID_LEN];
420 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
421 sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);
423 /* Ignore NULL Link Incident Records. */
427 /* Inform the user that a link requires maintenance actions because it has
428 * become degraded or not operational. Note that this log message is
429 * the primary intention behind a Link Incident Record. */
431 format_node_data(iuparams, iunodeid, &lir->incident_node);
432 format_node_data(auparams, aunodeid, &lir->attached_node);
434 switch (lir->iq.class) {
435 case LIR_IQ_CLASS_DEGRADED:
436 pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x "
437 "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
438 sei_area->rs, sei_area->rsid, lir->ic, iuparams,
439 iunodeid, auparams, aunodeid);
441 case LIR_IQ_CLASS_NOT_OPERATIONAL:
442 pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x "
443 "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
444 sei_area->rs, sei_area->rsid, lir->ic, iuparams,
445 iunodeid, auparams, aunodeid);
452 static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
454 struct channel_path *chp;
455 struct chp_link link;
459 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
460 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
461 if (sei_area->rs != 4)
464 chpid.id = sei_area->rsid;
465 /* allocate a new channel path structure, if needed */
466 status = chp_get_status(chpid);
473 chp = chpid_to_chp(chpid);
474 mutex_lock(&chp->lock);
475 chp_update_desc(chp);
476 mutex_unlock(&chp->lock);
478 memset(&link, 0, sizeof(struct chp_link));
480 if ((sei_area->vf & 0xc0) != 0) {
481 link.fla = sei_area->fla;
482 if ((sei_area->vf & 0xc0) == 0xc0)
483 /* full link address */
484 link.fla_mask = 0xffff;
487 link.fla_mask = 0xff00;
489 s390_process_res_acc(&link);
492 static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
494 struct channel_path *chp;
499 CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
500 if (sei_area->rs != 0)
502 data = sei_area->ccdf;
504 for (num = 0; num <= __MAX_CHPID; num++) {
505 if (!chp_test_bit(data, num))
509 CIO_CRW_EVENT(4, "Update information for channel path "
510 "%x.%02x\n", chpid.cssid, chpid.id);
511 chp = chpid_to_chp(chpid);
516 mutex_lock(&chp->lock);
517 chp_update_desc(chp);
518 mutex_unlock(&chp->lock);
522 struct chp_config_data {
528 static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
530 struct chp_config_data *data;
533 char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};
535 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
536 if (sei_area->rs != 0)
538 data = (struct chp_config_data *) &(sei_area->ccdf);
540 for (num = 0; num <= __MAX_CHPID; num++) {
541 if (!chp_test_bit(data->map, num))
544 pr_notice("Processing %s for channel path %x.%02x\n",
545 events[data->op], chpid.cssid, chpid.id);
548 chp_cfg_schedule(chpid, 1);
551 chp_cfg_schedule(chpid, 0);
554 chp_cfg_cancel_deconfigure(chpid);
560 static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
564 CIO_CRW_EVENT(4, "chsc: scm change notification\n");
565 if (sei_area->rs != 7)
568 ret = scm_update_information();
570 CIO_CRW_EVENT(0, "chsc: updating change notification"
571 " failed (rc=%d).\n", ret);
574 static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
578 CIO_CRW_EVENT(4, "chsc: scm available information\n");
579 if (sei_area->rs != 7)
582 ret = scm_process_availability_information();
584 CIO_CRW_EVENT(0, "chsc: process availability information"
585 " failed (rc=%d).\n", ret);
588 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
590 switch (sei_area->cc) {
592 zpci_event_error(sei_area->ccdf);
595 zpci_event_availability(sei_area->ccdf);
598 CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
604 static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
606 /* which kind of information was stored? */
607 switch (sei_area->cc) {
608 case 1: /* link incident */
609 chsc_process_sei_link_incident(sei_area);
611 case 2: /* i/o resource accessibility */
612 chsc_process_sei_res_acc(sei_area);
614 case 7: /* channel-path-availability information */
615 chsc_process_sei_chp_avail(sei_area);
617 case 8: /* channel-path-configuration notification */
618 chsc_process_sei_chp_config(sei_area);
620 case 12: /* scm change notification */
621 chsc_process_sei_scm_change(sei_area);
623 case 14: /* scm available notification */
624 chsc_process_sei_scm_avail(sei_area);
626 default: /* other stuff */
627 CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
632 /* Check if we might have lost some information. */
633 if (sei_area->flags & 0x40) {
634 CIO_CRW_EVENT(2, "chsc: event overflow\n");
635 css_schedule_eval_all();
639 static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
641 static int ntsm_unsupported;
644 memset(sei, 0, sizeof(*sei));
645 sei->request.length = 0x0010;
646 sei->request.code = 0x000e;
647 if (!ntsm_unsupported)
653 if (sei->response.code != 0x0001) {
654 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
655 sei->response.code, sei->ntsm);
657 if (sei->response.code == 3 && sei->ntsm) {
658 /* Fallback for old firmware. */
659 ntsm_unsupported = 1;
665 CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
668 chsc_process_sei_nt0(&sei->u.nt0_area);
671 chsc_process_sei_nt2(&sei->u.nt2_area);
674 CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
678 if (!(sei->u.nt0_area.flags & 0x80))
684 * Handle channel subsystem related CRWs.
685 * Use store event information to find out what's going on.
687 * Note: Access to sei_page is serialized through machine check handler
688 * thread, so no need for locking.
690 static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
692 struct chsc_sei *sei = sei_page;
695 css_schedule_eval_all();
698 CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
699 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
700 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
701 crw0->erc, crw0->rsid);
703 CIO_TRACE_EVENT(2, "prcss");
704 chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
707 void chsc_chp_online(struct chp_id chpid)
709 struct channel_path *chp = chpid_to_chp(chpid);
710 struct chp_link link;
713 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
714 CIO_TRACE_EVENT(2, dbf_txt);
716 if (chp_get_status(chpid) != 0) {
717 memset(&link, 0, sizeof(struct chp_link));
719 /* Wait until previous actions have settled. */
720 css_wait_for_slow_path();
722 mutex_lock(&chp->lock);
723 chp_update_desc(chp);
724 mutex_unlock(&chp->lock);
726 for_each_subchannel_staged(__s390_process_res_acc, NULL,
728 css_schedule_reprobe();
732 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
733 struct chp_id chpid, int on)
736 struct chp_link link;
738 memset(&link, 0, sizeof(struct chp_link));
740 spin_lock_irqsave(sch->lock, flags);
741 if (sch->driver && sch->driver->chp_event)
742 sch->driver->chp_event(sch, &link,
743 on ? CHP_VARY_ON : CHP_VARY_OFF);
744 spin_unlock_irqrestore(sch->lock, flags);
747 static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
749 struct chp_id *chpid = data;
751 __s390_subchannel_vary_chpid(sch, *chpid, 0);
755 static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
757 struct chp_id *chpid = data;
759 __s390_subchannel_vary_chpid(sch, *chpid, 1);
764 * chsc_chp_vary - propagate channel-path vary operation to subchannels
765 * @chpid: channel-path ID
766 * @on: non-zero for vary online, zero for vary offline
768 int chsc_chp_vary(struct chp_id chpid, int on)
770 struct channel_path *chp = chpid_to_chp(chpid);
773 * Redo PathVerification on the devices the chpid connects to
776 /* Try to update the channel path description. */
777 chp_update_desc(chp);
778 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
780 css_schedule_reprobe();
782 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
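/*
 * Usage sketch (illustrative): the channel-path sysfs handling is the
 * assumed caller; the chp_id is initialized with chp_id_init() before the
 * id is filled in (0x50 is a placeholder).
 *
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x50;
 *	rc = chsc_chp_vary(chpid, 1);	/* vary the path online */
 */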
789 chsc_remove_cmg_attr(struct channel_subsystem *css)
793 for (i = 0; i <= __MAX_CHPID; i++) {
796 chp_remove_cmg_attr(css->chps[i]);
801 chsc_add_cmg_attr(struct channel_subsystem *css)
806 for (i = 0; i <= __MAX_CHPID; i++) {
809 ret = chp_add_cmg_attr(css->chps[i]);
815 for (--i; i >= 0; i--) {
818 chp_remove_cmg_attr(css->chps[i]);
823 int __chsc_do_secm(struct channel_subsystem *css, int enable)
826 struct chsc_header request;
827 u32 operation_code : 2;
836 struct chsc_header response;
841 } __attribute__ ((packed)) *secm_area;
845 spin_lock_irqsave(&chsc_page_lock, flags);
846 memset(chsc_page, 0, PAGE_SIZE);
847 secm_area = chsc_page;
848 secm_area->request.length = 0x0050;
849 secm_area->request.code = 0x0016;
851 secm_area->key = PAGE_DEFAULT_KEY >> 4;
852 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
853 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
855 secm_area->operation_code = enable ? 0 : 1;
857 ccode = chsc(secm_area);
859 ret = (ccode == 3) ? -ENODEV : -EBUSY;
863 switch (secm_area->response.code) {
869 ret = chsc_error_from_response(secm_area->response.code);
872 CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
873 secm_area->response.code);
875 spin_unlock_irqrestore(&chsc_page_lock, flags);
880 chsc_secm(struct channel_subsystem *css, int enable)
884 if (enable && !css->cm_enabled) {
885 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
886 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
887 if (!css->cub_addr1 || !css->cub_addr2) {
888 free_page((unsigned long)css->cub_addr1);
889 free_page((unsigned long)css->cub_addr2);
893 ret = __chsc_do_secm(css, enable);
895 css->cm_enabled = enable;
896 if (css->cm_enabled) {
897 ret = chsc_add_cmg_attr(css);
899 __chsc_do_secm(css, 0);
903 chsc_remove_cmg_attr(css);
905 if (!css->cm_enabled) {
906 free_page((unsigned long)css->cub_addr1);
907 free_page((unsigned long)css->cub_addr2);
912 int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
913 int c, int m, void *page)
915 struct chsc_scpd *scpd_area;
918 if ((rfmt == 1) && !css_general_characteristics.fcs)
920 if ((rfmt == 2) && !css_general_characteristics.cib)
923 memset(page, 0, PAGE_SIZE);
925 scpd_area->request.length = 0x0010;
926 scpd_area->request.code = 0x0002;
927 scpd_area->cssid = chpid.cssid;
928 scpd_area->first_chpid = chpid.id;
929 scpd_area->last_chpid = chpid.id;
932 scpd_area->fmt = fmt;
933 scpd_area->rfmt = rfmt;
935 ccode = chsc(scpd_area);
937 return (ccode == 3) ? -ENODEV : -EBUSY;
939 ret = chsc_error_from_response(scpd_area->response.code);
941 CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
942 scpd_area->response.code);
945 EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
947 int chsc_determine_base_channel_path_desc(struct chp_id chpid,
948 struct channel_path_desc *desc)
950 struct chsc_response_struct *chsc_resp;
951 struct chsc_scpd *scpd_area;
955 spin_lock_irqsave(&chsc_page_lock, flags);
956 scpd_area = chsc_page;
957 ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
960 chsc_resp = (void *)&scpd_area->response;
961 memcpy(desc, &chsc_resp->data, sizeof(*desc));
963 spin_unlock_irqrestore(&chsc_page_lock, flags);
967 int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
968 struct channel_path_desc_fmt1 *desc)
970 struct chsc_response_struct *chsc_resp;
971 struct chsc_scpd *scpd_area;
975 spin_lock_irqsave(&chsc_page_lock, flags);
976 scpd_area = chsc_page;
977 ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
980 chsc_resp = (void *)&scpd_area->response;
981 memcpy(desc, &chsc_resp->data, sizeof(*desc));
983 spin_unlock_irqrestore(&chsc_page_lock, flags);
988 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
989 struct cmg_chars *chars)
993 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
994 mask = 0x80 >> (i + 3);
996 chp->cmg_chars.values[i] = chars->values[i];
998 chp->cmg_chars.values[i] = 0;
1002 int chsc_get_channel_measurement_chars(struct channel_path *chp)
1004 unsigned long flags;
1008 struct chsc_header request;
1010 u32 first_chpid : 8;
1014 struct chsc_header response;
1025 u32 data[NR_MEASUREMENT_CHARS];
1026 } __attribute__ ((packed)) *scmc_area;
1031 if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
1034 spin_lock_irqsave(&chsc_page_lock, flags);
1035 memset(chsc_page, 0, PAGE_SIZE);
1036 scmc_area = chsc_page;
1037 scmc_area->request.length = 0x0010;
1038 scmc_area->request.code = 0x0022;
1039 scmc_area->first_chpid = chp->chpid.id;
1040 scmc_area->last_chpid = chp->chpid.id;
1042 ccode = chsc(scmc_area);
1044 ret = (ccode == 3) ? -ENODEV : -EBUSY;
1048 ret = chsc_error_from_response(scmc_area->response.code);
1050 CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
1051 scmc_area->response.code);
1054 if (scmc_area->not_valid)
1057 chp->cmg = scmc_area->cmg;
1058 chp->shared = scmc_area->shared;
1059 if (chp->cmg != 2 && chp->cmg != 3) {
1060 /* No cmg-dependent data. */
1063 chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
1064 (struct cmg_chars *) &scmc_area->data);
1066 spin_unlock_irqrestore(&chsc_page_lock, flags);
1070 int __init chsc_init(void)
1074 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1075 chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1076 if (!sei_page || !chsc_page) {
1080 ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
1085 free_page((unsigned long)chsc_page);
1086 free_page((unsigned long)sei_page);
1090 void __init chsc_init_cleanup(void)
1092 crw_unregister_handler(CRW_RSC_CSS);
1093 free_page((unsigned long)chsc_page);
1094 free_page((unsigned long)sei_page);
1097 int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
1101 sda_area->request.length = 0x0400;
1102 sda_area->request.code = 0x0031;
1103 sda_area->operation_code = operation_code;
1105 ret = chsc(sda_area);
1107 ret = (ret == 3) ? -ENODEV : -EBUSY;
1111 switch (sda_area->response.code) {
1116 ret = chsc_error_from_response(sda_area->response.code);
1122 int chsc_enable_facility(int operation_code)
1124 struct chsc_sda_area *sda_area;
1125 unsigned long flags;
1128 spin_lock_irqsave(&chsc_page_lock, flags);
1129 memset(chsc_page, 0, PAGE_SIZE);
1130 sda_area = chsc_page;
1132 ret = __chsc_enable_facility(sda_area, operation_code);
1134 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
1135 operation_code, sda_area->response.code);
1137 spin_unlock_irqrestore(&chsc_page_lock, flags);
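/*
 * Usage sketch (illustrative): the css initialization code is the assumed
 * caller; CHSC_SDA_OC_MSS (enable multiple subchannel sets) is one of the
 * operation codes defined in chsc.h, and max_ssid/__MAX_SSID follow that
 * assumed caller.
 *
 *	if (chsc_enable_facility(CHSC_SDA_OC_MSS) == 0)
 *		max_ssid = __MAX_SSID;
 */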
1141 struct css_general_char css_general_characteristics;
1142 struct css_chsc_char css_chsc_characteristics;
1145 chsc_determine_css_characteristics(void)
1147 unsigned long flags;
1150 struct chsc_header request;
1154 struct chsc_header response;
1156 u32 general_char[510];
1158 } __attribute__ ((packed)) *scsc_area;
1160 spin_lock_irqsave(&chsc_page_lock, flags);
1161 memset(chsc_page, 0, PAGE_SIZE);
1162 scsc_area = chsc_page;
1163 scsc_area->request.length = 0x0010;
1164 scsc_area->request.code = 0x0010;
1166 result = chsc(scsc_area);
1168 result = (result == 3) ? -ENODEV : -EBUSY;
1172 result = chsc_error_from_response(scsc_area->response.code);
1174 memcpy(&css_general_characteristics, scsc_area->general_char,
1175 sizeof(css_general_characteristics));
1176 memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
1177 sizeof(css_chsc_characteristics));
1179 CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
1180 scsc_area->response.code);
1182 spin_unlock_irqrestore(&chsc_page_lock, flags);
1186 EXPORT_SYMBOL_GPL(css_general_characteristics);
1187 EXPORT_SYMBOL_GPL(css_chsc_characteristics);
1189 int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
1192 struct chsc_header request;
1194 unsigned int op : 8;
1195 unsigned int rsvd1 : 8;
1196 unsigned int ctrl : 16;
1197 unsigned int rsvd2[5];
1198 struct chsc_header response;
1199 unsigned int rsvd3[7];
1200 } __attribute__ ((packed)) *rr;
1203 memset(page, 0, PAGE_SIZE);
1205 rr->request.length = 0x0020;
1206 rr->request.code = 0x0033;
1212 rc = (rr->response.code == 0x0001) ? 0 : -EIO;
1216 int chsc_sstpi(void *page, void *result, size_t size)
1219 struct chsc_header request;
1220 unsigned int rsvd0[3];
1221 struct chsc_header response;
1223 } __attribute__ ((packed)) *rr;
1226 memset(page, 0, PAGE_SIZE);
1228 rr->request.length = 0x0010;
1229 rr->request.code = 0x0038;
1233 memcpy(result, &rr->data, size);
1234 return (rr->response.code == 0x0001) ? 0 : -EIO;
1237 int chsc_siosl(struct subchannel_id schid)
1240 struct chsc_header request;
1242 struct subchannel_id sid;
1244 struct chsc_header response;
1246 } __attribute__ ((packed)) *siosl_area;
1247 unsigned long flags;
1251 spin_lock_irqsave(&chsc_page_lock, flags);
1252 memset(chsc_page, 0, PAGE_SIZE);
1253 siosl_area = chsc_page;
1254 siosl_area->request.length = 0x0010;
1255 siosl_area->request.code = 0x0046;
1256 siosl_area->word1 = 0x80000000;
1257 siosl_area->sid = schid;
1259 ccode = chsc(siosl_area);
1265 CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
1266 schid.ssid, schid.sch_no, ccode);
1269 rc = chsc_error_from_response(siosl_area->response.code);
1271 CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
1272 schid.ssid, schid.sch_no,
1273 siosl_area->response.code);
1275 CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
1276 schid.ssid, schid.sch_no);
1278 spin_unlock_irqrestore(&chsc_page_lock, flags);
1281 EXPORT_SYMBOL_GPL(chsc_siosl);
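/*
 * Usage sketch (illustrative): device drivers are assumed to reach this
 * through a small wrapper on the ccw device; "cdev" is a placeholder
 * ccw device pointer.
 *
 *	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 *
 *	rc = chsc_siosl(sch->schid);
 */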
1284 * chsc_scm_info() - store SCM information (SSI)
1285 * @scm_area: request and response block for SSI
1286 * @token: continuation token
1288 * Returns 0 on success.
1290 int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
1294 memset(scm_area, 0, sizeof(*scm_area));
1295 scm_area->request.length = 0x0020;
1296 scm_area->request.code = 0x004C;
1297 scm_area->reqtok = token;
1299 ccode = chsc(scm_area);
1301 ret = (ccode == 3) ? -ENODEV : -EBUSY;
1304 ret = chsc_error_from_response(scm_area->response.code);
1306 CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
1307 scm_area->response.code);
1311 EXPORT_SYMBOL_GPL(chsc_scm_info);
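/*
 * Usage sketch (illustrative): the SCM bus code is the assumed caller and
 * loops on the continuation token returned in the response block until no
 * more data is pending ("restok" follows the assumed chsc_scm_info layout).
 *
 *	struct chsc_scm_info *scm_info;
 *	u64 token = 0;
 *	int ret;
 *
 *	scm_info = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *	if (!scm_info)
 *		return -ENOMEM;
 *	do {
 *		ret = chsc_scm_info(scm_info, token);
 *		if (ret)
 *			break;
 *		/* process the returned SCM increment list ... */
 *		token = scm_info->restok;
 *	} while (token);
 *	free_page((unsigned long)scm_info);
 */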
1314 * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
1315 * @schid: id of the subchannel on which PNSO is performed
1316 * @brinfo_area: request and response block for the operation
1317 * @resume_token: resume token for multiblock response
1318 * @cnc: Boolean change-notification control
1320 * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
1322 * Returns 0 on success.
1324 int chsc_pnso_brinfo(struct subchannel_id schid,
1325 struct chsc_pnso_area *brinfo_area,
1326 struct chsc_brinfo_resume_token resume_token,
1329 memset(brinfo_area, 0, sizeof(*brinfo_area));
1330 brinfo_area->request.length = 0x0030;
1331 brinfo_area->request.code = 0x003d; /* network-subchannel operation */
1332 brinfo_area->m = schid.m;
1333 brinfo_area->ssid = schid.ssid;
1334 brinfo_area->sch = schid.sch_no;
1335 brinfo_area->cssid = schid.cssid;
1336 brinfo_area->oc = 0; /* Store-network-bridging-information list */
1337 brinfo_area->resume_token = resume_token;
1338 brinfo_area->n = (cnc != 0);
1339 if (chsc(brinfo_area))
1341 return chsc_error_from_response(brinfo_area->response.code);
1343 EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
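/*
 * Usage sketch (illustrative): a network driver is the assumed caller; it
 * provides a zeroed page, starts with an all-zero resume token and, for a
 * multiblock response, repeats the call with the token from the previous
 * response as described in the kernel-doc above.
 *
 *	struct chsc_pnso_area *brinfo;
 *	struct chsc_brinfo_resume_token token = {};
 *	int rc;
 *
 *	brinfo = (void *)get_zeroed_page(GFP_KERNEL);
 *	if (!brinfo)
 *		return -ENOMEM;
 *	rc = chsc_pnso_brinfo(schid, brinfo, token, 1);
 *	if (!rc) {
 *		/* consume the returned bridge-information entries; a non-zero
 *		 * resume token in the response means the call has to be
 *		 * repeated with that token ... */
 *	}
 *	free_page((unsigned long)brinfo);
 */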