/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>

#include <net/route.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <linux/inetdevice.h>	/* ip_dev_find */
#include <linux/module.h>
static unsigned int dbg_level;

/* libcxgbi.h's log_debug() macro uses dbg_level, so include it after the declaration */
#include "libcxgbi.h"
#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static LIST_HEAD(cdev_rcu_list);
static DEFINE_SPINLOCK(cdev_rcu_lock);
static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age)
{
	if (age)
		*age = sw_tag & 0x7FFF;
	if (idx)
		*idx = (sw_tag >> 16) & 0x7FFF;
}
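/*
 * A quick sketch of the software tag layout assumed throughout this file
 * (see also cxgbi_build_sw_tag() below):
 *
 *	sw_tag = (task idx << 16) | session age;
 *	age    = sw_tag & 0x7FFF;
 *	idx    = (sw_tag >> 16) & 0x7FFF;
 *
 * i.e. both the libiscsi task index and the session age are assumed to fit
 * in 15 bits.
 */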
75 int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
76 unsigned int max_conn)
78 struct cxgbi_ports_map *pmap = &cdev->pmap;
80 pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
81 sizeof(struct cxgbi_sock *),
83 if (!pmap->port_csk) {
84 pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
88 pmap->max_connect = max_conn;
89 pmap->sport_base = base;
90 spin_lock_init(&pmap->lock);
93 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);
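/*
 * Note: the port map is a flat array of csk pointers; entry i corresponds to
 * source port (sport_base + i). sock_get_port()/sock_put_port() below fill
 * and clear these slots as connections come and go.
 */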
95 void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
97 struct cxgbi_ports_map *pmap = &cdev->pmap;
98 struct cxgbi_sock *csk;
101 for (i = 0; i < pmap->max_connect; i++) {
102 if (pmap->port_csk[i]) {
103 csk = pmap->port_csk[i];
104 pmap->port_csk[i] = NULL;
105 log_debug(1 << CXGBI_DBG_SOCK,
106 "csk 0x%p, cdev 0x%p, offload down.\n",
108 spin_lock_bh(&csk->lock);
109 cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
110 cxgbi_sock_closed(csk);
111 spin_unlock_bh(&csk->lock);
116 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);
118 static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
120 log_debug(1 << CXGBI_DBG_DEV,
121 "cdev 0x%p, p# %u.\n", cdev, cdev->nports);
122 cxgbi_hbas_remove(cdev);
123 cxgbi_device_portmap_cleanup(cdev);
125 cxgbi_ppm_release(cdev->cdev2ppm(cdev));
126 if (cdev->pmap.max_connect)
127 cxgbi_free_big_mem(cdev->pmap.port_csk);
131 struct cxgbi_device *cxgbi_device_register(unsigned int extra,
134 struct cxgbi_device *cdev;
136 cdev = kzalloc(sizeof(*cdev) + extra + nports *
137 (sizeof(struct cxgbi_hba *) +
138 sizeof(struct net_device *)),
141 pr_warn("nport %d, OOM.\n", nports);
144 cdev->ports = (struct net_device **)(cdev + 1);
145 cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports *
146 sizeof(struct net_device *));
148 cdev->dd_data = ((char *)cdev->hbas) +
149 nports * sizeof(struct cxgbi_hba *);
150 spin_lock_init(&cdev->pmap.lock);
152 mutex_lock(&cdev_mutex);
153 list_add_tail(&cdev->list_head, &cdev_list);
154 mutex_unlock(&cdev_mutex);
156 spin_lock(&cdev_rcu_lock);
157 list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
158 spin_unlock(&cdev_rcu_lock);
160 log_debug(1 << CXGBI_DBG_DEV,
161 "cdev 0x%p, p# %u.\n", cdev, nports);
164 EXPORT_SYMBOL_GPL(cxgbi_device_register);
166 void cxgbi_device_unregister(struct cxgbi_device *cdev)
168 log_debug(1 << CXGBI_DBG_DEV,
169 "cdev 0x%p, p# %u,%s.\n",
170 cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
172 mutex_lock(&cdev_mutex);
173 list_del(&cdev->list_head);
174 mutex_unlock(&cdev_mutex);
176 spin_lock(&cdev_rcu_lock);
177 list_del_rcu(&cdev->rcu_node);
178 spin_unlock(&cdev_rcu_lock);
181 cxgbi_device_destroy(cdev);
183 EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
185 void cxgbi_device_unregister_all(unsigned int flag)
187 struct cxgbi_device *cdev, *tmp;
189 mutex_lock(&cdev_mutex);
190 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
191 if ((cdev->flags & flag) == flag) {
192 mutex_unlock(&cdev_mutex);
193 cxgbi_device_unregister(cdev);
194 mutex_lock(&cdev_mutex);
197 mutex_unlock(&cdev_mutex);
199 EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);
201 struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
203 struct cxgbi_device *cdev, *tmp;
205 mutex_lock(&cdev_mutex);
206 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
207 if (cdev->lldev == lldev) {
208 mutex_unlock(&cdev_mutex);
212 mutex_unlock(&cdev_mutex);
214 log_debug(1 << CXGBI_DBG_DEV,
215 "lldev 0x%p, NO match found.\n", lldev);
218 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
220 struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
223 struct net_device *vdev = NULL;
224 struct cxgbi_device *cdev, *tmp;
227 if (ndev->priv_flags & IFF_802_1Q_VLAN) {
229 ndev = vlan_dev_real_dev(ndev);
230 log_debug(1 << CXGBI_DBG_DEV,
231 "vlan dev %s -> %s.\n", vdev->name, ndev->name);
234 mutex_lock(&cdev_mutex);
235 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
236 for (i = 0; i < cdev->nports; i++) {
237 if (ndev == cdev->ports[i]) {
238 cdev->hbas[i]->vdev = vdev;
239 mutex_unlock(&cdev_mutex);
246 mutex_unlock(&cdev_mutex);
247 log_debug(1 << CXGBI_DBG_DEV,
248 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
251 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
253 struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
256 struct net_device *vdev = NULL;
257 struct cxgbi_device *cdev;
260 if (ndev->priv_flags & IFF_802_1Q_VLAN) {
262 ndev = vlan_dev_real_dev(ndev);
263 pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
267 list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
268 for (i = 0; i < cdev->nports; i++) {
269 if (ndev == cdev->ports[i]) {
270 cdev->hbas[i]->vdev = vdev;
280 log_debug(1 << CXGBI_DBG_DEV,
281 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
284 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
286 #if IS_ENABLED(CONFIG_IPV6)
287 static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
290 struct net_device *vdev = NULL;
291 struct cxgbi_device *cdev, *tmp;
294 if (ndev->priv_flags & IFF_802_1Q_VLAN) {
296 ndev = vlan_dev_real_dev(ndev);
297 pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
300 mutex_lock(&cdev_mutex);
301 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
302 for (i = 0; i < cdev->nports; i++) {
303 if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
305 cdev->hbas[i]->vdev = vdev;
306 mutex_unlock(&cdev_mutex);
313 mutex_unlock(&cdev_mutex);
314 log_debug(1 << CXGBI_DBG_DEV,
315 "ndev 0x%p, %s, NO match mac found.\n",
321 void cxgbi_hbas_remove(struct cxgbi_device *cdev)
324 struct cxgbi_hba *chba;
326 log_debug(1 << CXGBI_DBG_DEV,
327 "cdev 0x%p, p#%u.\n", cdev, cdev->nports);
329 for (i = 0; i < cdev->nports; i++) {
330 chba = cdev->hbas[i];
332 cdev->hbas[i] = NULL;
333 iscsi_host_remove(chba->shost);
334 pci_dev_put(cdev->pdev);
335 iscsi_host_free(chba->shost);
339 EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
341 int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
342 unsigned int max_id, struct scsi_host_template *sht,
343 struct scsi_transport_template *stt)
345 struct cxgbi_hba *chba;
346 struct Scsi_Host *shost;
349 log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);
351 for (i = 0; i < cdev->nports; i++) {
352 shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
354 pr_info("0x%p, p%d, %s, host alloc failed.\n",
355 cdev, i, cdev->ports[i]->name);
360 shost->transportt = stt;
361 shost->max_lun = max_lun;
362 shost->max_id = max_id;
363 shost->max_channel = 0;
364 shost->max_cmd_len = 16;
366 chba = iscsi_host_priv(shost);
368 chba->ndev = cdev->ports[i];
371 log_debug(1 << CXGBI_DBG_DEV,
372 "cdev 0x%p, p#%d %s: chba 0x%p.\n",
373 cdev, i, cdev->ports[i]->name, chba);
375 pci_dev_get(cdev->pdev);
376 err = iscsi_host_add(shost, &cdev->pdev->dev);
378 pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
379 cdev, i, cdev->ports[i]->name);
380 pci_dev_put(cdev->pdev);
381 scsi_host_put(shost);
385 cdev->hbas[i] = chba;
391 cxgbi_hbas_remove(cdev);
394 EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
/*
 * - source port management
 *
 * To find a free source port in the port allocation map we use a very simple
 * rotor scheme to look for the next free port.
 *
 * If a source port has been specified make sure that it doesn't collide with
 * our normal source port allocation map.  If it's outside the range of our
 * allocation/deallocation scheme just let them use it.
 *
 * If the source port is outside our allocation range, the caller is
 * responsible for keeping track of their port usage.
 */
411 static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
412 unsigned char port_id)
414 struct cxgbi_ports_map *pmap = &cdev->pmap;
418 if (!pmap->max_connect || !pmap->used)
421 spin_lock_bh(&pmap->lock);
423 for (i = 0; used && i < pmap->max_connect; i++) {
424 struct cxgbi_sock *csk = pmap->port_csk[i];
427 if (csk->port_id == port_id) {
428 spin_unlock_bh(&pmap->lock);
434 spin_unlock_bh(&pmap->lock);
439 static int sock_get_port(struct cxgbi_sock *csk)
441 struct cxgbi_device *cdev = csk->cdev;
442 struct cxgbi_ports_map *pmap = &cdev->pmap;
447 if (!pmap->max_connect) {
448 pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
449 cdev, csk->port_id, cdev->ports[csk->port_id]->name);
450 return -EADDRNOTAVAIL;
453 if (csk->csk_family == AF_INET)
454 port = &csk->saddr.sin_port;
456 port = &csk->saddr6.sin6_port;
459 pr_err("source port NON-ZERO %u.\n",
464 spin_lock_bh(&pmap->lock);
465 if (pmap->used >= pmap->max_connect) {
466 spin_unlock_bh(&pmap->lock);
467 pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
468 cdev, csk->port_id, cdev->ports[csk->port_id]->name);
469 return -EADDRNOTAVAIL;
472 start = idx = pmap->next;
474 if (++idx >= pmap->max_connect)
476 if (!pmap->port_csk[idx]) {
478 *port = htons(pmap->sport_base + idx);
480 pmap->port_csk[idx] = csk;
481 spin_unlock_bh(&pmap->lock);
483 log_debug(1 << CXGBI_DBG_SOCK,
484 "cdev 0x%p, p#%u %s, p %u, %u.\n",
486 cdev->ports[csk->port_id]->name,
487 pmap->sport_base + idx, pmap->next);
490 } while (idx != start);
491 spin_unlock_bh(&pmap->lock);
493 /* should not happen */
494 pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
495 cdev, csk->port_id, cdev->ports[csk->port_id]->name,
497 return -EADDRNOTAVAIL;
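/*
 * Sketch of the rotor allocation above: the search starts just after
 * pmap->next and wraps at max_connect, so with e.g. sport_base = 20000 and
 * max_connect = 4, successive connections would typically be handed the
 * source ports 20001, 20002, 20003, 20000, ... (skipping any slot still in
 * use). The numbers here are only illustrative.
 */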
500 static void sock_put_port(struct cxgbi_sock *csk)
502 struct cxgbi_device *cdev = csk->cdev;
503 struct cxgbi_ports_map *pmap = &cdev->pmap;
506 if (csk->csk_family == AF_INET)
507 port = &csk->saddr.sin_port;
509 port = &csk->saddr6.sin6_port;
512 int idx = ntohs(*port) - pmap->sport_base;
515 if (idx < 0 || idx >= pmap->max_connect) {
516 pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
518 cdev->ports[csk->port_id]->name,
523 spin_lock_bh(&pmap->lock);
524 pmap->port_csk[idx] = NULL;
526 spin_unlock_bh(&pmap->lock);
528 log_debug(1 << CXGBI_DBG_SOCK,
529 "cdev 0x%p, p#%u %s, release %u.\n",
530 cdev, csk->port_id, cdev->ports[csk->port_id]->name,
531 pmap->sport_base + idx);
/*
 * iscsi tcp connection
 */
540 void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
542 if (csk->cpl_close) {
543 kfree_skb(csk->cpl_close);
544 csk->cpl_close = NULL;
546 if (csk->cpl_abort_req) {
547 kfree_skb(csk->cpl_abort_req);
548 csk->cpl_abort_req = NULL;
550 if (csk->cpl_abort_rpl) {
551 kfree_skb(csk->cpl_abort_rpl);
552 csk->cpl_abort_rpl = NULL;
555 EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);
557 static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
559 struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);
562 pr_info("alloc csk %zu failed.\n", sizeof(*csk));
566 if (cdev->csk_alloc_cpls(csk) < 0) {
567 pr_info("csk 0x%p, alloc cpls failed.\n", csk);
572 spin_lock_init(&csk->lock);
573 kref_init(&csk->refcnt);
574 skb_queue_head_init(&csk->receive_queue);
575 skb_queue_head_init(&csk->write_queue);
576 setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
577 rwlock_init(&csk->callback_lock);
580 cxgbi_sock_set_state(csk, CTP_CLOSED);
582 log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);
587 static struct rtable *find_route_ipv4(struct flowi4 *fl4,
588 __be32 saddr, __be32 daddr,
589 __be16 sport, __be16 dport, u8 tos)
593 rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
594 dport, sport, IPPROTO_TCP, tos, 0);
601 static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
603 struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
604 struct dst_entry *dst;
605 struct net_device *ndev;
606 struct cxgbi_device *cdev;
607 struct rtable *rt = NULL;
610 struct cxgbi_sock *csk = NULL;
611 unsigned int mtu = 0;
615 rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
617 pr_info("no route to ipv4 0x%x, port %u.\n",
618 be32_to_cpu(daddr->sin_addr.s_addr),
619 be16_to_cpu(daddr->sin_port));
624 n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
631 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
632 pr_info("multi-cast route %pI4, port %u, dev %s.\n",
633 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
639 if (ndev->flags & IFF_LOOPBACK) {
640 ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
646 pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
647 n->dev->name, ndev->name, mtu);
650 cdev = cxgbi_device_find_by_netdev(ndev, &port);
652 pr_info("dst %pI4, %s, NOT cxgbi device.\n",
653 &daddr->sin_addr.s_addr, ndev->name);
657 log_debug(1 << CXGBI_DBG_SOCK,
658 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
659 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
660 port, ndev->name, cdev);
662 csk = cxgbi_sock_create(cdev);
672 csk->csk_family = AF_INET;
673 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
674 csk->daddr.sin_port = daddr->sin_port;
675 csk->daddr.sin_family = daddr->sin_family;
676 csk->saddr.sin_family = daddr->sin_family;
677 csk->saddr.sin_addr.s_addr = fl4.saddr;
688 cxgbi_sock_closed(csk);
693 #if IS_ENABLED(CONFIG_IPV6)
694 static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
695 const struct in6_addr *daddr)
699 memset(&fl, 0, sizeof(fl));
701 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
703 memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
704 return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
707 static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
709 struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
710 struct dst_entry *dst;
711 struct net_device *ndev;
712 struct cxgbi_device *cdev;
713 struct rt6_info *rt = NULL;
715 struct in6_addr pref_saddr;
716 struct cxgbi_sock *csk = NULL;
717 unsigned int mtu = 0;
721 rt = find_route_ipv6(NULL, &daddr6->sin6_addr);
724 pr_info("no route to ipv6 %pI6 port %u\n",
725 daddr6->sin6_addr.s6_addr,
726 be16_to_cpu(daddr6->sin6_port));
733 n = dst_neigh_lookup(dst, &daddr6->sin6_addr);
736 pr_info("%pI6, port %u, dst no neighbour.\n",
737 daddr6->sin6_addr.s6_addr,
738 be16_to_cpu(daddr6->sin6_port));
744 if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
745 pr_info("multi-cast route %pI6 port %u, dev %s.\n",
746 daddr6->sin6_addr.s6_addr,
747 ntohs(daddr6->sin6_port), ndev->name);
752 cdev = cxgbi_device_find_by_netdev(ndev, &port);
754 cdev = cxgbi_device_find_by_mac(ndev, &port);
756 pr_info("dst %pI6 %s, NOT cxgbi device.\n",
757 daddr6->sin6_addr.s6_addr, ndev->name);
761 log_debug(1 << CXGBI_DBG_SOCK,
762 "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
763 daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
766 csk = cxgbi_sock_create(cdev);
776 if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
777 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
779 err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
780 &daddr6->sin6_addr, 0, &pref_saddr);
782 pr_info("failed to get source address to reach %pI6\n",
787 pref_saddr = rt->rt6i_prefsrc.addr;
790 csk->csk_family = AF_INET6;
791 csk->daddr6.sin6_addr = daddr6->sin6_addr;
792 csk->daddr6.sin6_port = daddr6->sin6_port;
793 csk->daddr6.sin6_family = daddr6->sin6_family;
794 csk->saddr6.sin6_family = daddr6->sin6_family;
795 csk->saddr6.sin6_addr = pref_saddr;
806 cxgbi_sock_closed(csk);
810 #endif /* IS_ENABLED(CONFIG_IPV6) */
812 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
815 csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
816 dst_confirm(csk->dst);
818 cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
820 EXPORT_SYMBOL_GPL(cxgbi_sock_established);
822 static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
824 log_debug(1 << CXGBI_DBG_SOCK,
825 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
826 csk, csk->state, csk->flags, csk->user_data);
828 if (csk->state != CTP_ESTABLISHED) {
829 read_lock_bh(&csk->callback_lock);
831 iscsi_conn_failure(csk->user_data,
832 ISCSI_ERR_TCP_CONN_CLOSE);
833 read_unlock_bh(&csk->callback_lock);
837 void cxgbi_sock_closed(struct cxgbi_sock *csk)
839 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
840 csk, (csk)->state, (csk)->flags, (csk)->tid);
841 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
842 if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
844 if (csk->saddr.sin_port)
847 dst_release(csk->dst);
848 csk->cdev->csk_release_offload_resources(csk);
849 cxgbi_sock_set_state(csk, CTP_CLOSED);
850 cxgbi_inform_iscsi_conn_closing(csk);
853 EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
855 static void need_active_close(struct cxgbi_sock *csk)
860 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
861 csk, (csk)->state, (csk)->flags, (csk)->tid);
862 spin_lock_bh(&csk->lock);
863 dst_confirm(csk->dst);
864 data_lost = skb_queue_len(&csk->receive_queue);
865 __skb_queue_purge(&csk->receive_queue);
867 if (csk->state == CTP_ACTIVE_OPEN)
868 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
869 else if (csk->state == CTP_ESTABLISHED) {
871 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
872 } else if (csk->state == CTP_PASSIVE_CLOSE) {
874 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
879 csk->cdev->csk_send_abort_req(csk);
881 csk->cdev->csk_send_close_req(csk);
884 spin_unlock_bh(&csk->lock);
887 void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
889 pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
890 csk, csk->state, csk->flags,
891 &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
892 &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
895 cxgbi_sock_set_state(csk, CTP_CONNECTING);
897 cxgbi_sock_closed(csk);
899 EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
901 void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
903 struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
905 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
906 csk, (csk)->state, (csk)->flags, (csk)->tid);
908 spin_lock_bh(&csk->lock);
909 if (csk->state == CTP_ACTIVE_OPEN)
910 cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
911 spin_unlock_bh(&csk->lock);
915 EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
917 void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
920 spin_lock_bh(&csk->lock);
922 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
923 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
924 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
925 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
926 pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
927 csk, csk->state, csk->flags, csk->tid);
928 cxgbi_sock_closed(csk);
931 spin_unlock_bh(&csk->lock);
934 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);
936 void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
938 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
939 csk, (csk)->state, (csk)->flags, (csk)->tid);
941 spin_lock_bh(&csk->lock);
943 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
946 switch (csk->state) {
947 case CTP_ESTABLISHED:
948 cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
950 case CTP_ACTIVE_CLOSE:
951 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
953 case CTP_CLOSE_WAIT_1:
954 cxgbi_sock_closed(csk);
959 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
960 csk, csk->state, csk->flags, csk->tid);
962 cxgbi_inform_iscsi_conn_closing(csk);
964 spin_unlock_bh(&csk->lock);
967 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
969 void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
971 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
972 csk, (csk)->state, (csk)->flags, (csk)->tid);
974 spin_lock_bh(&csk->lock);
976 csk->snd_una = snd_nxt - 1;
977 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
980 switch (csk->state) {
981 case CTP_ACTIVE_CLOSE:
982 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
984 case CTP_CLOSE_WAIT_1:
985 case CTP_CLOSE_WAIT_2:
986 cxgbi_sock_closed(csk);
991 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
992 csk, csk->state, csk->flags, csk->tid);
995 spin_unlock_bh(&csk->lock);
998 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
1000 void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
1001 unsigned int snd_una, int seq_chk)
1003 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1004 "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
1005 csk, csk->state, csk->flags, csk->tid, credits,
1006 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);
1008 spin_lock_bh(&csk->lock);
1010 csk->wr_cred += credits;
1011 if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
1012 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
1015 struct sk_buff *p = cxgbi_sock_peek_wr(csk);
1018 pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
1019 csk, csk->state, csk->flags, csk->tid, credits,
1020 csk->wr_cred, csk->wr_una_cred);
1024 if (unlikely(credits < p->csum)) {
1025 pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
1026 csk, csk->state, csk->flags, csk->tid,
1027 credits, csk->wr_cred, csk->wr_una_cred,
1032 cxgbi_sock_dequeue_wr(csk);
1038 cxgbi_sock_check_wr_invariants(csk);
1041 if (unlikely(before(snd_una, csk->snd_una))) {
1042 pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
1043 csk, csk->state, csk->flags, csk->tid, snd_una,
1048 if (csk->snd_una != snd_una) {
1049 csk->snd_una = snd_una;
1050 dst_confirm(csk->dst);
1054 if (skb_queue_len(&csk->write_queue)) {
1055 if (csk->cdev->csk_push_tx_frames(csk, 0))
1056 cxgbi_conn_tx_open(csk);
1058 cxgbi_conn_tx_open(csk);
1060 spin_unlock_bh(&csk->lock);
1062 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
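/*
 * Note on the WR credit bookkeeping above: each skb queued for transmission
 * has the number of hardware work-request credits it consumes recorded (by
 * the cxgb3i/cxgb4i code) in skb->csum, which is what the p->csum check in
 * cxgbi_sock_rcv_wr_ack() compares against. Acked credits are returned to
 * wr_cred and fully-acked skbs are popped off the pending-WR queue.
 */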
1064 static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
1069 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
1075 unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
1078 struct dst_entry *dst = csk->dst;
1080 csk->advmss = dst_metric_advmss(dst);
1082 if (csk->advmss > pmtu - 40)
1083 csk->advmss = pmtu - 40;
1084 if (csk->advmss < csk->cdev->mtus[0] - 40)
1085 csk->advmss = csk->cdev->mtus[0] - 40;
1086 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);
1090 EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
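/*
 * Note: the 40 bytes subtracted above roughly account for the IPv4 + TCP
 * headers; advmss is clamped between the smallest hardware MTU entry and the
 * path MTU, and the index of the closest hardware MTU table entry is
 * returned for the caller to program into the connection.
 */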
1092 void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
1094 cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
1095 __skb_queue_tail(&csk->write_queue, skb);
1097 EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);
1099 void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
1101 struct sk_buff *skb;
1103 while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
1106 EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);
1108 void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
1110 int pending = cxgbi_sock_count_pending_wrs(csk);
1112 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
1113 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1114 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
1116 EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
1118 static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
1120 struct cxgbi_device *cdev = csk->cdev;
1121 struct sk_buff *next;
1122 int err, copied = 0;
1124 spin_lock_bh(&csk->lock);
1126 if (csk->state != CTP_ESTABLISHED) {
1127 log_debug(1 << CXGBI_DBG_PDU_TX,
1128 "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
1129 csk, csk->state, csk->flags, csk->tid);
1135 log_debug(1 << CXGBI_DBG_PDU_TX,
1136 "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
1137 csk, csk->state, csk->flags, csk->tid, csk->err);
1142 if (csk->write_seq - csk->snd_una >= csk->snd_win) {
1143 log_debug(1 << CXGBI_DBG_PDU_TX,
1144 "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
1145 csk, csk->state, csk->flags, csk->tid, csk->write_seq,
1146 csk->snd_una, csk->snd_win);
1152 int frags = skb_shinfo(skb)->nr_frags +
1153 (skb->len != skb->data_len);
1155 if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
1156 pr_err("csk 0x%p, skb head %u < %u.\n",
1157 csk, skb_headroom(skb), cdev->skb_tx_rsvd);
1162 if (frags >= SKB_WR_LIST_SIZE) {
1163 pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
1164 csk, skb_shinfo(skb)->nr_frags, skb->len,
1165 skb->data_len, (uint)(SKB_WR_LIST_SIZE));
1172 cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
1173 cxgbi_sock_skb_entail(csk, skb);
1175 csk->write_seq += skb->len +
1176 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
1180 if (likely(skb_queue_len(&csk->write_queue)))
1181 cdev->csk_push_tx_frames(csk, 1);
1182 spin_unlock_bh(&csk->lock);
1186 if (copied == 0 && err == -EPIPE)
1187 copied = csk->err ? csk->err : -EPIPE;
1194 scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
1195 unsigned int *sgcnt, unsigned int *dlen,
1198 struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc);
1200 *sgl = sdb->table.sgl;
1201 *sgcnt = sdb->table.nents;
1202 *dlen = sdb->length;
1203 /* Caution: for protection sdb, sdb->length is invalid */
1206 void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod,
1207 struct cxgbi_task_tag_info *ttinfo,
1208 struct scatterlist **sg_pp, unsigned int *sg_off)
1210 struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
1211 unsigned int offset = sg_off ? *sg_off : 0;
1212 dma_addr_t addr = 0UL;
1213 unsigned int len = 0;
1216 memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));
1219 addr = sg_dma_address(sg);
1220 len = sg_dma_len(sg);
1223 for (i = 0; i < PPOD_PAGES_MAX; i++) {
1225 ppod->addr[i] = cpu_to_be64(addr + offset);
1226 offset += PAGE_SIZE;
1227 if (offset == (len + sg->offset)) {
1231 addr = sg_dma_address(sg);
1232 len = sg_dma_len(sg);
1236 ppod->addr[i] = 0ULL;
	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
1249 if (offset == len) {
1253 addr = sg_dma_address(sg);
1254 len = sg_dma_len(sg);
1257 ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
1259 EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod);
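/*
 * Note on the loop above: each page-pod (ppod) carries a header plus
 * PPOD_PAGES_MAX page addresses, and the last address is repeated as the
 * first address of the following ppod so a buffer can be followed across
 * ppod boundaries; sg_pp/sg_off carry the scatterlist walk state from one
 * ppod to the next.
 */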
/*
 * APIs interacting with open-iscsi libraries
 */

static unsigned char padding[4];
1267 void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
1268 struct cxgbi_tag_format *tformat, unsigned int ppmax,
1269 unsigned int llimit, unsigned int start,
1270 unsigned int rsvd_factor)
1272 int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
1273 cdev->lldev, tformat, ppmax, llimit, start,
1277 struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
1279 if (ppm->ppmax < 1024 ||
1280 ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX)
1281 cdev->flags |= CXGBI_FLAG_DDP_OFF;
1284 cdev->flags |= CXGBI_FLAG_DDP_OFF;
1287 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);
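/*
 * If the ppm cannot be initialized, or it ends up with fewer than 1024
 * page-pods or without a usable default page-size index, DDP is simply
 * turned off for this device (CXGBI_FLAG_DDP_OFF) and cxgbi_ddp_reserve()
 * below falls back to plain (non-DDP) tags.
 */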
1289 static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents)
1292 int last_sgidx = nents - 1;
1293 struct scatterlist *sg = sgl;
1295 for (i = 0; i < nents; i++, sg = sg_next(sg)) {
1296 unsigned int len = sg->length + sg->offset;
1298 if ((sg->offset & 0x3) || (i && sg->offset) ||
1299 ((i != last_sgidx) && len != PAGE_SIZE)) {
1300 log_debug(1 << CXGBI_DBG_DDP,
1301 "sg %u/%u, %u,%u, not aligned.\n",
1302 i, nents, sg->offset, sg->length);
1311 static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn,
1312 struct cxgbi_task_data *tdata, u32 sw_tag,
1313 unsigned int xferlen)
1315 struct cxgbi_sock *csk = cconn->cep->csk;
1316 struct cxgbi_device *cdev = csk->cdev;
1317 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
1318 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
1319 struct scatterlist *sgl = ttinfo->sgl;
1320 unsigned int sgcnt = ttinfo->nents;
1321 unsigned int sg_offset = sgl->offset;
1324 if (cdev->flags & CXGBI_FLAG_DDP_OFF) {
1325 log_debug(1 << CXGBI_DBG_DDP,
1326 "cdev 0x%p DDP off.\n", cdev);
1330 if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt ||
1331 ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) {
1332 log_debug(1 << CXGBI_DBG_DDP,
1333 "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
1334 ppm, ppm ? ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX,
1335 xferlen, ttinfo->nents);
1339 /* make sure the buffer is suitable for ddp */
1340 if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0)
1343 ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >>
	/*
	 * The ddp tag will be used for the itt in the outgoing pdu.
	 * The itt generated by libiscsi is saved in the ppm and can be
	 * retrieved via the ddp tag.
	 */
1351 err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
1352 &ttinfo->tag, (unsigned long)sw_tag);
1357 ttinfo->npods = err;
1359 /* setup dma from scsi command sgl */
1361 err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
1362 sgl->offset = sg_offset;
1364 pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
1365 __func__, sw_tag, xferlen, sgcnt);
1368 if (err != ttinfo->nr_pages) {
1369 log_debug(1 << CXGBI_DBG_DDP,
1370 "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n",
1371 __func__, sw_tag, xferlen, sgcnt, err);
1374 ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED;
1375 ttinfo->cid = csk->port_id;
1377 cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
1378 xferlen, &ttinfo->hdr);
1380 if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) {
1381 /* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */
1382 ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID;
1384 /* write ppod from control queue now */
1385 err = cdev->csk_ddp_set_map(ppm, csk, ttinfo);
1393 cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
1395 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) {
1396 ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED;
1397 dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
1402 static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
1404 struct scsi_cmnd *sc = task->sc;
1405 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
1406 struct cxgbi_conn *cconn = tcp_conn->dd_data;
1407 struct cxgbi_device *cdev = cconn->chba->cdev;
1408 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
1409 u32 tag = ntohl((__force u32)hdr_itt);
1411 log_debug(1 << CXGBI_DBG_DDP,
1412 "cdev 0x%p, task 0x%p, release tag 0x%x.\n",
1415 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
1416 cxgbi_ppm_is_ddp_tag(ppm, tag)) {
1417 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
1418 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
1420 if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ))
1421 cdev->csk_ddp_clear_map(cdev, ppm, ttinfo);
1422 cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
1423 dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
1428 static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age)
1430 /* assume idx and age both are < 0x7FFF (32767) */
1431 return (idx << 16) | age;
1434 static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
1436 struct scsi_cmnd *sc = task->sc;
1437 struct iscsi_conn *conn = task->conn;
1438 struct iscsi_session *sess = conn->session;
1439 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1440 struct cxgbi_conn *cconn = tcp_conn->dd_data;
1441 struct cxgbi_device *cdev = cconn->chba->cdev;
1442 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
1443 u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age);
1448 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)
1450 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
1451 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
1453 scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents,
1455 err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen);
1459 log_debug(1 << CXGBI_DBG_DDP,
1460 "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
1461 cconn->cep->csk, task, tdata->dlen,
1466 err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag);
	/* the itt needs to be sent in big-endian order */
1471 *hdr_itt = (__force itt_t)htonl(tag);
1473 log_debug(1 << CXGBI_DBG_DDP,
1474 "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
1475 cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
1479 void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
1481 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1482 struct cxgbi_conn *cconn = tcp_conn->dd_data;
1483 struct cxgbi_device *cdev = cconn->chba->cdev;
1484 struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
1485 u32 tag = ntohl((__force u32)itt);
1489 if (cxgbi_ppm_is_ddp_tag(ppm, tag))
1490 sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag);
1492 sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag);
1497 cxgbi_decode_sw_tag(sw_bits, idx, age);
1498 log_debug(1 << CXGBI_DBG_DDP,
1499 "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
1500 cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
1503 EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
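/*
 * Tag round trip: on the wire the itt carries either a hardware DDP tag or a
 * "non-ddp" tag built by the ppm. Either way the original software tag
 * (task index + session age) is recovered here, so libiscsi never sees the
 * hardware encoding.
 */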
1505 void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
1507 struct iscsi_conn *conn = csk->user_data;
1510 log_debug(1 << CXGBI_DBG_SOCK,
1511 "csk 0x%p, cid %d.\n", csk, conn->id);
1512 iscsi_conn_queue_work(conn);
1515 EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
/*
 * pdu receive, interact with libiscsi_tcp
 */
1520 static inline int read_pdu_skb(struct iscsi_conn *conn,
1521 struct sk_buff *skb,
1522 unsigned int offset,
1528 bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
1530 case ISCSI_TCP_CONN_ERR:
1531 pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
1532 skb, offset, offloaded);
1534 case ISCSI_TCP_SUSPENDED:
1535 log_debug(1 << CXGBI_DBG_PDU_RX,
1536 "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
1537 skb, offset, offloaded, bytes_read);
1538 /* no transfer - just have caller flush queue */
1540 case ISCSI_TCP_SKB_DONE:
1541 pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
1542 skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notification.
		 */
1547 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
1549 case ISCSI_TCP_SEGMENT_DONE:
1550 log_debug(1 << CXGBI_DBG_PDU_RX,
1551 "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
1552 skb, offset, offloaded, bytes_read);
1555 pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
1556 skb, offset, offloaded, status);
1561 static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
1563 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1565 log_debug(1 << CXGBI_DBG_PDU_RX,
1566 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1567 conn, skb, skb->len, cxgbi_skcb_flags(skb));
1569 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
1570 pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
1571 iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
1575 if (conn->hdrdgst_en &&
1576 cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
1577 pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
1578 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
1582 return read_pdu_skb(conn, skb, 0, 0);
1585 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
1586 struct sk_buff *skb, unsigned int offset)
1588 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1590 int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
1592 log_debug(1 << CXGBI_DBG_PDU_RX,
1593 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
1594 conn, skb, skb->len, cxgbi_skcb_flags(skb));
1596 if (conn->datadgst_en &&
1597 cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
1598 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
1599 conn, lskb, cxgbi_skcb_flags(lskb));
1600 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
1604 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
1607 /* coalesced, add header digest length */
1608 if (lskb == skb && conn->hdrdgst_en)
1609 offset += ISCSI_DIGEST_SIZE;
1611 if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
1614 if (opcode == ISCSI_OP_SCSI_DATA_IN)
1615 log_debug(1 << CXGBI_DBG_PDU_RX,
1616 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
1617 skb, opcode, ntohl(tcp_conn->in.hdr->itt),
1618 tcp_conn->in.datalen, offloaded ? "is" : "not");
1620 return read_pdu_skb(conn, skb, offset, offloaded);
1623 static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
1625 struct cxgbi_device *cdev = csk->cdev;
1629 log_debug(1 << CXGBI_DBG_PDU_RX,
1630 "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
1631 csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
1632 csk->rcv_wup, cdev->rx_credit_thres,
1635 if (csk->state != CTP_ESTABLISHED)
1638 credits = csk->copied_seq - csk->rcv_wup;
1639 if (unlikely(!credits))
1641 if (unlikely(cdev->rx_credit_thres == 0))
1644 must_send = credits + 16384 >= csk->rcv_win;
1645 if (must_send || credits >= cdev->rx_credit_thres)
1646 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
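/*
 * Rx flow control: bytes copied up to libiscsi are returned to the hardware
 * as rx credits either once they reach rx_credit_thres, or as soon as less
 * than ~16KB of the advertised receive window remains un-returned.
 */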
1649 void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
1651 struct cxgbi_device *cdev = csk->cdev;
1652 struct iscsi_conn *conn = csk->user_data;
1653 struct sk_buff *skb;
1654 unsigned int read = 0;
1657 log_debug(1 << CXGBI_DBG_PDU_RX,
1658 "csk 0x%p, conn 0x%p.\n", csk, conn);
1660 if (unlikely(!conn || conn->suspend_rx)) {
1661 log_debug(1 << CXGBI_DBG_PDU_RX,
1662 "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
1663 csk, conn, conn ? conn->id : 0xFF,
1664 conn ? conn->suspend_rx : 0xFF);
1669 skb = skb_peek(&csk->receive_queue);
1671 !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
1673 log_debug(1 << CXGBI_DBG_PDU_RX,
1674 "skb 0x%p, NOT ready 0x%lx.\n",
1675 skb, cxgbi_skcb_flags(skb));
1678 __skb_unlink(skb, &csk->receive_queue);
1680 read += cxgbi_skcb_rx_pdulen(skb);
1681 log_debug(1 << CXGBI_DBG_PDU_RX,
1682 "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
1683 csk, skb, skb->len, cxgbi_skcb_flags(skb),
1684 cxgbi_skcb_rx_pdulen(skb));
1686 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
1687 err = skb_read_pdu_bhs(conn, skb);
1689 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
1690 "f 0x%lx, plen %u.\n",
1692 cxgbi_skcb_flags(skb),
1693 cxgbi_skcb_rx_pdulen(skb));
1696 err = skb_read_pdu_data(conn, skb, skb,
1697 err + cdev->skb_rx_extra);
1699 pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
1700 "f 0x%lx, plen %u.\n",
1702 cxgbi_skcb_flags(skb),
1703 cxgbi_skcb_rx_pdulen(skb));
1705 err = skb_read_pdu_bhs(conn, skb);
1707 pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
1708 "f 0x%lx, plen %u.\n",
1710 cxgbi_skcb_flags(skb),
1711 cxgbi_skcb_rx_pdulen(skb));
1715 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
1716 struct sk_buff *dskb;
1718 dskb = skb_peek(&csk->receive_queue);
1720 pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
1721 " plen %u, NO data.\n",
1723 cxgbi_skcb_flags(skb),
1724 cxgbi_skcb_rx_pdulen(skb));
1728 __skb_unlink(dskb, &csk->receive_queue);
1730 err = skb_read_pdu_data(conn, skb, dskb, 0);
1732 pr_err("data, csk 0x%p, skb 0x%p,%u, "
1733 "f 0x%lx, plen %u, dskb 0x%p,"
1736 cxgbi_skcb_flags(skb),
1737 cxgbi_skcb_rx_pdulen(skb),
1741 err = skb_read_pdu_data(conn, skb, skb, 0);
1750 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
1752 csk->copied_seq += read;
1753 csk_return_rx_credits(csk, read);
1754 conn->rxdata_octets += read;
1758 pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
1759 csk, conn, err, read);
1760 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1763 EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
1765 static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
1766 unsigned int offset, unsigned int *off,
1767 struct scatterlist **sgp)
1770 struct scatterlist *sg;
1772 for_each_sg(sgl, sg, sgcnt, i) {
1773 if (offset < sg->length) {
1778 offset -= sg->length;
1783 static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
1784 unsigned int dlen, struct page_frag *frags,
1787 unsigned int datalen = dlen;
1788 unsigned int sglen = sg->length - sgoffset;
1789 struct page *page = sg_page(sg);
1799 pr_warn("sg %d NULL, len %u/%u.\n",
1808 copy = min(datalen, sglen);
1809 if (i && page == frags[i - 1].page &&
1810 sgoffset + sg->offset ==
1811 frags[i - 1].offset + frags[i - 1].size) {
1812 frags[i - 1].size += copy;
1814 if (i >= frag_max) {
1815 pr_warn("too many pages %u, dlen %u.\n",
1820 frags[i].page = page;
1821 frags[i].offset = sg->offset + sgoffset;
1822 frags[i].size = copy;
1833 int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
1835 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
1836 struct cxgbi_conn *cconn = tcp_conn->dd_data;
1837 struct cxgbi_device *cdev = cconn->chba->cdev;
1838 struct iscsi_conn *conn = task->conn;
1839 struct iscsi_tcp_task *tcp_task = task->dd_data;
1840 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
1841 struct scsi_cmnd *sc = task->sc;
1842 int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
1844 tcp_task->dd_data = tdata;
1847 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
1848 (opcode == ISCSI_OP_SCSI_DATA_OUT ||
1849 (opcode == ISCSI_OP_SCSI_CMD &&
1850 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		/* data can go into the skb head */
1852 headroom += min_t(unsigned int,
1853 SKB_MAX_HEAD(cdev->skb_tx_rsvd),
1854 conn->max_xmit_dlength);
1856 tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
1858 struct cxgbi_sock *csk = cconn->cep->csk;
1859 struct net_device *ndev = cdev->ports[csk->port_id];
1860 ndev->stats.tx_dropped++;
1864 skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
1865 task->hdr = (struct iscsi_hdr *)tdata->skb->data;
1866 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
1868 /* data_out uses scsi_cmd's itt */
1869 if (opcode != ISCSI_OP_SCSI_DATA_OUT)
1870 task_reserve_itt(task, &task->hdr->itt);
1872 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
1873 "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
1874 task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
1875 conn->max_xmit_dlength, ntohl(task->hdr->itt));
1879 EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
1881 static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
1890 cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
1892 cxgbi_skcb_ulp_mode(skb) = 0;
1895 int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
1898 struct iscsi_conn *conn = task->conn;
1899 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
1900 struct sk_buff *skb = tdata->skb;
1901 unsigned int datalen = count;
1902 int i, padlen = iscsi_padding(count);
1905 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
1906 "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
1907 task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
1908 ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);
1910 skb_put(skb, task->hdr_len);
1911 tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
1916 struct scsi_data_buffer *sdb = scsi_out(task->sc);
1917 struct scatterlist *sg = NULL;
1920 tdata->offset = offset;
1921 tdata->count = count;
1922 err = sgl_seek_offset(
1923 sdb->table.sgl, sdb->table.nents,
1924 tdata->offset, &tdata->sgoffset, &sg);
1926 pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
1927 sdb->table.nents, tdata->offset, sdb->length);
1930 err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
1931 tdata->frags, MAX_PDU_FRAGS);
1933 pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
1934 sdb->table.nents, tdata->offset, tdata->count);
1937 tdata->nr_frags = err;
1939 if (tdata->nr_frags > MAX_SKB_FRAGS ||
1940 (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
1941 char *dst = skb->data + task->hdr_len;
1942 struct page_frag *frag = tdata->frags;
1944 /* data fits in the skb's headroom */
1945 for (i = 0; i < tdata->nr_frags; i++, frag++) {
1946 char *src = kmap_atomic(frag->page);
1948 memcpy(dst, src+frag->offset, frag->size);
1953 memset(dst, 0, padlen);
1956 skb_put(skb, count + padlen);
		/* data fits into the skb frag list */
1959 for (i = 0; i < tdata->nr_frags; i++) {
1960 __skb_fill_page_desc(skb, i,
1961 tdata->frags[i].page,
1962 tdata->frags[i].offset,
1963 tdata->frags[i].size);
1964 skb_frag_ref(skb, i);
1966 skb_shinfo(skb)->nr_frags = tdata->nr_frags;
1968 skb->data_len += count;
1969 skb->truesize += count;
1973 pg = virt_to_page(task->data);
1976 skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
1979 skb->data_len += count;
1980 skb->truesize += count;
1984 i = skb_shinfo(skb)->nr_frags;
1985 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1986 virt_to_page(padding), offset_in_page(padding),
1989 skb->data_len += padlen;
1990 skb->truesize += padlen;
1996 EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
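/*
 * Payload placement in cxgbi_conn_init_pdu() above, for scsi payloads: if
 * the data spans more page fragments than one skb can carry, it is copied
 * into the skb head together with any pad bytes; otherwise the scatterlist
 * pages are attached to the skb as frags and only the pad, if any, is
 * appended from the static padding page.
 */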
1998 int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
2000 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
2001 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2002 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2003 struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
2004 struct sk_buff *skb = tdata->skb;
2005 struct cxgbi_sock *csk = NULL;
2006 unsigned int datalen;
2010 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2011 "task 0x%p, skb NULL.\n", task);
2015 if (cconn && cconn->cep)
2016 csk = cconn->cep->csk;
2018 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2019 "task 0x%p, csk gone.\n", task);
2023 datalen = skb->data_len;
2026 /* write ppod first if using ofldq to write ppod */
2027 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
2028 struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev);
2030 ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID;
2031 if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0)
2032 pr_err("task 0x%p, ppod writing using ofldq failed.\n",
2034 /* continue. Let fl get the data */
2037 err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
2041 log_debug(1 << CXGBI_DBG_PDU_TX,
2042 "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
2043 task, task->sc, skb, skb->len, skb->data_len, err);
2045 if (task->conn->hdrdgst_en)
2046 pdulen += ISCSI_DIGEST_SIZE;
2048 if (datalen && task->conn->datadgst_en)
2049 pdulen += ISCSI_DIGEST_SIZE;
2051 task->conn->txdata_octets += pdulen;
2055 if (err == -EAGAIN || err == -ENOBUFS) {
2056 log_debug(1 << CXGBI_DBG_PDU_TX,
2057 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
2058 task, skb, skb->len, skb->data_len, err);
2059 /* reset skb to send when we are called again */
2064 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
2065 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
2066 task->itt, skb, skb->len, skb->data_len, err);
2070 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
2071 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
2074 EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
2076 void cxgbi_cleanup_task(struct iscsi_task *task)
2078 struct iscsi_tcp_task *tcp_task = task->dd_data;
2079 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2081 log_debug(1 << CXGBI_DBG_ISCSI,
2082 "task 0x%p, skb 0x%p, itt 0x%x.\n",
2083 task, tdata->skb, task->hdr_itt);
2085 tcp_task->dd_data = NULL;
2086 /* never reached the xmit task callout */
2088 __kfree_skb(tdata->skb);
2090 task_release_itt(task, task->hdr_itt);
2091 memset(tdata, 0, sizeof(*tdata));
2093 iscsi_tcp_cleanup_task(task);
2095 EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
2097 void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
2098 struct iscsi_stats *stats)
2100 struct iscsi_conn *conn = cls_conn->dd_data;
2102 stats->txdata_octets = conn->txdata_octets;
2103 stats->rxdata_octets = conn->rxdata_octets;
2104 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
2105 stats->dataout_pdus = conn->dataout_pdus_cnt;
2106 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
2107 stats->datain_pdus = conn->datain_pdus_cnt;
2108 stats->r2t_pdus = conn->r2t_pdus_cnt;
2109 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
2110 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
2111 stats->digest_err = 0;
2112 stats->timeout_err = 0;
2113 stats->custom_length = 1;
2114 strcpy(stats->custom[0].desc, "eh_abort_cnt");
2115 stats->custom[0].value = conn->eh_abort_cnt;
2117 EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
2119 static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
2121 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2122 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2123 struct cxgbi_device *cdev = cconn->chba->cdev;
2124 unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
2125 unsigned int max_def = 512 * MAX_SKB_FRAGS;
2126 unsigned int max = max(max_def, headroom);
2128 max = min(cconn->chba->cdev->tx_max_size, max);
2129 if (conn->max_xmit_dlength)
2130 conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
2132 conn->max_xmit_dlength = max;
2133 cxgbi_align_pdu_size(conn->max_xmit_dlength);
2138 static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
2140 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2141 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2142 unsigned int max = cconn->chba->cdev->rx_max_size;
2144 cxgbi_align_pdu_size(max);
2146 if (conn->max_recv_dlength) {
2147 if (conn->max_recv_dlength > max) {
2148 pr_err("MaxRecvDataSegmentLength %u > %u.\n",
2149 conn->max_recv_dlength, max);
2152 conn->max_recv_dlength = min(conn->max_recv_dlength, max);
2153 cxgbi_align_pdu_size(conn->max_recv_dlength);
2155 conn->max_recv_dlength = max;
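/*
 * Both data segment limits negotiated with the target are capped by the
 * adapter limits (tx_max_size / rx_max_size) and then aligned by
 * cxgbi_align_pdu_size(); a MaxRecvDataSegmentLength above rx_max_size is
 * flagged as an error.
 */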
2160 int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2161 enum iscsi_param param, char *buf, int buflen)
2163 struct iscsi_conn *conn = cls_conn->dd_data;
2164 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2165 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2166 struct cxgbi_sock *csk = cconn->cep->csk;
2169 log_debug(1 << CXGBI_DBG_ISCSI,
2170 "cls_conn 0x%p, param %d, buf(%d) %s.\n",
2171 cls_conn, param, buflen, buf);
2174 case ISCSI_PARAM_HDRDGST_EN:
2175 err = iscsi_set_param(cls_conn, param, buf, buflen);
2176 if (!err && conn->hdrdgst_en)
2177 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2179 conn->datadgst_en, 0);
2181 case ISCSI_PARAM_DATADGST_EN:
2182 err = iscsi_set_param(cls_conn, param, buf, buflen);
2183 if (!err && conn->datadgst_en)
2184 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2186 conn->datadgst_en, 0);
2188 case ISCSI_PARAM_MAX_R2T:
2189 return iscsi_tcp_set_max_r2t(conn, buf);
2190 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2191 err = iscsi_set_param(cls_conn, param, buf, buflen);
2193 err = cxgbi_conn_max_recv_dlength(conn);
2195 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2196 err = iscsi_set_param(cls_conn, param, buf, buflen);
2198 err = cxgbi_conn_max_xmit_dlength(conn);
2201 return iscsi_set_param(cls_conn, param, buf, buflen);
2205 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
2207 static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
2211 cxgbi_sock_get(csk);
2212 len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
2213 cxgbi_sock_put(csk);
2218 static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
2222 cxgbi_sock_get(csk);
2223 if (csk->csk_family == AF_INET)
2224 len = sprintf(buf, "%pI4",
2225 &csk->daddr.sin_addr.s_addr);
2227 len = sprintf(buf, "%pI6",
2228 &csk->daddr6.sin6_addr);
2230 cxgbi_sock_put(csk);
2235 int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
2238 struct cxgbi_endpoint *cep = ep->dd_data;
2239 struct cxgbi_sock *csk;
2242 log_debug(1 << CXGBI_DBG_ISCSI,
2243 "cls_conn 0x%p, param %d.\n", ep, param);
2246 case ISCSI_PARAM_CONN_PORT:
2247 case ISCSI_PARAM_CONN_ADDRESS:
2255 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
2256 &csk->daddr, param, buf);
2262 EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
2264 struct iscsi_cls_conn *
2265 cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
2267 struct iscsi_cls_conn *cls_conn;
2268 struct iscsi_conn *conn;
2269 struct iscsi_tcp_conn *tcp_conn;
2270 struct cxgbi_conn *cconn;
2272 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
2276 conn = cls_conn->dd_data;
2277 tcp_conn = conn->dd_data;
2278 cconn = tcp_conn->dd_data;
2279 cconn->iconn = conn;
2281 log_debug(1 << CXGBI_DBG_ISCSI,
2282 "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
2283 cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);
2287 EXPORT_SYMBOL_GPL(cxgbi_create_conn);
2289 int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
2290 struct iscsi_cls_conn *cls_conn,
2291 u64 transport_eph, int is_leading)
2293 struct iscsi_conn *conn = cls_conn->dd_data;
2294 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2295 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2296 struct cxgbi_ppm *ppm;
2297 struct iscsi_endpoint *ep;
2298 struct cxgbi_endpoint *cep;
2299 struct cxgbi_sock *csk;
2302 ep = iscsi_lookup_endpoint(transport_eph);
2306 /* setup ddp pagesize */
2310 ppm = csk->cdev->cdev2ppm(csk->cdev);
2311 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
2312 ppm->tformat.pgsz_idx_dflt, 0);
2316 err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
2320 /* calculate the tag idx bits needed for this conn based on cmds_max */
2321 cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
2323 write_lock_bh(&csk->callback_lock);
2324 csk->user_data = conn;
2325 cconn->chba = cep->chba;
2328 write_unlock_bh(&csk->callback_lock);
2330 cxgbi_conn_max_xmit_dlength(conn);
2331 cxgbi_conn_max_recv_dlength(conn);
2333 log_debug(1 << CXGBI_DBG_ISCSI,
2334 "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
2335 cls_session, cls_conn, ep, cconn, csk);
2336 /* init recv engine */
2337 iscsi_tcp_hdr_recv_prep(tcp_conn);
2341 EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
2343 struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
2344 u16 cmds_max, u16 qdepth,
2347 struct cxgbi_endpoint *cep;
2348 struct cxgbi_hba *chba;
2349 struct Scsi_Host *shost;
2350 struct iscsi_cls_session *cls_session;
2351 struct iscsi_session *session;
2354 pr_err("missing endpoint.\n");
2360 shost = chba->shost;
2362 BUG_ON(chba != iscsi_host_priv(shost));
2364 cls_session = iscsi_session_setup(chba->cdev->itp, shost,
2366 sizeof(struct iscsi_tcp_task) +
2367 sizeof(struct cxgbi_task_data),
2368 initial_cmdsn, ISCSI_MAX_TARGET);
2372 session = cls_session->dd_data;
2373 if (iscsi_tcp_r2tpool_alloc(session))
2374 goto remove_session;
2376 log_debug(1 << CXGBI_DBG_ISCSI,
2377 "ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
2381 iscsi_session_teardown(cls_session);
2384 EXPORT_SYMBOL_GPL(cxgbi_create_session);
2386 void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
2388 log_debug(1 << CXGBI_DBG_ISCSI,
2389 "cls sess 0x%p.\n", cls_session);
2391 iscsi_tcp_r2tpool_free(cls_session->dd_data);
2392 iscsi_session_teardown(cls_session);
2394 EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
2396 int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2397 char *buf, int buflen)
2399 struct cxgbi_hba *chba = iscsi_host_priv(shost);
		shost_printk(KERN_ERR, shost,
			     "Could not set host param. netdev for host not set.\n");
2407 log_debug(1 << CXGBI_DBG_ISCSI,
2408 "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
2409 shost, chba, chba->ndev->name, param, buflen, buf);
2412 case ISCSI_HOST_PARAM_IPADDRESS:
2414 __be32 addr = in_aton(buf);
2415 log_debug(1 << CXGBI_DBG_ISCSI,
2416 "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
2417 cxgbi_set_iscsi_ipv4(chba, addr);
2420 case ISCSI_HOST_PARAM_HWADDRESS:
2421 case ISCSI_HOST_PARAM_NETDEV_NAME:
2424 return iscsi_host_set_param(shost, param, buf, buflen);
2427 EXPORT_SYMBOL_GPL(cxgbi_set_host_param);
2429 int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2432 struct cxgbi_hba *chba = iscsi_host_priv(shost);
2436 shost_printk(KERN_ERR, shost, "Could not get host param. "
2437 "netdev for host not set.\n");
2441 log_debug(1 << CXGBI_DBG_ISCSI,
2442 "shost 0x%p, hba 0x%p,%s, param %d.\n",
2443 shost, chba, chba->ndev->name, param);
2446 case ISCSI_HOST_PARAM_HWADDRESS:
2447 len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
2449 case ISCSI_HOST_PARAM_NETDEV_NAME:
2450 len = sprintf(buf, "%s\n", chba->ndev->name);
2452 case ISCSI_HOST_PARAM_IPADDRESS:
2454 struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
2457 len = sprintf(buf, "%pIS",
2458 (struct sockaddr *)&csk->saddr);
2460 log_debug(1 << CXGBI_DBG_ISCSI,
2461 "hba %s, addr %s.\n", chba->ndev->name, buf);
2465 return iscsi_host_get_param(shost, param, buf);
2470 EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
2472 struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
2473 struct sockaddr *dst_addr,
2476 struct iscsi_endpoint *ep;
2477 struct cxgbi_endpoint *cep;
2478 struct cxgbi_hba *hba = NULL;
2479 struct cxgbi_sock *csk;
2482 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
2483 "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
2484 shost, non_blocking, dst_addr);
2487 hba = iscsi_host_priv(shost);
2489 pr_info("shost 0x%p, priv NULL.\n", shost);
2494 if (dst_addr->sa_family == AF_INET) {
2495 csk = cxgbi_check_route(dst_addr);
2496 #if IS_ENABLED(CONFIG_IPV6)
2497 } else if (dst_addr->sa_family == AF_INET6) {
2498 csk = cxgbi_check_route6(dst_addr);
2501 pr_info("address family 0x%x NOT supported.\n",
2502 dst_addr->sa_family);
2503 err = -EAFNOSUPPORT;
2504 return (struct iscsi_endpoint *)ERR_PTR(err);
2508 return (struct iscsi_endpoint *)csk;
2509 cxgbi_sock_get(csk);
2512 hba = csk->cdev->hbas[csk->port_id];
2513 else if (hba != csk->cdev->hbas[csk->port_id]) {
		pr_info("Could not connect through requested host %u, hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
2522 err = sock_get_port(csk);
2526 cxgbi_sock_set_state(csk, CTP_CONNECTING);
2527 err = csk->cdev->csk_init_act_open(csk);
2531 if (cxgbi_sock_is_closing(csk)) {
2533 pr_info("csk 0x%p is closing.\n", csk);
2537 ep = iscsi_create_endpoint(sizeof(*cep));
2540 pr_info("iscsi alloc ep, OOM.\n");
2548 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
2549 "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
2550 ep, cep, csk, hba, hba->ndev->name);
2554 cxgbi_sock_put(csk);
2555 cxgbi_sock_closed(csk);
2557 return ERR_PTR(err);
2559 EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
2561 int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
2563 struct cxgbi_endpoint *cep = ep->dd_data;
2564 struct cxgbi_sock *csk = cep->csk;
2566 if (!cxgbi_sock_is_established(csk))
2570 EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
2572 void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
2574 struct cxgbi_endpoint *cep = ep->dd_data;
2575 struct cxgbi_conn *cconn = cep->cconn;
2576 struct cxgbi_sock *csk = cep->csk;
2578 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
2579 "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
2580 ep, cep, cconn, csk, csk->state, csk->flags);
2582 if (cconn && cconn->iconn) {
2583 iscsi_suspend_tx(cconn->iconn);
2584 write_lock_bh(&csk->callback_lock);
2585 cep->csk->user_data = NULL;
2587 write_unlock_bh(&csk->callback_lock);
2589 iscsi_destroy_endpoint(ep);
2591 if (likely(csk->state >= CTP_ESTABLISHED))
2592 need_active_close(csk);
2594 cxgbi_sock_closed(csk);
2596 cxgbi_sock_put(csk);
2598 EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
2600 int cxgbi_iscsi_init(struct iscsi_transport *itp,
2601 struct scsi_transport_template **stt)
2603 *stt = iscsi_register_transport(itp);
2605 pr_err("unable to register %s transport 0x%p.\n",
2609 log_debug(1 << CXGBI_DBG_ISCSI,
2610 "%s, registered iscsi transport 0x%p.\n",
2614 EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);
2616 void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
2617 struct scsi_transport_template **stt)
2620 log_debug(1 << CXGBI_DBG_ISCSI,
2621 "de-register transport 0x%p, %s, stt 0x%p.\n",
2622 itp, itp->name, *stt);
2624 iscsi_unregister_transport(itp);
2627 EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
2629 umode_t cxgbi_attr_is_visible(int param_type, int param)
2631 switch (param_type) {
2632 case ISCSI_HOST_PARAM:
2634 case ISCSI_HOST_PARAM_NETDEV_NAME:
2635 case ISCSI_HOST_PARAM_HWADDRESS:
2636 case ISCSI_HOST_PARAM_IPADDRESS:
2637 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2644 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2645 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2646 case ISCSI_PARAM_HDRDGST_EN:
2647 case ISCSI_PARAM_DATADGST_EN:
2648 case ISCSI_PARAM_CONN_ADDRESS:
2649 case ISCSI_PARAM_CONN_PORT:
2650 case ISCSI_PARAM_EXP_STATSN:
2651 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2652 case ISCSI_PARAM_PERSISTENT_PORT:
2653 case ISCSI_PARAM_PING_TMO:
2654 case ISCSI_PARAM_RECV_TMO:
2655 case ISCSI_PARAM_INITIAL_R2T_EN:
2656 case ISCSI_PARAM_MAX_R2T:
2657 case ISCSI_PARAM_IMM_DATA_EN:
2658 case ISCSI_PARAM_FIRST_BURST:
2659 case ISCSI_PARAM_MAX_BURST:
2660 case ISCSI_PARAM_PDU_INORDER_EN:
2661 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2662 case ISCSI_PARAM_ERL:
2663 case ISCSI_PARAM_TARGET_NAME:
2664 case ISCSI_PARAM_TPGT:
2665 case ISCSI_PARAM_USERNAME:
2666 case ISCSI_PARAM_PASSWORD:
2667 case ISCSI_PARAM_USERNAME_IN:
2668 case ISCSI_PARAM_PASSWORD_IN:
2669 case ISCSI_PARAM_FAST_ABORT:
2670 case ISCSI_PARAM_ABORT_TMO:
2671 case ISCSI_PARAM_LU_RESET_TMO:
2672 case ISCSI_PARAM_TGT_RESET_TMO:
2673 case ISCSI_PARAM_IFACE_NAME:
2674 case ISCSI_PARAM_INITIATOR_NAME:
2683 EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
2685 static int __init libcxgbi_init_module(void)
2687 pr_info("%s", version);
2691 static void __exit libcxgbi_exit_module(void)
2693 cxgbi_device_unregister_all(0xFF);
2697 module_init(libcxgbi_init_module);
2698 module_exit(libcxgbi_exit_module);