/* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */
#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"

#define MAXIOC (8192)	/* default meant to avoid most soft lockups */

static void ktcomplete(struct frame *, struct sk_buff *);
static int count_targets(struct aoedev *d, int *untainted);

static struct buf *nextbuf(struct aoedev *);

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");

/* The number of online cpus during module initialization gives us a
 * convenient heuristic cap on the parallelism used for ktio threads
 * doing I/O completion.  It is not important that the cap equal the
 * actual number of running CPUs at any given time, but because of CPU
 * hotplug, we take care to use ncpus instead of using
 * num_online_cpus() after module initialization.
 */
static int ncpus;

/* mutex lock used for synchronization while thread spawning */
static DEFINE_MUTEX(ktio_spawn_lock);

static wait_queue_head_t *ktiowq;
static struct ktstate *kts;

/* io completion queue */
struct iocq_ktio {
	struct list_head head;
	spinlock_t lock;
};
static struct iocq_ktio *iocq;

static struct page *empty_page;

static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
	if (skb) {
		skb_reserve(skb, MAX_HEADER);
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb_checksum_none_assert(skb);
	}
	return skb;
}

static struct frame *
getframe_deferred(struct aoedev *d, u32 tag)
{
	struct list_head *head, *pos, *nx;
	struct frame *f;

	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

static struct frame *
getframe(struct aoedev *d, u32 tag)
{
	struct frame *f;
	struct list_head *head, *pos, *nx;
	u32 n;

	n = tag % NFACTIVE;
	head = &d->factive[n];
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static u32
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n | (++d->lasttag & 0x7fff) << 16;
}
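
/* Worked example for newtag() above (illustrative values, not from the
 * original source): with jiffies == 0x12345678 and d->lasttag == 3, the
 * result is 0x5678 | (4 << 16) == 0x00045678.  The 0x7fff mask keeps
 * bit 31 clear for userland tags, and the low 16 bits carry the xmit
 * tick so tsince() can estimate a frame's age later.
 */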

static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
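
/* Illustration (hypothetical value): for lba == 0xA1B2C3D4, put_lba()
 * stores lba0 == 0xD4, lba1 == 0xC3, lba2 == 0xB2, lba3 == 0xA1, and
 * lba4 == lba5 == 0x00.  Each "lba >>= 8" shifts the next byte into
 * the low position before the assignment, producing the little-endian
 * 48-bit LBA layout of the AoE ATA header.
 */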

static struct aoeif *
ifrotate(struct aoetgt *t)
{
	struct aoeif *ifp;

	ifp = t->ifp;
	ifp++;
	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
		ifp = t->ifs;
	if (ifp->nd == NULL)
		return NULL;
	return t->ifp = ifp;
}

static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}

static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}

void
aoe_freetframe(struct frame *f)
{
	struct aoetgt *t;

	t = f->t;
	f->buf = NULL;
	memset(&f->iter, 0, sizeof(f->iter));
	f->r_skb = NULL;
	f->flags = 0;
	list_add(&f->head, &t->ffree);
}

static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct sk_buff *skb;
	struct list_head *pos;

	if (list_empty(&t->ffree)) {
		if (t->falloc >= NSKBPOOLMAX*2)
			return NULL;
		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
		if (f == NULL)
			return NULL;
		t->falloc++;
		f->t = t;
	} else {
		pos = t->ffree.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
	}

	skb = f->skb;
	if (skb == NULL) {
		f->skb = skb = new_skb(ETH_ZLEN);
		if (!skb) {
bail:			aoe_freetframe(f);
			return NULL;
		}
	}

	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
		skb = skb_pool_get(d);
		if (skb == NULL)
			goto bail;
		skb_pool_put(d, f->skb);
		f->skb = skb;
	}

	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	return f;
}

static struct frame *
newframe(struct aoedev *d)
{
	struct frame *f;
	struct aoetgt *t, **tt;
	int totout = 0;
	int use_tainted;
	int has_untainted;

	if (!d->targets || !d->targets[0]) {
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	tt = d->tgt;	/* last used target */
	for (use_tainted = 0, has_untainted = 0;;) {
		tt++;
		if (tt >= &d->targets[d->ntargets] || !*tt)
			tt = d->targets;
		t = *tt;
		if (!t->taint) {
			has_untainted = 1;
			totout += t->nout;
		}
		if (t->nout < t->maxout
		&& (use_tainted || !t->taint)
		&& t->ifp->nd) {
			f = newtframe(d, t);
			if (f) {
				ifrotate(t);
				d->tgt = tt;
				return f;
			}
		}
		if (tt == d->tgt) {	/* we've looped and found nada */
			if (!use_tainted && !has_untainted)
				use_tainted = 1;
			else
				break;
		}
	}
	if (totout == 0) {
		d->kicked++;
		d->flags |= DEVFL_KICKME;
	}
	return NULL;
}

static void
skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
{
	int frag = 0;
	struct bio_vec bv;

	__bio_for_each_segment(bv, bio, iter, iter)
		skb_fill_page_desc(skb, frag++, bv.bv_page,
				   bv.bv_offset, bv.bv_len);
}
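
/* Note on skb_fillup() above: write payloads are not copied.  Each bio
 * segment is attached to the skb as a paged fragment, so a write that
 * spans two pages simply becomes two skb frags pointing at the bio's
 * own pages, and the NIC transmits directly from them.
 */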

static void
fhash(struct frame *f)
{
	struct aoedev *d = f->t->d;
	u32 n;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
}

static void
ata_rw_frameinit(struct frame *f)
{
	struct aoetgt *t;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct sk_buff *skb;
	char writebit, extbit;

	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h + 1);
	skb_put(skb, sizeof(*h) + sizeof(*ah));
	memset(h, 0, skb->len);

	writebit = 0x10;
	extbit = 0x4;

	t = f->t;
	f->tag = aoehdr_atainit(t->d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;

	/* set up ata header */
	ah->scnt = f->iter.bi_size >> 9;
	put_lba(ah, f->iter.bi_sector);
	if (t->d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
		skb_fillup(skb, f->buf->bio, f->iter);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += f->iter.bi_size;
		skb->data_len = f->iter.bi_size;
		skb->truesize += f->iter.bi_size;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
	skb->dev = t->ifp->nd;
}

static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;
	struct sk_buff *skb;
	struct sk_buff_head queue;

	buf = nextbuf(d);
	if (buf == NULL)
		return 0;
	f = newframe(d);
	if (f == NULL)
		return 0;

	/* initialize the headers & frame */
	f->buf = buf;
	f->iter = buf->iter;
	f->iter.bi_size = min_t(unsigned long,
				d->maxbcnt ?: DEFAULTBCNT,
				f->iter.bi_size);
	bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);

	if (!buf->iter.bi_size)
		d->ip.buf = NULL;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;

	ata_rw_frameinit(f);

	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		f->sent = ktime_get();
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
	return 1;
}

/* Some callers cannot sleep; they can call this function and transmit
 * the packets later, when interrupts are on.
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

cont:
		dev_put(ifp);
	}
	rcu_read_unlock();
}

static void
resend(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct sk_buff_head queue;
	struct aoe_hdr *h;
	struct aoetgt *t;
	char buf[128];
	u32 n;

	t = f->t;
	n = newtag(d);
	skb = f->skb;
	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
		ktcomplete(f, NULL);
		return;
	}
	h = (struct aoe_hdr *) skb_mac_header(skb);

	if (!(f->flags & FFL_PROBE)) {
		snprintf(buf, sizeof(buf),
			"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
			"retransmit", d->aoemajor, d->aoeminor,
			f->tag, jiffies, n,
			h->src, h->dst, t->nout);
		aoechr_error(buf);
	}

	f->tag = n;
	fhash(f);
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	f->sent = ktime_get();
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
	aoenet_xmit(&queue);
}

static int
tsince_hr(struct frame *f)
{
	u64 delta = ktime_to_ns(ktime_sub(ktime_get(), f->sent));

	/* delta is normally under 4.2 seconds, avoid 64-bit division */
	if (likely(delta <= UINT_MAX))
		return (u32)delta / NSEC_PER_USEC;

	/* avoid overflow after 71 minutes */
	if (delta > ((u64)INT_MAX * NSEC_PER_USEC))
		return INT_MAX;

	return div_u64(delta, NSEC_PER_USEC);
}
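
/* Arithmetic behind the bounds above: UINT_MAX nanoseconds is about
 * 4.29 seconds, so the common case needs only a 32-bit division.  The
 * clamp triggers once delta exceeds INT_MAX microseconds (~35.8
 * minutes), comfortably before a 32-bit microsecond count would wrap
 * at ~71 minutes.
 */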

static int
tsince(u32 tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return jiffies_to_usecs(n + 1);
}

static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p, *e;

	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++)
		if (p->nd == nd)
			return p;
	return NULL;
}

static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
	struct aoeif *e;
	struct net_device *nd;
	ulong n;

	nd = ifp->nd;
	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
	e->nd = NULL;
	dev_put(nd);
}

static struct frame *
reassign_frame(struct frame *f)
{
	struct frame *nf;
	struct sk_buff *skb;

	nf = newframe(f->t->d);
	if (!nf)
		return NULL;
	if (nf->t == f->t) {
		aoe_freetframe(nf);
		return NULL;
	}

	skb = nf->skb;
	nf->skb = f->skb;
	nf->buf = f->buf;
	nf->iter = f->iter;
	nf->waited = 0;
	nf->waited_total = f->waited_total;
	nf->sent = f->sent;
	f->skb = skb;

	return nf;
}

static void
probe(struct aoetgt *t)
{
	struct aoedev *d;
	struct frame *f;
	struct sk_buff *skb;
	struct sk_buff_head queue;
	size_t n, m;
	int frag;

	d = t->d;
	f = newtframe(d, t);
	if (!f) {
		pr_err("%s %pm for e%ld.%d: %s\n",
			"aoe: cannot probe remote address",
			t->addr,
			(long) d->aoemajor, d->aoeminor,
			"no frame available");
		return;
	}
	f->flags |= FFL_PROBE;
	ifrotate(t);
	f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
	ata_rw_frameinit(f);
	skb = f->skb;
	for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
		if (n < PAGE_SIZE)
			m = n;
		else
			m = PAGE_SIZE;
		skb_fill_page_desc(skb, frag, empty_page, 0, m);
	}
	skb->len += f->iter.bi_size;
	skb->data_len = f->iter.bi_size;
	skb->truesize += f->iter.bi_size;

	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		f->sent = ktime_get();
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
}

static long
rto(struct aoedev *d)
{
	long t;

	t = 2 * d->rttavg >> RTTSCALE;
	t += 8 * d->rttdev >> RTTDSCALE;
	if (t == 0)
		t = 1;

	return t;
}
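
/* With the fixed-point scales from aoe.h (rttavg stored shifted left
 * by RTTSCALE, rttdev by RTTDSCALE), the sum above works out to
 * 2*avg + 8*dev in microseconds, i.e. twice the classic
 * Jacobson/Karels "srtt + 4*mdev" retransmit-timeout estimator.
 */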

static void
rexmit_deferred(struct aoedev *d)
{
	struct aoetgt *t;
	struct frame *f;
	struct frame *nf;
	struct list_head *pos, *nx, *head;
	int since;
	int untainted;

	count_targets(d, &untainted);

	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		t = f->t;
		if (t->taint) {
			if (!(f->flags & FFL_PROBE)) {
				nf = reassign_frame(f);
				if (nf) {
					if (t->nout_probes == 0
					&& untainted > 0) {
						probe(t);
						t->nout_probes++;
					}
					list_replace(&f->head, &nf->head);
					pos = &nf->head;
					aoe_freetframe(f);
					f = nf;
					t = f->t;
				}
			} else if (untainted < 1) {
				/* don't probe w/o other untainted aoetgts */
				goto stop_probe;
			} else if (tsince_hr(f) < t->taint * rto(d)) {
				/* reprobe slowly when taint is high */
				continue;
			}
		} else if (f->flags & FFL_PROBE) {
stop_probe:		/* don't probe untainted aoetgts */
			list_del(pos);
			aoe_freetframe(f);
			/* leaving d->kicked, because this is routine */
			f->t->d->flags |= DEVFL_KICKME;
			continue;
		}
		if (t->nout >= t->maxout)
			continue;
		list_del(pos);
		t->nout++;
		if (f->flags & FFL_PROBE)
			t->nout_probes++;
		since = tsince_hr(f);
		f->waited += since;
		f->waited_total += since;
		resend(d, f);
	}
}

/* An aoetgt accumulates demerits quickly, and successful
 * probing redeems the aoetgt slowly.
 */
static void
scorn(struct aoetgt *t)
{
	int n;

	n = t->taint++;
	t->taint += t->taint * 2;
	if (n > t->taint)
		t->taint = n;
	if (t->taint > MAX_TAINT)
		t->taint = MAX_TAINT;
}
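
/* Growth example (illustrative): each call effectively sets taint to
 * 3 * (taint + 1), so repeated scorning gives 0 -> 3 -> 12 -> 39 ->
 * 120 -> ..., until the MAX_TAINT cap; the "n > t->taint" check only
 * guards against integer overflow of the tripling.
 */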

static int
count_targets(struct aoedev *d, int *untainted)
{
	int i, good;

	for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
		if (d->targets[i]->taint == 0)
			good++;

	if (untainted)
		*untainted = good;
	return i;
}
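
/* Overview of the retransmit path below: rexmit_timer() runs every
 * TIMERTICK, moves frames older than the current rto() off the active
 * hash lists onto a local list, fails the whole device once a frame
 * has waited aoe_deadsecs in total, and otherwise queues survivors on
 * d->rexmitq for rexmit_deferred() to resend within each target's
 * congestion window.
 */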

static void
rexmit_timer(struct timer_list *timer)
{
	struct aoedev *d;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct frame *f;
	struct list_head *head, *pos, *nx;
	LIST_HEAD(flist);
	register long timeout;
	ulong flags, n;
	int i;
	int utgts;	/* number of aoetgt descriptors (not slots) */
	int since;

	d = from_timer(d, timer, timer);

	spin_lock_irqsave(&d->lock, flags);

	/* timeout based on observed timings and variations */
	timeout = rto(d);

	utgts = count_targets(d, NULL);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	/* collect all frames to rexmit into flist */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (tsince_hr(f) < timeout)
				break;	/* end of expired frames */
			/* move to flist for later processing */
			list_move_tail(pos, &flist);
		}
	}

	/* process expired frames */
	while (!list_empty(&flist)) {
		pos = flist.next;
		f = list_entry(pos, struct frame, head);
		since = tsince_hr(f);
		n = f->waited_total + since;
		n /= USEC_PER_SEC;
		if (aoe_deadsecs
		&& n > aoe_deadsecs
		&& !(f->flags & FFL_PROBE)) {
			/* Waited too long.  Device failure.
			 * Hang all frames on first hash bucket for downdev
			 * to clean up.
			 */
			list_splice(&flist, &d->factive[0]);
			aoedev_downdev(d);
			goto out;
		}

		t = f->t;
		n = f->waited + since;
		n /= USEC_PER_SEC;
		if (aoe_deadsecs && utgts > 0
		&& (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
			scorn(t);	/* avoid this target */

		if (t->maxout != 1) {
			t->ssthresh = t->maxout / 2;
			t->maxout = 1;
		}

		if (f->flags & FFL_PROBE) {
			t->nout_probes--;
		} else {
			ifp = getif(t, f->skb->dev);
			if (ifp && ++ifp->lost > (t->nframes << 1)
			&& (ifp != t->ifs || t->ifs[1].nd)) {
				ejectif(t, ifp);
				ifp = NULL;
			}
		}
		list_move_tail(pos, &d->rexmitq);
		t->nout--;
	}
	rexmit_deferred(d);

out:
	if ((d->flags & DEVFL_KICKME) && d->blkq) {
		d->flags &= ~DEVFL_KICKME;
		blk_mq_run_hw_queues(d->blkq, true);
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);
}

static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
	memset(buf, 0, sizeof(*buf));
	buf->rq = rq;
	buf->bio = bio;
	buf->iter = bio->bi_iter;
}

static struct buf *
nextbuf(struct aoedev *d)
{
	struct request *rq;
	struct request_queue *q;
	struct aoe_req *req;
	struct buf *buf;
	struct bio *bio;

	q = d->blkq;
	if (q == NULL)
		return NULL;	/* initializing */
	if (d->flags & DEVFL_TKILL)
		return NULL;
	rq = d->ip.rq;
	if (rq == NULL) {
		rq = list_first_entry_or_null(&d->rq_list, struct request,
						queuelist);
		if (rq == NULL)
			return NULL;
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		d->ip.rq = rq;
		d->ip.nxbio = rq->bio;

		req = blk_mq_rq_to_pdu(rq);
		req->nr_bios = 0;
		__rq_for_each_bio(bio, rq)
			req->nr_bios++;
	}
	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
	if (buf == NULL) {
		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
		return NULL;
	}
	bio = d->ip.nxbio;
	bufinit(buf, rq, bio);
	bio = bio->bi_next;
	d->ip.nxbio = bio;
	if (bio == NULL)
		d->ip.rq = NULL;
	return d->ip.buf = buf;
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	rexmit_deferred(d);
	while (aoecmd_ata_rw(d))
		;
}

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		set_capacity_and_notify(d->gd, d->ssize);

		spin_lock_irq(&d->lock);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irq(&d->lock);
	}
}

static void
ata_ident_fixstring(u16 *id, int ns)
{
	u16 s;

	while (ns-- > 0) {
		s = *id;
		*id++ = s >> 8 | s << 8;
	}
}

static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	ata_ident_fixstring((u16 *) &id[10<<1], 10);	/* serial */
	ata_ident_fixstring((u16 *) &id[23<<1], 4);	/* firmware */
	ata_ident_fixstring((u16 *) &id[27<<1], 20);	/* model */
	memcpy(d->ident, id, sizeof(d->ident));

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL)
		d->flags |= DEVFL_NEWSIZE;
	else
		d->flags |= DEVFL_GDALLOC;
	queue_work(aoe_wq, &d->work);
}

static void
calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
{
	register long n;

	n = rtt;

	/* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
	n -= d->rttavg >> RTTSCALE;
	d->rttavg += n;
	if (n < 0)
		n = -n;
	n -= d->rttdev >> RTTDSCALE;
	d->rttdev += n;

	if (!t || t->maxout >= t->nframes)
		return;
	if (t->maxout < t->ssthresh)
		t->maxout += 1;
	else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
		t->maxout += 1;
		t->next_cwnd = t->maxout;
	}
}
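
/* The fixed-point updates above implement the scaled EWMA from the
 * Jacobson/Karels paper: with the average stored as avg << RTTSCALE,
 *
 *	err     = rtt - (rttavg >> RTTSCALE)
 *	rttavg += err			(avg += err / 2^RTTSCALE)
 *	rttdev += |err| - (rttdev >> RTTDSCALE)
 *
 * so both estimators decay geometrically without any division.
 */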

static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}

static void
bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
{
	int soff = 0;
	struct bio_vec bv;

	iter.bi_size = cnt;

	__bio_for_each_segment(bv, bio, iter, iter) {
		char *p = bvec_kmap_local(&bv);
		skb_copy_bits(skb, soff, p, bv.bv_len);
		kunmap_local(p);
		soff += bv.bv_len;
	}
}

void
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
{
	struct bio *bio;
	int bok;
	struct request_queue *q;
	blk_status_t err = BLK_STS_OK;

	q = d->blkq;
	if (rq == d->ip.rq)
		d->ip.rq = NULL;
	do {
		bio = rq->bio;
		bok = !fastfail && !bio->bi_status;
		if (!bok)
			err = BLK_STS_IOERR;
	} while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));

	__blk_mq_end_request(rq, err);

	/* cf. https://lore.kernel.org/lkml/20061031071040.GS14055@kernel.dk/ */
	if (!fastfail)
		blk_mq_run_hw_queues(q, true);
}

static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{
	struct request *rq = buf->rq;
	struct aoe_req *req = blk_mq_rq_to_pdu(rq);

	if (buf == d->ip.buf)
		d->ip.buf = NULL;
	mempool_free(buf, d->bufpool);
	if (--req->nr_bios == 0)
		aoe_end_request(d, rq, 0);
}

static void
ktiocomplete(struct frame *f)
{
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct buf *buf;
	struct sk_buff *skb;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct aoedev *d;
	long n;
	int untainted;

	if (f == NULL)
		return;

	t = f->t;
	d = t->d;
	skb = f->r_skb;
	buf = f->buf;
	if (f->flags & FFL_PROBE)
		goto out;
	if (!skb)		/* just fail the buf. */
		goto noskb;

	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);

	hin = (struct aoe_hdr *) skb->data;
	skb_pull(skb, sizeof(*hin));
	ahin = (struct aoe_atahdr *) skb->data;
	skb_pull(skb, sizeof(*ahin));
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
noskb:		if (buf)
			buf->bio->bi_status = BLK_STS_IOERR;
		goto out;
	}

	n = ahout->scnt << 9;
	switch (ahout->cmdstat) {
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
		if (skb->len < n) {
			pr_err("%s e%ld.%d. skb->len=%d need=%ld\n",
				"aoe: runt data size in read from",
				(long) d->aoemajor, d->aoeminor,
				skb->len, n);
			buf->bio->bi_status = BLK_STS_IOERR;
			break;
		}
		if (n > f->iter.bi_size) {
			pr_err_ratelimited("%s e%ld.%d. bytes=%ld need=%u\n",
				"aoe: too-large data size in read from",
				(long) d->aoemajor, d->aoeminor,
				n, f->iter.bi_size);
			buf->bio->bi_status = BLK_STS_IOERR;
			break;
		}
		bvcpy(skb, f->buf->bio, f->iter, n);
		fallthrough;
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		spin_lock_irq(&d->lock);
		ifp = getif(t, skb->dev);
		if (ifp)
			ifp->lost = 0;
		spin_unlock_irq(&d->lock);
		break;
	case ATA_CMD_ID_ATA:
		if (skb->len < 512) {
			pr_info("%s e%ld.%d. skb->len=%d need=512\n",
				"aoe: runt data size in ataid from",
				(long) d->aoemajor, d->aoeminor,
				skb->len);
			break;
		}
		if (skb_linearize(skb))
			break;
		spin_lock_irq(&d->lock);
		ataid_complete(d, t, skb->data);
		spin_unlock_irq(&d->lock);
		break;
	default:
		pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
			ahout->cmdstat,
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor);
	}
out:
	spin_lock_irq(&d->lock);
	if (t->taint > 0
	&& --t->taint > 0
	&& t->nout_probes == 0) {
		count_targets(d, &untainted);
		if (untainted > 0) {
			probe(t);
			t->nout_probes++;
		}
	}

	aoe_freetframe(f);

	if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
		aoe_end_buf(d, buf);

	spin_unlock_irq(&d->lock);
	aoedev_put(d);
	dev_kfree_skb(skb);
}

/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
static int
ktio(int id)
{
	struct frame *f;
	struct list_head *pos;
	int i;
	int actual_id;

	for (i = 0; ; ++i) {
		if (i == MAXIOC)
			return 1;
		if (list_empty(&iocq[id].head))
			return 0;
		pos = iocq[id].head.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		spin_unlock_irq(&iocq[id].lock);
		ktiocomplete(f);

		/* Figure out if extra threads are required. */
		actual_id = f->t->d->aoeminor % ncpus;

		if (!kts[actual_id].active) {
			BUG_ON(id != 0);
			mutex_lock(&ktio_spawn_lock);
			if (!kts[actual_id].active
				&& aoe_ktstart(&kts[actual_id]) == 0)
				kts[actual_id].active = 1;
			mutex_unlock(&ktio_spawn_lock);
		}
		spin_lock_irq(&iocq[id].lock);
	}
}

static int
kthread(void *vp)
{
	struct ktstate *k;
	DECLARE_WAITQUEUE(wait, current);
	int more;

	k = vp;
	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -10);
	complete(&k->rendez);	/* tell spawner we're running */
	do {
		spin_lock_irq(k->lock);
		more = k->fn(k->id);
		if (!more) {
			add_wait_queue(k->waitq, &wait);
			__set_current_state(TASK_INTERRUPTIBLE);
		}
		spin_unlock_irq(k->lock);
		if (!more) {
			schedule();
			remove_wait_queue(k->waitq, &wait);
		} else
			cond_resched();
	} while (!kthread_should_stop());
	complete(&k->rendez);	/* tell spawner we're stopping */
	return 0;
}

void
aoe_ktstop(struct ktstate *k)
{
	kthread_stop(k->task);
	wait_for_completion(&k->rendez);
}

int
aoe_ktstart(struct ktstate *k)
{
	struct task_struct *task;

	init_completion(&k->rendez);
	task = kthread_run(kthread, k, "%s", k->name);
	if (task == NULL || IS_ERR(task))
		return -EFAULT;
	k->task = task;
	wait_for_completion(&k->rendez);	/* allow kthread to start */
	init_completion(&k->rendez);	/* for waiting for exit later */
	return 0;
}
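
/* The rendezvous above uses k->rendez twice: the first
 * wait_for_completion() returns once the new thread has signaled that
 * it is running, and the completion is then re-armed so that
 * aoe_ktstop() can wait on the same object for the thread's exit
 * signal.
 */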

/* pass it off to kthreads for processing */
static void
ktcomplete(struct frame *f, struct sk_buff *skb)
{
	int id;
	ulong flags;

	f->r_skb = skb;
	id = f->t->d->aoeminor % ncpus;
	spin_lock_irqsave(&iocq[id].lock, flags);
	if (!kts[id].active) {
		spin_unlock_irqrestore(&iocq[id].lock, flags);
		/* The thread with id has not been spawned yet,
		 * so delegate the work to the main thread and
		 * try spawning a new thread.
		 */
		id = 0;
		spin_lock_irqsave(&iocq[id].lock, flags);
	}
	list_add_tail(&f->head, &iocq[id].head);
	spin_unlock_irqrestore(&iocq[id].lock, flags);
	wake_up(&ktiowq[id]);
}

struct sk_buff *
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct frame *f;
	u32 n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	h = (struct aoe_hdr *) skb->data;
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, h->minor);
		aoechr_error(ebuf);
		return skb;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&h->tag));
	f = getframe(d, n);
	if (f) {
		calc_rttavg(d, f->t, tsince_hr(f));
		f->t->nout--;
		if (f->flags & FFL_PROBE)
			f->t->nout_probes--;
	} else {
		f = getframe_deferred(d, n);
		if (f) {
			calc_rttavg(d, NULL, tsince_hr(f));
		} else {
			calc_rttavg(d, NULL, tsince(n));
			spin_unlock_irqrestore(&d->lock, flags);
			aoedev_put(d);
			snprintf(ebuf, sizeof(ebuf),
				"%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
				"unexpected rsp",
				get_unaligned_be16(&h->major),
				h->minor,
				get_unaligned_be32(&h->tag),
				jiffies,
				h->src,
				h->dst);
			aoechr_error(ebuf);
			return skb;
		}
	}
	ktcomplete(f, skb);

	spin_unlock_irqrestore(&d->lock, flags);

	/*
	 * Note here that we do not perform an aoedev_put, as we are
	 * leaving this reference for the ktio to release.
	 */
	return NULL;
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}

struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = newframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = ATA_CMD_ID_ATA;
	ah->lba3 = 0xa0;

	skb->dev = t->ifp->nd;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->timer.function = rexmit_timer;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb)
		f->sent = ktime_get();

	return skb;
}
1411 static struct aoetgt **
1412 grow_targets(struct aoedev *d)
1419 tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
1422 memmove(tt, d->targets, sizeof(*d->targets) * oldn);
1423 d->tgt = tt + (d->tgt - d->targets);
1428 return &d->targets[oldn];
1431 static struct aoetgt *
1432 addtgt(struct aoedev *d, char *addr, ulong nframes)
1434 struct aoetgt *t, **tt, **te;
1437 te = tt + d->ntargets;
1438 for (; tt < te && *tt; tt++)
1442 tt = grow_targets(d);
1446 t = kzalloc(sizeof(*t), GFP_ATOMIC);
1449 t->nframes = nframes;
1451 memcpy(t->addr, addr, sizeof t->addr);
1454 t->maxout = t->nframes / 2;
1455 INIT_LIST_HEAD(&t->ffree);
1459 pr_info("aoe: cannot allocate memory to add target\n");

static void
setdbcnt(struct aoedev *d)
{
	struct aoetgt **t, **e;
	int bcnt = 0;

	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (bcnt == 0 || bcnt > (*t)->minbcnt)
			bcnt = (*t)->minbcnt;
	if (bcnt != d->maxbcnt) {
		d->maxbcnt = bcnt;
		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
			d->aoemajor, d->aoeminor, bcnt);
	}
}

static void
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
{
	struct aoedev *d;
	struct aoeif *p, *e;
	int minbcnt;

	d = t->d;
	minbcnt = bcnt;
	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++) {
		if (p->nd == NULL)
			break;	/* end of the valid interfaces */
		if (p->nd == nd) {
			p->bcnt = bcnt;	/* we're updating */
			nd = NULL;
		} else if (minbcnt > p->bcnt)
			minbcnt = p->bcnt; /* find the min interface */
	}

	if (nd) {
		if (p == e) {
			pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
			return;
		}
		dev_hold(nd);
		p->nd = nd;
		p->bcnt = bcnt;
	}

	t->minbcnt = minbcnt;
	setdbcnt(d);
}

void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	ulong flags, aoemajor;
	struct sk_buff *sl;
	struct sk_buff_head queue;
	u16 n;

	sl = NULL;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
			"Check shelf dip switches.\n");
		return;
	}
	if (aoemajor == 0xffff) {
		pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}
	if (h->minor == 0xff) {
		pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
	if (d == NULL) {
		pr_info("aoe: device allocation failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (t) {
		t->nframes = n;
		if (n < t->maxout)
			aoecmd_wreset(t);
	} else {
		t = addtgt(d, h->src, n);
		if (!t)
			goto bail;
	}
	n = skb->dev->mtu;
	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
	n /= 512;
	if (n > ch->scnt)
		n = ch->scnt;
	n = n ? n * 512 : DEFAULTBCNT;
	setifbcnt(t, skb->dev, n);
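
	/* Sizing example (illustrative MTU, assuming the usual 24 + 12
	 * byte aoe_hdr + aoe_atahdr): with a 9000-byte jumbo frame,
	 * (9000 - 36) / 512 is 17 sectors, so the interface carries
	 * 17 * 512 == 8704 bytes of data per frame, unless the target's
	 * ch->scnt advertises less.
	 */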

	/* don't change users' perspective */
	if (d->nopen == 0) {
		d->fw_ver = be16_to_cpu(ch->fwver);
		sl = aoecmd_ata_id(d);
	}
bail:
	spin_unlock_irqrestore(&d->lock, flags);
	aoedev_put(d);
	if (sl) {
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}

void
aoecmd_wreset(struct aoetgt *t)
{
	t->maxout = 1;
	t->ssthresh = t->nframes / 2;
	t->next_cwnd = t->nframes;
}

void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->maxbcnt = 0;

	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++)
		aoecmd_wreset(*t);
}

void
aoe_failbuf(struct aoedev *d, struct buf *buf)
{
	if (buf == NULL)
		return;
	buf->iter.bi_size = 0;
	buf->bio->bi_status = BLK_STS_IOERR;
	if (buf->nframesout == 0)
		aoe_end_buf(d, buf);
}

void
aoe_flush_iocq(void)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (kts[i].active)
			aoe_flush_iocq_by_index(i);
	}
}

void
aoe_flush_iocq_by_index(int id)
{
	struct frame *f;
	struct aoedev *d;
	LIST_HEAD(flist);
	struct list_head *pos;
	struct sk_buff *skb;
	ulong flags;

	spin_lock_irqsave(&iocq[id].lock, flags);
	list_splice_init(&iocq[id].head, &flist);
	spin_unlock_irqrestore(&iocq[id].lock, flags);
	while (!list_empty(&flist)) {
		pos = flist.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		d = f->t->d;
		skb = f->r_skb;
		spin_lock_irqsave(&d->lock, flags);
		if (f->buf) {
			f->buf->nframesout--;
			aoe_failbuf(d, f->buf);
		}
		aoe_freetframe(f);
		spin_unlock_irqrestore(&d->lock, flags);
		dev_kfree_skb(skb);
		aoedev_put(d);
	}
}

int __init
aoecmd_init(void)
{
	void *p;
	int i;
	int ret;

	/* get_zeroed_page returns page with ref count 1 */
	p = (void *) get_zeroed_page(GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	empty_page = virt_to_page(p);

	ncpus = num_online_cpus();

	iocq = kcalloc(ncpus, sizeof(struct iocq_ktio), GFP_KERNEL);
	if (!iocq)
		return -ENOMEM;

	kts = kcalloc(ncpus, sizeof(struct ktstate), GFP_KERNEL);
	if (!kts) {
		ret = -ENOMEM;
		goto kts_fail;
	}

	ktiowq = kcalloc(ncpus, sizeof(wait_queue_head_t), GFP_KERNEL);
	if (!ktiowq) {
		ret = -ENOMEM;
		goto ktiowq_fail;
	}

	for (i = 0; i < ncpus; i++) {
		INIT_LIST_HEAD(&iocq[i].head);
		spin_lock_init(&iocq[i].lock);
		init_waitqueue_head(&ktiowq[i]);
		snprintf(kts[i].name, sizeof(kts[i].name), "aoe_ktio%d", i);
		kts[i].fn = ktio;
		kts[i].waitq = &ktiowq[i];
		kts[i].lock = &iocq[i].lock;
		kts[i].id = i;
		kts[i].active = 0;
	}
	if (aoe_ktstart(&kts[0])) {
		ret = -ENOMEM;
		goto ktstart_fail;
	}
	kts[0].active = 1;

	return 0;

ktstart_fail:
	kfree(ktiowq);
ktiowq_fail:
	kfree(kts);
kts_fail:
	kfree(iocq);

	return ret;
}

void
aoecmd_exit(void)
{
	int i;

	for (i = 0; i < ncpus; i++)
		if (kts[i].active)
			aoe_ktstop(&kts[i]);

	aoe_flush_iocq();

	/* Free up the iocq and thread-specific configuration
	 * allocated during startup.
	 */
	kfree(iocq);
	kfree(kts);
	kfree(ktiowq);
	free_page((unsigned long) page_address(empty_page));
	empty_page = NULL;
}