/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME        "ntb_hw_intel"
#define NTB_DESC        "Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER         "2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

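/*
 * bar0_off() gives the offset of a per-BAR register within a block
 * indexed from BAR 0; bar2_off() does the same for blocks that start
 * at BAR 2.  For example, bar0_off(base, 2) == base + 8, and
 * bar2_off(base, 4) also resolves to base + 8, since BAR 4 is the
 * second slot of a BAR2-based block.
 */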
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

static const struct intel_ntb_reg atom_reg;
static const struct intel_ntb_alt_reg atom_pri_reg;
static const struct intel_ntb_alt_reg atom_sec_reg;
static const struct intel_ntb_alt_reg atom_b2b_reg;
static const struct intel_ntb_xlat_reg atom_pri_xlat;
static const struct intel_ntb_xlat_reg atom_sec_xlat;
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
static const struct ntb_dev_ops intel_ntb_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
                 "value of zero or positive starts from first mw idx, and a "
                 "negative value starts from last mw idx.  Both sides MUST "
                 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
                 "ntb so that the peer ntb only occupies the first half of "
                 "the mw, so the second half can still be used as a mw.  Both "
                 "sides MUST set the same value here!");
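
/*
 * Example usage (illustrative values only; both hosts must be loaded
 * with matching settings):
 *
 *   modprobe ntb_hw_intel b2b_mw_idx=-1 b2b_mw_share=1
 *
 * This reserves the last memory window for peer access and, if that
 * bar is large enough, still exposes its second half as a usable mw.
 */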

module_param_named(xeon_b2b_usd_bar2_addr64,
                   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
                 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
                   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
                 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
                   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
                 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
                   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
                 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
                   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
                 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
                   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
                 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
                   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
                 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
                   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
                 "XEON B2B DSD split-BAR 5 32-bit address");
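
/*
 * These parameters pin the outgoing XEON B2B translation addresses.
 * Purely illustrative example (hypothetical addresses; both hosts must
 * use a matching, mirrored configuration):
 *
 *   modprobe ntb_hw_intel xeon_b2b_usd_bar2_addr64=0x2000000000000000 \
 *                         xeon_b2b_dsd_bar2_addr64=0x4000000000000000
 *
 * When left at zero, the driver's built-in defaults (assumed to be
 * defined later in this file) apply.
 */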

#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
        u64 low, high;

        low = ioread32(mmio);
        high = ioread32(mmio + sizeof(u32));
        return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
        iowrite32(val, mmio);
        iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
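
/*
 * Note: on targets without readq/writeq, the fallbacks above split a
 * 64-bit access into two 32-bit MMIO operations (low dword first), so
 * a 64-bit register update is not atomic with respect to the peer.
 */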

static inline int pdev_is_atom(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
                return 1;
        }
        return 0;
}

static inline int pdev_is_xeon(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
                return 1;
        }
        return 0;
}

static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
        ndev->unsafe_flags = 0;
        ndev->unsafe_flags_ignore = 0;

        /* Only B2B has a workaround to avoid SDOORBELL */
        if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
                if (!ntb_topo_is_b2b(ndev->ntb.topo))
                        ndev->unsafe_flags |= NTB_UNSAFE_DB;

        /* No low level workaround to avoid SB01BASE */
        if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
                ndev->unsafe_flags |= NTB_UNSAFE_DB;
                ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
        }
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
                                 unsigned long flag)
{
        return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
                                     unsigned long flag)
{
        flag &= ndev->unsafe_flags;
        ndev->unsafe_flags_ignore |= flag;

        return !!flag;
}
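
/*
 * Example: if NTB_UNSAFE_DB is set in unsafe_flags, ndev_is_unsafe()
 * reports it (and the doorbell helpers warn) until a client
 * acknowledges the hazard via ndev_ignore_unsafe(), which latches the
 * flag into unsafe_flags_ignore and silences subsequent checks.
 */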

static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
        if (idx < 0 || idx >= ndev->mw_count)
                return -EINVAL;
        return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
                               phys_addr_t *db_addr, resource_size_t *db_size,
                               phys_addr_t reg_addr, unsigned long reg)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_addr) {
                *db_addr = reg_addr + reg;
                dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
        }

        if (db_size) {
                *db_size = ndev->reg->db_size;
                dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
        }

        return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
                               void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
                                void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        ndev->reg->db_iowrite(db_bits, mmio);

        return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                   void __iomem *mmio)
{
        unsigned long irqflags;

        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask |= db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                     void __iomem *mmio)
{
        unsigned long irqflags;

        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask &= ~db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}
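
/*
 * A set bit in db_mask blocks delivery of that doorbell interrupt; the
 * cached ndev->db_mask mirrors the hardware mask register and is
 * updated under db_mask_lock so concurrent set/clear operations do not
 * race.
 */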

static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
        u64 shift, mask;

        shift = ndev->db_vec_shift;
        mask = BIT_ULL(shift) - 1;

        return mask << (shift * db_vector);
}
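
/*
 * Worked example: with db_vec_shift == 4, each vector owns four
 * consecutive doorbell bits, so ndev_vec_mask(ndev, 2) returns
 * 0xf << 8 == 0xf00.
 */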

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
                                 phys_addr_t *spad_addr, phys_addr_t reg_addr,
                                 unsigned long reg)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        if (spad_addr) {
                *spad_addr = reg_addr + reg + (idx << 2);
                dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
        }

        return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
                                 void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return 0;

        return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
                                  void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        iowrite32(val, mmio + (idx << 2));

        return 0;
}

static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
        u64 vec_mask;

        vec_mask = ndev_vec_mask(ndev, vec);

        dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);

        ndev->last_ts = jiffies;

        if (vec_mask & ndev->db_link_mask) {
                if (ndev->reg->poll_link(ndev))
                        ntb_link_event(&ndev->ntb);
        }

        if (vec_mask & ndev->db_valid_mask)
                ntb_db_event(&ndev->ntb, vec);

        return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
        struct intel_ntb_vec *nvec = dev;

        return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
        struct intel_ntb_dev *ndev = dev;

        return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

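/*
 * Interrupt setup tries MSI-X first, then falls back to MSI, then to
 * legacy INTx.  With MSI-X each doorbell vector gets its own handler
 * (ndev_vec_isr); with MSI or INTx a single handler (ndev_irq_isr)
 * covers all doorbell bits, using total_shift as the vector shift.
 */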
static int ndev_init_isr(struct intel_ntb_dev *ndev,
                         int msix_min, int msix_max,
                         int msix_shift, int total_shift)
{
        struct pci_dev *pdev;
        int rc, i, msix_count, node;

        pdev = ndev_pdev(ndev);

        node = dev_to_node(&pdev->dev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        /* Try to set up msix irq */

        ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
                                 GFP_KERNEL, node);
        if (!ndev->vec)
                goto err_msix_vec_alloc;

        ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
                                  GFP_KERNEL, node);
        if (!ndev->msix)
                goto err_msix_alloc;

        for (i = 0; i < msix_max; ++i)
                ndev->msix[i].entry = i;

        msix_count = pci_enable_msix_range(pdev, ndev->msix,
                                           msix_min, msix_max);
        if (msix_count < 0)
                goto err_msix_enable;

        for (i = 0; i < msix_count; ++i) {
                ndev->vec[i].ndev = ndev;
                ndev->vec[i].num = i;
                rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
                                 "ndev_vec_isr", &ndev->vec[i]);
                if (rc)
                        goto err_msix_request;
        }

        dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
        ndev->db_vec_count = msix_count;
        ndev->db_vec_shift = msix_shift;
        return 0;

err_msix_request:
        while (i-- > 0)
                free_irq(ndev->msix[i].vector, &ndev->vec[i]);
        pci_disable_msix(pdev);
err_msix_enable:
        kfree(ndev->msix);
err_msix_alloc:
        kfree(ndev->vec);
err_msix_vec_alloc:
        ndev->msix = NULL;
        ndev->vec = NULL;

        /* Try to set up msi irq */

        rc = pci_enable_msi(pdev);
        if (rc)
                goto err_msi_enable;

        rc = request_irq(pdev->irq, ndev_irq_isr, 0,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_msi_request;

        dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_msi_request:
        pci_disable_msi(pdev);
err_msi_enable:

        /* Try to set up intx irq */

        pci_intx(pdev, 1);

        rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_intx_request;

        dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_intx_request:
        return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
        struct pci_dev *pdev;
        int i;

        pdev = ndev_pdev(ndev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        if (ndev->msix) {
                i = ndev->db_vec_count;
                while (i--)
                        free_irq(ndev->msix[i].vector, &ndev->vec[i]);
                pci_disable_msix(pdev);
                kfree(ndev->msix);
                kfree(ndev->vec);
        } else {
                free_irq(pdev->irq, ndev);
                if (pci_dev_msi_enabled(pdev))
                        pci_disable_msi(pdev);
        }
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
                                 size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev;
        struct pci_dev *pdev;
        void __iomem *mmio;
        char *buf;
        size_t buf_size;
        ssize_t ret, off;
        union { u64 v64; u32 v32; u16 v16; u8 v8; } u;

        ndev = filp->private_data;
        pdev = ndev_pdev(ndev);
        mmio = ndev->self_mmio;

        buf_size = min(count, 0x800ul);

        buf = kmalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        off = 0;

        off += scnprintf(buf + off, buf_size - off,
                         "NTB Device Information:\n");

        off += scnprintf(buf + off, buf_size - off,
                         "Connection Topology -\t%s\n",
                         ntb_topo_string(ndev->ntb.topo));

        if (ndev->b2b_idx != UINT_MAX) {
                off += scnprintf(buf + off, buf_size - off,
                                 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
                off += scnprintf(buf + off, buf_size - off,
                                 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
        }

        off += scnprintf(buf + off, buf_size - off,
                         "BAR4 Split -\t\t%s\n",
                         ndev->bar4_split ? "yes" : "no");

        off += scnprintf(buf + off, buf_size - off,
                         "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
        off += scnprintf(buf + off, buf_size - off,
                         "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

        if (!ndev->reg->link_is_up(ndev)) {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tDown\n");
        } else {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tUp\n");
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Speed -\t\tPCI-E Gen %u\n",
                                 NTB_LNK_STA_SPEED(ndev->lnk_sta));
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Width -\t\tx%u\n",
                                 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
        }

        off += scnprintf(buf + off, buf_size - off,
                         "Memory Window Count -\t%u\n", ndev->mw_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Scratchpad Count -\t%u\n", ndev->spad_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Count -\t%u\n", ndev->db_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask -\t\t%#llx\n", u.v64);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Bell -\t\t%#llx\n", u.v64);

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Window Size:\n");

        pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
        off += scnprintf(buf + off, buf_size - off,
                         "PBAR23SZ %hhu\n", u.v8);
        if (!ndev->bar4_split) {
                pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR45SZ %hhu\n", u.v8);
        } else {
                pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR4SZ %hhu\n", u.v8);
                pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR5SZ %hhu\n", u.v8);
        }

        pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
        off += scnprintf(buf + off, buf_size - off,
                         "SBAR23SZ %hhu\n", u.v8);
        if (!ndev->bar4_split) {
                pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR45SZ %hhu\n", u.v8);
        } else {
                pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR4SZ %hhu\n", u.v8);
                pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR5SZ %hhu\n", u.v8);
        }

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Incoming XLAT:\n");

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "XLAT23 -\t\t%#018llx\n", u.v64);

        if (ndev->bar4_split) {
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT4 -\t\t\t%#06x\n", u.v32);

                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT5 -\t\t\t%#06x\n", u.v32);
        } else {
                u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT45 -\t\t%#018llx\n", u.v64);
        }

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "LMT23 -\t\t\t%#018llx\n", u.v64);

        if (ndev->bar4_split) {
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT4 -\t\t\t%#06x\n", u.v32);
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT5 -\t\t\t%#06x\n", u.v32);
        } else {
                u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT45 -\t\t\t%#018llx\n", u.v64);
        }

        if (pdev_is_xeon(pdev)) {
                if (ntb_topo_is_b2b(ndev->ntb.topo)) {
                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Outgoing B2B XLAT:\n");

                        u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B XLAT23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT4 -\t\t%#06x\n",
                                                 u.v32);
                                u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT5 -\t\t%#06x\n",
                                                 u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT45 -\t\t%#018llx\n",
                                                 u.v64);
                        }

                        u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B LMT23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT4 -\t\t%#06x\n",
                                                 u.v32);
                                u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT5 -\t\t%#06x\n",
                                                 u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT45 -\t\t%#018llx\n",
                                                 u.v64);
                        }

                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Secondary BAR:\n");

                        u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR01 -\t\t%#018llx\n", u.v64);

                        u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR4 -\t\t\t%#06x\n", u.v32);
                                u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR5 -\t\t\t%#06x\n", u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR45 -\t\t%#018llx\n",
                                                 u.v64);
                        }
                }

                off += scnprintf(buf + off, buf_size - off,
                                 "\nXEON NTB Statistics:\n");

                u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "Upstream Memory Miss -\t%u\n", u.v16);

                off += scnprintf(buf + off, buf_size - off,
                                 "\nXEON NTB Hardware Errors:\n");

                if (!pci_read_config_word(pdev,
                                          XEON_DEVSTS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "DEVSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_word(pdev,
                                          XEON_LINK_STATUS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "LNKSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_dword(pdev,
                                           XEON_UNCERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "UNCERRSTS -\t\t%#06x\n", u.v32);

                if (!pci_read_config_dword(pdev,
                                           XEON_CORERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "CORERRSTS -\t\t%#06x\n", u.v32);
        }

        ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
        kfree(buf);
        return ret;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
        if (!debugfs_dir) {
                ndev->debugfs_dir = NULL;
                ndev->debugfs_info = NULL;
        } else {
                ndev->debugfs_dir =
                        debugfs_create_dir(ndev_name(ndev), debugfs_dir);
                if (!ndev->debugfs_dir)
                        ndev->debugfs_info = NULL;
                else
                        ndev->debugfs_info =
                                debugfs_create_file("info", S_IRUSR,
                                                    ndev->debugfs_dir, ndev,
                                                    &intel_ntb_debugfs_info);
        }
}
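
/*
 * With debugfs mounted at /sys/kernel/debug (the usual location), the
 * register dump above is then readable at a path along the lines of
 * /sys/kernel/debug/<driver debugfs root>/<device>/info.
 */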

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
        debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
        return ntb_ndev(ntb)->mw_count;
}

static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
                                  phys_addr_t *base,
                                  resource_size_t *size,
                                  resource_size_t *align,
                                  resource_size_t *align_size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        int bar;

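        /* When the b2b mw consumes a whole bar (b2b_off == 0), that
         * window is hidden from clients, so client indexes at or above
         * it are shifted up by one to skip it.
         */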
        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        if (base)
                *base = pci_resource_start(ndev->ntb.pdev, bar) +
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (size)
                *size = pci_resource_len(ndev->ntb.pdev, bar) -
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (align)
                *align = pci_resource_len(ndev->ntb.pdev, bar);

        if (align_size)
                *align_size = 1;

        return 0;
}

static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
                                  dma_addr_t addr, resource_size_t size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        unsigned long base_reg, xlat_reg, limit_reg;
        resource_size_t bar_size, mw_size;
        void __iomem *mmio;
        u64 base, limit, reg_val;
        int bar;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        bar_size = pci_resource_len(ndev->ntb.pdev, bar);

        if (idx == ndev->b2b_idx)
                mw_size = bar_size - ndev->b2b_off;
        else
                mw_size = bar_size;

        /* hardware requires that addr is aligned to bar size */
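        /* e.g. with a 64 MiB bar, addr must be 64 MiB aligned */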
        if (addr & (bar_size - 1))
                return -EINVAL;

        /* make sure the range fits in the usable mw size */
        if (size > mw_size)
                return -EINVAL;

        mmio = ndev->self_mmio;
        base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
        xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
        limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

        if (bar < 4 || !ndev->bar4_split) {
                base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;

                /* Set the limit, if supported and size is not mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite64(addr, mmio + xlat_reg);
                reg_val = ioread64(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite64(limit, mmio + limit_reg);
                reg_val = ioread64(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite64(base, mmio + limit_reg);
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }
        } else {
                /* split bar addr range must all be 32 bit */
                if (addr & (~0ull << 32))
                        return -EINVAL;
                if ((addr + size) & (~0ull << 32))
                        return -EINVAL;

                base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;

                /* Set the limit, if supported and size is not mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite32(addr, mmio + xlat_reg);
                reg_val = ioread32(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite32(limit, mmio + limit_reg);
                reg_val = ioread32(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite32(base, mmio + limit_reg);
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }
        }

        return 0;
}

static int intel_ntb_link_is_up(struct ntb_dev *ntb,
                                enum ntb_speed *speed,
                                enum ntb_width *width)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        if (ndev->reg->link_is_up(ndev)) {
                if (speed)
                        *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
                if (width)
                        *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
                return 1;
        } else {
                /* TODO MAYBE: is it possible to observe the link speed and
                 * width while link is training? */
                if (speed)
                        *speed = NTB_SPEED_NONE;
                if (width)
                        *width = NTB_WIDTH_NONE;
                return 0;
        }
}

static int intel_ntb_link_enable(struct ntb_dev *ntb,
                                 enum ntb_speed max_speed,
                                 enum ntb_width max_width)
{
        struct intel_ntb_dev *ndev;
        u32 ntb_ctl;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return -EINVAL;

        dev_dbg(ndev_dev(ndev),
                "Enabling link with max_speed %d max_width %d\n",
                max_speed, max_width);
        if (max_speed != NTB_SPEED_AUTO)
                dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
        if (max_width != NTB_WIDTH_AUTO)
                dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

        ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
        ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
        ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
        if (ndev->bar4_split)
                ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
        iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

        return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;
        u32 ntb_cntl;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return -EINVAL;

        dev_dbg(ndev_dev(ndev), "Disabling link\n");

        /* Bring NTB link down */
        ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
        ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
        if (ndev->bar4_split)
                ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
        ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
        iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

        return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
        return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
        return ntb_ndev(ntb)->db_valid_mask;
}

static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        return ndev->db_vec_count;
}

static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        if (db_vector < 0 || db_vector >= ndev->db_vec_count)
                return 0;

        return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_read(ndev,
                            ndev->self_mmio +
                            ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_write(ndev, db_bits,
                             ndev->self_mmio +
                             ndev->self_reg->db_bell);
}

static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_set_mask(ndev, db_bits,
                                ndev->self_mmio +
                                ndev->self_reg->db_mask);
}

static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_clear_mask(ndev, db_bits,
                                  ndev->self_mmio +
                                  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
                                  phys_addr_t *db_addr,
                                  resource_size_t *db_size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
                            ndev->peer_reg->db_bell);
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_write(ndev, db_bits,
                             ndev->peer_mmio +
                             ndev->peer_reg->db_bell);
}

static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
        return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

static int intel_ntb_spad_count(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        return ndev->spad_count;
}

static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_read(ndev, idx,
                              ndev->self_mmio +
                              ndev->self_reg->spad);
}

static int intel_ntb_spad_write(struct ntb_dev *ntb,
                                int idx, u32 val)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_write(ndev, idx, val,
                               ndev->self_mmio +
                               ndev->self_reg->spad);
}

static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
                                    phys_addr_t *spad_addr)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
                              ndev->peer_reg->spad);
}

static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_read(ndev, idx,
                              ndev->peer_mmio +
                              ndev->peer_reg->spad);
}

static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
                                     int idx, u32 val)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_write(ndev, idx, val,
                               ndev->peer_mmio +
                               ndev->peer_reg->spad);
}

/* ATOM */

static u64 atom_db_ioread(void __iomem *mmio)
{
        return ioread64(mmio);
}

static void atom_db_iowrite(u64 bits, void __iomem *mmio)
{
        iowrite64(bits, mmio);
}

static int atom_poll_link(struct intel_ntb_dev *ndev)
{
        u32 ntb_ctl;

        ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);

        if (ntb_ctl == ndev->ntb_ctl)
                return 0;

        ndev->ntb_ctl = ntb_ctl;

        ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);

        return 1;
}

static int atom_link_is_up(struct intel_ntb_dev *ndev)
{
        return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
}

static int atom_link_is_err(struct intel_ntb_dev *ndev)
{
        if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
            & ATOM_LTSSMSTATEJMP_FORCEDETECT)
                return 1;

        if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
            & ATOM_IBIST_ERR_OFLOW)
                return 1;

        return 0;
}

static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
        switch (ppd & ATOM_PPD_TOPO_MASK) {
        case ATOM_PPD_TOPO_B2B_USD:
                dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
                return NTB_TOPO_B2B_USD;

        case ATOM_PPD_TOPO_B2B_DSD:
                dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
                return NTB_TOPO_B2B_DSD;

        case ATOM_PPD_TOPO_PRI_USD:
        case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
        case ATOM_PPD_TOPO_SEC_USD:
        case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
                dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
                return NTB_TOPO_NONE;
        }

        dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
        return NTB_TOPO_NONE;
}

static void atom_link_hb(struct work_struct *work)
{
        struct intel_ntb_dev *ndev = hb_ndev(work);
        unsigned long poll_ts;
        void __iomem *mmio;
        u32 status32;

        poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;

        /* Delay polling the link status if an interrupt was received,
         * unless the cached link status says the link is down.
         */
        if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
                schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
                return;
        }

        if (atom_poll_link(ndev))
                ntb_link_event(&ndev->ntb);

        if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
                schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
                return;
        }

        /* Link is down with error: recover the link! */

        mmio = ndev->self_mmio;

        /* Driver resets the NTB ModPhy lanes - magic! */
        iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
        iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
        iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
        iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);

        /* Driver waits 100ms to allow the NTB ModPhy to settle */
        msleep(100);

        /* Clear AER Errors, write to clear */
        status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
        dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
        status32 &= PCI_ERR_COR_REP_ROLL;
        iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);

        /* Clear unexpected electrical idle event in LTSSM, write to clear */
        status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
        dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
        status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
        iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);

        /* Clear DeSkew Buffer error, write to clear */
        status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
        dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
        status32 |= ATOM_DESKEWSTS_DBERR;
        iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);

        status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
        dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
        status32 &= ATOM_IBIST_ERR_OFLOW;
        iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);

        /* Releases the NTB state machine to allow the link to retrain */
        status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
        dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
        status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
        iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);

        /* There is a potential race between the 2 NTB devices recovering at the
         * same time.  If the times are the same, the link will not recover and
         * the driver will be stuck in this loop forever.  Add a random interval
         * to the recovery time to prevent this race.
         */
        schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
                              + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
}

static int atom_init_isr(struct intel_ntb_dev *ndev)
{
        int rc;

        rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
                           ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
        if (rc)
                return rc;

        /* ATOM doesn't have link status interrupt, poll on that platform */
        ndev->last_ts = jiffies;
        INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
        schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);

        return 0;
}

static void atom_deinit_isr(struct intel_ntb_dev *ndev)
{
        cancel_delayed_work_sync(&ndev->hb_timer);
        ndev_deinit_isr(ndev);
}

static int atom_init_ntb(struct intel_ntb_dev *ndev)
{
        ndev->mw_count = ATOM_MW_COUNT;
        ndev->spad_count = ATOM_SPAD_COUNT;
        ndev->db_count = ATOM_DB_COUNT;

        switch (ndev->ntb.topo) {
        case NTB_TOPO_B2B_USD:
        case NTB_TOPO_B2B_DSD:
                ndev->self_reg = &atom_pri_reg;
                ndev->peer_reg = &atom_b2b_reg;
                ndev->xlat_reg = &atom_sec_xlat;

                /* Enable Bus Master and Memory Space on the secondary side */
                iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
                          ndev->self_mmio + ATOM_SPCICMD_OFFSET);

                break;

        default:
                return -EINVAL;
        }

        ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

        return 0;
}

static int atom_init_dev(struct intel_ntb_dev *ndev)
{
        u32 ppd;
        int rc;

        rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
        if (rc)
                return -EIO;

        ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
        if (ndev->ntb.topo == NTB_TOPO_NONE)
                return -EINVAL;

        rc = atom_init_ntb(ndev);
        if (rc)
                return rc;

        rc = atom_init_isr(ndev);
        if (rc)
                return rc;

        if (ndev->ntb.topo != NTB_TOPO_SEC) {
                /* Initiate PCI-E link training */
                rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
                                            ppd | ATOM_PPD_INIT_LINK);
                if (rc)
                        return rc;
        }

        return 0;
}

static void atom_deinit_dev(struct intel_ntb_dev *ndev)
{
        atom_deinit_isr(ndev);
}

/* XEON */

static u64 xeon_db_ioread(void __iomem *mmio)
{
        return (u64)ioread16(mmio);
}

static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
{
        iowrite16((u16)bits, mmio);
}

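/*
 * Polling first acknowledges the link-change doorbell bit (by writing
 * db_link_mask back to the local bell register), then samples the PCIe
 * link status word from config space and reports whether it changed.
 */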
1443 static int xeon_poll_link(struct intel_ntb_dev *ndev)
1444 {
1445         u16 reg_val;
1446         int rc;
1447
1448         ndev->reg->db_iowrite(ndev->db_link_mask,
1449                               ndev->self_mmio +
1450                               ndev->self_reg->db_bell);
1451
1452         rc = pci_read_config_word(ndev->ntb.pdev,
1453                                   XEON_LINK_STATUS_OFFSET, &reg_val);
1454         if (rc)
1455                 return 0;
1456
1457         if (reg_val == ndev->lnk_sta)
1458                 return 0;
1459
1460         ndev->lnk_sta = reg_val;
1461
1462         return 1;
1463 }
1464
1465 static int xeon_link_is_up(struct intel_ntb_dev *ndev)
1466 {
1467         if (ndev->ntb.topo == NTB_TOPO_SEC)
1468                 return 1;
1469
1470         return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
1471 }
1472
1473 static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
1474 {
1475         switch (ppd & XEON_PPD_TOPO_MASK) {
1476         case XEON_PPD_TOPO_B2B_USD:
1477                 return NTB_TOPO_B2B_USD;
1478
1479         case XEON_PPD_TOPO_B2B_DSD:
1480                 return NTB_TOPO_B2B_DSD;
1481
1482         case XEON_PPD_TOPO_PRI_USD:
1483         case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
1484                 return NTB_TOPO_PRI;
1485
1486         case XEON_PPD_TOPO_SEC_USD:
1487         case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
1488                 return NTB_TOPO_SEC;
1489         }
1490
1491         return NTB_TOPO_NONE;
1492 }
1493
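     /* When the split bar bit is set in the PPD, the 64-bit bar 4 is split
      * into two 32-bit bars, 4 and 5, providing one extra memory window.
      */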
1494 static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
1495 {
1496         if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
1497                 dev_dbg(ndev_dev(ndev), "PPD %#x split bar\n", ppd);
1498                 return 1;
1499         }
1500         return 0;
1501 }
1502
1503 static int xeon_init_isr(struct intel_ntb_dev *ndev)
1504 {
1505         return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
1506                              XEON_DB_MSIX_VECTOR_COUNT,
1507                              XEON_DB_MSIX_VECTOR_SHIFT,
1508                              XEON_DB_TOTAL_SHIFT);
1509 }
1510
1511 static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
1512 {
1513         ndev_deinit_isr(ndev);
1514 }
1515
1516 static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
1517                              const struct intel_b2b_addr *addr,
1518                              const struct intel_b2b_addr *peer_addr)
1519 {
1520         struct pci_dev *pdev;
1521         void __iomem *mmio;
1522         resource_size_t bar_size;
1523         phys_addr_t bar_addr;
1524         int b2b_bar;
1525         u8 bar_sz;
1526
1527         pdev = ndev_pdev(ndev);
1528         mmio = ndev->self_mmio;
1529
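             /* b2b_idx == UINT_MAX means no memory window is dedicated to the
              * b2b peer registers; otherwise, steal all or half of the chosen
              * window for them.
              */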
1530         if (ndev->b2b_idx == UINT_MAX) {
1531                 dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
1532                 b2b_bar = 0;
1533                 ndev->b2b_off = 0;
1534         } else {
1535                 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
1536                 if (b2b_bar < 0)
1537                         return -EIO;
1538
1539                 dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
1540
1541                 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
1542
1543                 dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
1544
1545                 if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
1546                         dev_dbg(ndev_dev(ndev),
1547                                 "b2b using first half of bar\n");
1548                         ndev->b2b_off = bar_size >> 1;
1549                 } else if (XEON_B2B_MIN_SIZE <= bar_size) {
1550                         dev_dbg(ndev_dev(ndev),
1551                                 "b2b using whole bar\n");
1552                         ndev->b2b_off = 0;
1553                         --ndev->mw_count;
1554                 } else {
1555                         dev_dbg(ndev_dev(ndev),
1556                                 "b2b bar size is too small\n");
1557                         return -EIO;
1558                 }
1559         }
1560
1561         /* Reset the secondary bar sizes to match the primary bar sizes,
1562          * except disable or halve the size of the b2b secondary bar.
1563          *
1564          * Note: the code is repeated per bar size register because the
1565          * offsets are not in a consistent order (oddly, bar5sz follows ppd).
1566          */
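             /* The bar size registers hold log2 of the bar size: decrementing
              * the value halves the secondary bar, and writing zero disables
              * it entirely.
              */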
1567         pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
1568         dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
1569         if (b2b_bar == 2) {
1570                 if (ndev->b2b_off)
1571                         bar_sz -= 1;
1572                 else
1573                         bar_sz = 0;
1574         }
1575         pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
1576         pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
1577         dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);
1578
1579         if (!ndev->bar4_split) {
1580                 pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
1581                 dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
1582                 if (b2b_bar == 4) {
1583                         if (ndev->b2b_off)
1584                                 bar_sz -= 1;
1585                         else
1586                                 bar_sz = 0;
1587                 }
1588                 pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
1589                 pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
1590                 dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
1591         } else {
1592                 pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
1593                 dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
1594                 if (b2b_bar == 4) {
1595                         if (ndev->b2b_off)
1596                                 bar_sz -= 1;
1597                         else
1598                                 bar_sz = 0;
1599                 }
1600                 pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
1601                 pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
1602                 dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);
1603
1604                 pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
1605                 dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
1606                 if (b2b_bar == 5) {
1607                         if (ndev->b2b_off)
1608                                 bar_sz -= 1;
1609                         else
1610                                 bar_sz = 0;
1611                 }
1612                 pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
1613                 pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
1614                 dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
1615         }
1616
1617         /* SBAR01 is hit by the first part of the b2b bar */
1618         if (b2b_bar == 0)
1619                 bar_addr = addr->bar0_addr;
1620         else if (b2b_bar == 2)
1621                 bar_addr = addr->bar2_addr64;
1622         else if (b2b_bar == 4 && !ndev->bar4_split)
1623                 bar_addr = addr->bar4_addr64;
1624         else if (b2b_bar == 4)
1625                 bar_addr = addr->bar4_addr32;
1626         else if (b2b_bar == 5)
1627                 bar_addr = addr->bar5_addr32;
1628         else
1629                 return -EIO;
1630
1631         dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
1632         iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
1633
1634         /* The other SBARs are normally hit by the PBAR xlat, except for the
1635          * b2b bar.  The b2b bar is either disabled above or configured at
1636          * half size, and it starts at the PBAR xlat + offset.
1637          */
1638
1639         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1640         iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
1641         bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
1642         dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);
1643
1644         if (!ndev->bar4_split) {
1645                 bar_addr = addr->bar4_addr64 +
1646                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1647                 iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
1648                 bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
1649                 dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
1650         } else {
1651                 bar_addr = addr->bar4_addr32 +
1652                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1653                 iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
1654                 bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
1655                 dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);
1656
1657                 bar_addr = addr->bar5_addr32 +
1658                         (b2b_bar == 5 ? ndev->b2b_off : 0);
1659                 iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
1660                 bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
1661                 dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
1662         }
1663
1664         /* set up incoming bar limits == base addrs (zero-length windows) */
1665
1666         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1667         iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
1668         bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
1669         dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);
1670
1671         if (!ndev->bar4_split) {
1672                 bar_addr = addr->bar4_addr64 +
1673                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1674                 iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
1675                 bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
1676                 dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
1677         } else {
1678                 bar_addr = addr->bar4_addr32 +
1679                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1680                 iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
1681                 bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
1682                 dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);
1683
1684                 bar_addr = addr->bar5_addr32 +
1685                         (b2b_bar == 5 ? ndev->b2b_off : 0);
1686                 iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
1687                 bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
1688                 dev_dbg(ndev_dev(ndev), "SBAR5LMT %#010llx\n", bar_addr);
1689         }
1690
1691         /* zero incoming translation addrs */
1692         iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
1693
1694         if (!ndev->bar4_split) {
1695                 iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
1696         } else {
1697                 iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
1698                 iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
1699         }
1700
1701         /* zero outgoing translation limits (whole bar size windows) */
1702         iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
1703         if (!ndev->bar4_split) {
1704                 iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
1705         } else {
1706                 iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
1707                 iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
1708         }
1709
1710         /* set outgoing translation offsets */
1711         bar_addr = peer_addr->bar2_addr64;
1712         iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
1713         bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
1714         dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);
1715
1716         if (!ndev->bar4_split) {
1717                 bar_addr = peer_addr->bar4_addr64;
1718                 iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
1719                 bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
1720                 dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
1721         } else {
1722                 bar_addr = peer_addr->bar4_addr32;
1723                 iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
1724                 bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
1725                 dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);
1726
1727                 bar_addr = peer_addr->bar5_addr32;
1728                 iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
1729                 bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
1730                 dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
1731         }
1732
1733         /* set the translation offset for b2b registers */
1734         if (b2b_bar == 0)
1735                 bar_addr = peer_addr->bar0_addr;
1736         else if (b2b_bar == 2)
1737                 bar_addr = peer_addr->bar2_addr64;
1738         else if (b2b_bar == 4 && !ndev->bar4_split)
1739                 bar_addr = peer_addr->bar4_addr64;
1740         else if (b2b_bar == 4)
1741                 bar_addr = peer_addr->bar4_addr32;
1742         else if (b2b_bar == 5)
1743                 bar_addr = peer_addr->bar5_addr32;
1744         else
1745                 return -EIO;
1746
1747         /* B2B_XLAT_OFFSET is 64-bit, but can only take 32-bit writes */
1748         dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
1749         iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
1750         iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
1751
1752         if (b2b_bar) {
1753                 /* map peer ntb mmio config space registers */
1754                 ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
1755                                             XEON_B2B_MIN_SIZE);
1756                 if (!ndev->peer_mmio)
1757                         return -EIO;
1758
1759                 ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
1760         }
1761
1762         return 0;
1763 }
1764
1765 static int xeon_init_ntb(struct intel_ntb_dev *ndev)
1766 {
1767         int rc;
1768         u32 ntb_ctl;
1769
1770         if (ndev->bar4_split)
1771                 ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
1772         else
1773                 ndev->mw_count = XEON_MW_COUNT;
1774
1775         ndev->spad_count = XEON_SPAD_COUNT;
1776         ndev->db_count = XEON_DB_COUNT;
1777         ndev->db_link_mask = XEON_DB_LINK_BIT;
1778
1779         switch (ndev->ntb.topo) {
1780         case NTB_TOPO_PRI:
1781                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1782                         dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
1783                         return -EINVAL;
1784                 }
1785
1786                 /* enable link to allow secondary side device to appear */
1787                 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1788                 ntb_ctl &= ~NTB_CTL_DISABLE;
1789                 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1790
1791                 /* use half the spads for the peer */
1792                 ndev->spad_count >>= 1;
1793                 ndev->self_reg = &xeon_pri_reg;
1794                 ndev->peer_reg = &xeon_sec_reg;
1795                 ndev->xlat_reg = &xeon_sec_xlat;
1796                 break;
1797
1798         case NTB_TOPO_SEC:
1799                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1800                         dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
1801                         return -EINVAL;
1802                 }
1803                 /* use half the spads for the peer */
1804                 ndev->spad_count >>= 1;
1805                 ndev->self_reg = &xeon_sec_reg;
1806                 ndev->peer_reg = &xeon_pri_reg;
1807                 ndev->xlat_reg = &xeon_pri_xlat;
1808                 break;
1809
1810         case NTB_TOPO_B2B_USD:
1811         case NTB_TOPO_B2B_DSD:
1812                 ndev->self_reg = &xeon_pri_reg;
1813                 ndev->peer_reg = &xeon_b2b_reg;
1814                 ndev->xlat_reg = &xeon_sec_xlat;
1815
1816                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
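                             /* Workaround: reach the peer's primary register
                              * set through the dedicated b2b memory window,
                              * avoiding the sdoorbell lockup erratum.
                              */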
1817                         ndev->peer_reg = &xeon_pri_reg;
1818
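                             /* a negative b2b_mw_idx counts back from the
                              * last memory window
                              */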
1819                         if (b2b_mw_idx < 0)
1820                                 ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
1821                         else
1822                                 ndev->b2b_idx = b2b_mw_idx;
1823
1824                         if (ndev->b2b_idx >= ndev->mw_count) {
1825                                 dev_dbg(ndev_dev(ndev),
1826                                         "b2b_mw_idx %d invalid for mw_count %u\n",
1827                                         b2b_mw_idx, ndev->mw_count);
1828                                 return -EINVAL;
1829                         }
1830
1831                         dev_dbg(ndev_dev(ndev),
1832                                 "setting up b2b mw idx %d means %d\n",
1833                                 b2b_mw_idx, ndev->b2b_idx);
1834
1835                 } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
1836                         dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
1837                         ndev->db_count -= 1;
1838                 }
1839
1840                 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
1841                         rc = xeon_setup_b2b_mw(ndev,
1842                                                &xeon_b2b_dsd_addr,
1843                                                &xeon_b2b_usd_addr);
1844                 } else {
1845                         rc = xeon_setup_b2b_mw(ndev,
1846                                                &xeon_b2b_usd_addr,
1847                                                &xeon_b2b_dsd_addr);
1848                 }
1849                 if (rc)
1850                         return rc;
1851
1852                 /* Enable Bus Master and Memory Space on the secondary side */
1853                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1854                           ndev->self_mmio + XEON_SPCICMD_OFFSET);
1855
1856                 break;
1857
1858         default:
1859                 return -EINVAL;
1860         }
1861
1862         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
1863
1864         ndev->reg->db_iowrite(ndev->db_valid_mask,
1865                               ndev->self_mmio +
1866                               ndev->self_reg->db_mask);
1867
1868         return 0;
1869 }
1870
1871 static int xeon_init_dev(struct intel_ntb_dev *ndev)
1872 {
1873         struct pci_dev *pdev;
1874         u8 ppd;
1875         int rc, mem;
1876
1877         pdev = ndev_pdev(ndev);
1878
1879         switch (pdev->device) {
1880         /* There is a Xeon hardware erratum related to writes to SDOORBELL or
1881          * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
1882          * which may hang the system.  To work around this, use the second
1883          * memory window to access the interrupt and scratch pad registers on
1884          * the remote system.
1885          */
1886         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
1887         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
1888         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
1889         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
1890         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
1891         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
1892         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1893         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1894         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1895         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1896         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1897         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1898         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1899         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1900         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1901                 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
1902                 break;
1903         }
1904
1905         switch (pdev->device) {
1906         /* There is a hardware erratum related to accessing any register in
1907          * SB01BASE in the presence of bidirectional traffic crossing the NTB.
1908          */
1909         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1910         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1911         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1912         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1913         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1914         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1915         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1916         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1917         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1918                 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
1919                 break;
1920         }
1921
1922         switch (pdev->device) {
1923         /* HW erratum on bit 14 of the b2bdoorbell register: writes will not
1924          * be mirrored to the remote system.  Shrink the number of bits by
1925          * one, since bit 14 is the last bit.
1926          */
1927         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
1928         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
1929         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
1930         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
1931         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
1932         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
1933         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1934         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1935         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1936         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1937         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1938         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1939         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1940         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1941         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1942                 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
1943                 break;
1944         }
1945
1946         ndev->reg = &xeon_reg;
1947
1948         rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
1949         if (rc)
1950                 return -EIO;
1951
1952         ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
1953         dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
1954                 ntb_topo_string(ndev->ntb.topo));
1955         if (ndev->ntb.topo == NTB_TOPO_NONE)
1956                 return -EINVAL;
1957
1958         if (ndev->ntb.topo != NTB_TOPO_SEC) {
1959                 ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
1960                 dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
1961                         ppd, ndev->bar4_split);
1962         } else {
1963                 /* This is how the transparent (secondary) side figures out
1964                  * whether split BAR is in use: the hw on this side has no
1965                  * way to know, so it cannot set the PPD accordingly.
1966                  */
1967                 mem = pci_select_bars(pdev, IORESOURCE_MEM);
1968                 ndev->bar4_split = hweight32(mem) ==
1969                         HSX_SPLIT_BAR_MW_COUNT + 1;
1970                 dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
1971                         mem, ndev->bar4_split);
1972         }
1973
1974         rc = xeon_init_ntb(ndev);
1975         if (rc)
1976                 return rc;
1977
1978         return xeon_init_isr(ndev);
1979 }
1980
1981 static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
1982 {
1983         xeon_deinit_isr(ndev);
1984 }
1985
1986 static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
1987 {
1988         int rc;
1989
1990         pci_set_drvdata(pdev, ndev);
1991
1992         rc = pci_enable_device(pdev);
1993         if (rc)
1994                 goto err_pci_enable;
1995
1996         rc = pci_request_regions(pdev, NTB_NAME);
1997         if (rc)
1998                 goto err_pci_regions;
1999
2000         pci_set_master(pdev);
2001
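             /* Prefer 64-bit DMA, falling back to 32-bit masks if the
              * platform cannot address high memory.
              */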
2002         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2003         if (rc) {
2004                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2005                 if (rc)
2006                         goto err_dma_mask;
2007                 dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
2008         }
2009
2010         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2011         if (rc) {
2012                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2013                 if (rc)
2014                         goto err_dma_mask;
2015                 dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
2016         }
2017
2018         ndev->self_mmio = pci_iomap(pdev, 0, 0);
2019         if (!ndev->self_mmio) {
2020                 rc = -EIO;
2021                 goto err_mmio;
2022         }
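             /* Until a b2b bar is mapped, peer register access aliases the
              * local mmio mapping.
              */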
2023         ndev->peer_mmio = ndev->self_mmio;
2024         ndev->peer_addr = pci_resource_start(pdev, 0);
2025
2026         return 0;
2027
2028 err_mmio:
2029 err_dma_mask:
2030         pci_clear_master(pdev);
2031         pci_release_regions(pdev);
2032 err_pci_regions:
2033         pci_disable_device(pdev);
2034 err_pci_enable:
2035         pci_set_drvdata(pdev, NULL);
2036         return rc;
2037 }
2038
2039 static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
2040 {
2041         struct pci_dev *pdev = ndev_pdev(ndev);
2042
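             /* peer_mmio may alias self_mmio; only unmap a distinct mapping */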
2043         if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
2044                 pci_iounmap(pdev, ndev->peer_mmio);
2045         pci_iounmap(pdev, ndev->self_mmio);
2046
2047         pci_clear_master(pdev);
2048         pci_release_regions(pdev);
2049         pci_disable_device(pdev);
2050         pci_set_drvdata(pdev, NULL);
2051 }
2052
2053 static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
2054                                     struct pci_dev *pdev)
2055 {
2056         ndev->ntb.pdev = pdev;
2057         ndev->ntb.topo = NTB_TOPO_NONE;
2058         ndev->ntb.ops = &intel_ntb_ops;
2059
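             /* b2b_idx == UINT_MAX means no memory window is reserved for the
              * b2b registers; topology init may dedicate one later.
              */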
2060         ndev->b2b_off = 0;
2061         ndev->b2b_idx = UINT_MAX;
2062
2063         ndev->bar4_split = 0;
2064
2065         ndev->mw_count = 0;
2066         ndev->spad_count = 0;
2067         ndev->db_count = 0;
2068         ndev->db_vec_count = 0;
2069         ndev->db_vec_shift = 0;
2070
2071         ndev->ntb_ctl = 0;
2072         ndev->lnk_sta = 0;
2073
2074         ndev->db_valid_mask = 0;
2075         ndev->db_link_mask = 0;
2076         ndev->db_mask = 0;
2077
2078         spin_lock_init(&ndev->db_mask_lock);
2079 }
2080
2081 static int intel_ntb_pci_probe(struct pci_dev *pdev,
2082                                const struct pci_device_id *id)
2083 {
2084         struct intel_ntb_dev *ndev;
2085         int rc, node;
2086
2087         node = dev_to_node(&pdev->dev);
2088
2089         if (pdev_is_atom(pdev)) {
2090                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2091                 if (!ndev) {
2092                         rc = -ENOMEM;
2093                         goto err_ndev;
2094                 }
2095
2096                 ndev_init_struct(ndev, pdev);
2097
2098                 rc = intel_ntb_init_pci(ndev, pdev);
2099                 if (rc)
2100                         goto err_init_pci;
2101
2102                 rc = atom_init_dev(ndev);
2103                 if (rc)
2104                         goto err_init_dev;
2105
2106         } else if (pdev_is_xeon(pdev)) {
2107                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2108                 if (!ndev) {
2109                         rc = -ENOMEM;
2110                         goto err_ndev;
2111                 }
2112
2113                 ndev_init_struct(ndev, pdev);
2114
2115                 rc = intel_ntb_init_pci(ndev, pdev);
2116                 if (rc)
2117                         goto err_init_pci;
2118
2119                 rc = xeon_init_dev(ndev);
2120                 if (rc)
2121                         goto err_init_dev;
2122
2123         } else {
2124                 rc = -EINVAL;
2125                 goto err_ndev;
2126         }
2127
2128         ndev_reset_unsafe_flags(ndev);
2129
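             /* prime the cached link state before registering the device */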
2130         ndev->reg->poll_link(ndev);
2131
2132         ndev_init_debugfs(ndev);
2133
2134         rc = ntb_register_device(&ndev->ntb);
2135         if (rc)
2136                 goto err_register;
2137
2138         dev_info(&pdev->dev, "NTB device registered.\n");
2139
2140         return 0;
2141
2142 err_register:
2143         ndev_deinit_debugfs(ndev);
2144         if (pdev_is_atom(pdev))
2145                 atom_deinit_dev(ndev);
2146         else if (pdev_is_xeon(pdev))
2147                 xeon_deinit_dev(ndev);
2148 err_init_dev:
2149         intel_ntb_deinit_pci(ndev);
2150 err_init_pci:
2151         kfree(ndev);
2152 err_ndev:
2153         return rc;
2154 }
2155
2156 static void intel_ntb_pci_remove(struct pci_dev *pdev)
2157 {
2158         struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
2159
2160         ntb_unregister_device(&ndev->ntb);
2161         ndev_deinit_debugfs(ndev);
2162         if (pdev_is_atom(pdev))
2163                 atom_deinit_dev(ndev);
2164         else if (pdev_is_xeon(pdev))
2165                 xeon_deinit_dev(ndev);
2166         intel_ntb_deinit_pci(ndev);
2167         kfree(ndev);
2168 }
2169
2170 static const struct intel_ntb_reg atom_reg = {
2171         .poll_link              = atom_poll_link,
2172         .link_is_up             = atom_link_is_up,
2173         .db_ioread              = atom_db_ioread,
2174         .db_iowrite             = atom_db_iowrite,
2175         .db_size                = sizeof(u64),
2176         .ntb_ctl                = ATOM_NTBCNTL_OFFSET,
2177         .mw_bar                 = {2, 4},
2178 };
2179
2180 static const struct intel_ntb_alt_reg atom_pri_reg = {
2181         .db_bell                = ATOM_PDOORBELL_OFFSET,
2182         .db_mask                = ATOM_PDBMSK_OFFSET,
2183         .spad                   = ATOM_SPAD_OFFSET,
2184 };
2185
2186 static const struct intel_ntb_alt_reg atom_b2b_reg = {
2187         .db_bell                = ATOM_B2B_DOORBELL_OFFSET,
2188         .spad                   = ATOM_B2B_SPAD_OFFSET,
2189 };
2190
2191 static const struct intel_ntb_xlat_reg atom_sec_xlat = {
2192         /* FIXME : .bar0_base   = ATOM_SBAR0BASE_OFFSET, */
2193         /* FIXME : .bar2_limit  = ATOM_SBAR2LMT_OFFSET, */
2194         .bar2_xlat              = ATOM_SBAR2XLAT_OFFSET,
2195 };
2196
2197 static const struct intel_ntb_reg xeon_reg = {
2198         .poll_link              = xeon_poll_link,
2199         .link_is_up             = xeon_link_is_up,
2200         .db_ioread              = xeon_db_ioread,
2201         .db_iowrite             = xeon_db_iowrite,
2202         .db_size                = sizeof(u32),
2203         .ntb_ctl                = XEON_NTBCNTL_OFFSET,
2204         .mw_bar                 = {2, 4, 5},
2205 };
2206
2207 static const struct intel_ntb_alt_reg xeon_pri_reg = {
2208         .db_bell                = XEON_PDOORBELL_OFFSET,
2209         .db_mask                = XEON_PDBMSK_OFFSET,
2210         .spad                   = XEON_SPAD_OFFSET,
2211 };
2212
2213 static const struct intel_ntb_alt_reg xeon_sec_reg = {
2214         .db_bell                = XEON_SDOORBELL_OFFSET,
2215         .db_mask                = XEON_SDBMSK_OFFSET,
2216         /* second half of the scratchpads */
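             /* (XEON_SPAD_COUNT / 2) spads * 4 bytes == XEON_SPAD_COUNT << 1 */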
2217         .spad                   = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
2218 };
2219
2220 static const struct intel_ntb_alt_reg xeon_b2b_reg = {
2221         .db_bell                = XEON_B2B_DOORBELL_OFFSET,
2222         .spad                   = XEON_B2B_SPAD_OFFSET,
2223 };
2224
2225 static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
2226         /* Note: no primary .bar0_base visible to the secondary side.
2227          *
2228          * The secondary side cannot get the base address stored in primary
2229          * bars.  The base address is necessary to set the limit register to
2230          * any value other than zero, or unlimited.
2231          *
2232          * Without the base address, the secondary side cannot disable the
2233          * window by setting the limit equal to the base, nor can it limit
2234          * the size of the memory window by setting the limit to base + size.
2235          */
2236         .bar2_limit             = XEON_PBAR23LMT_OFFSET,
2237         .bar2_xlat              = XEON_PBAR23XLAT_OFFSET,
2238 };
2239
2240 static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
2241         .bar0_base              = XEON_SBAR0BASE_OFFSET,
2242         .bar2_limit             = XEON_SBAR23LMT_OFFSET,
2243         .bar2_xlat              = XEON_SBAR23XLAT_OFFSET,
2244 };
2245
2246 static struct intel_b2b_addr xeon_b2b_usd_addr = {
2247         .bar2_addr64            = XEON_B2B_BAR2_ADDR64,
2248         .bar4_addr64            = XEON_B2B_BAR4_ADDR64,
2249         .bar4_addr32            = XEON_B2B_BAR4_ADDR32,
2250         .bar5_addr32            = XEON_B2B_BAR5_ADDR32,
2251 };
2252
2253 static struct intel_b2b_addr xeon_b2b_dsd_addr = {
2254         .bar2_addr64            = XEON_B2B_BAR2_ADDR64,
2255         .bar4_addr64            = XEON_B2B_BAR4_ADDR64,
2256         .bar4_addr32            = XEON_B2B_BAR4_ADDR32,
2257         .bar5_addr32            = XEON_B2B_BAR5_ADDR32,
2258 };
2259
2260 /* operations for primary side of local ntb */
2261 static const struct ntb_dev_ops intel_ntb_ops = {
2262         .mw_count               = intel_ntb_mw_count,
2263         .mw_get_range           = intel_ntb_mw_get_range,
2264         .mw_set_trans           = intel_ntb_mw_set_trans,
2265         .link_is_up             = intel_ntb_link_is_up,
2266         .link_enable            = intel_ntb_link_enable,
2267         .link_disable           = intel_ntb_link_disable,
2268         .db_is_unsafe           = intel_ntb_db_is_unsafe,
2269         .db_valid_mask          = intel_ntb_db_valid_mask,
2270         .db_vector_count        = intel_ntb_db_vector_count,
2271         .db_vector_mask         = intel_ntb_db_vector_mask,
2272         .db_read                = intel_ntb_db_read,
2273         .db_clear               = intel_ntb_db_clear,
2274         .db_set_mask            = intel_ntb_db_set_mask,
2275         .db_clear_mask          = intel_ntb_db_clear_mask,
2276         .peer_db_addr           = intel_ntb_peer_db_addr,
2277         .peer_db_set            = intel_ntb_peer_db_set,
2278         .spad_is_unsafe         = intel_ntb_spad_is_unsafe,
2279         .spad_count             = intel_ntb_spad_count,
2280         .spad_read              = intel_ntb_spad_read,
2281         .spad_write             = intel_ntb_spad_write,
2282         .peer_spad_addr         = intel_ntb_peer_spad_addr,
2283         .peer_spad_read         = intel_ntb_peer_spad_read,
2284         .peer_spad_write        = intel_ntb_peer_spad_write,
2285 };
2286
2287 static const struct file_operations intel_ntb_debugfs_info = {
2288         .owner = THIS_MODULE,
2289         .open = simple_open,
2290         .read = ndev_debugfs_read,
2291 };
2292
2293 static const struct pci_device_id intel_ntb_pci_tbl[] = {
2294         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
2295         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
2296         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
2297         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
2298         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
2299         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
2300         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
2301         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
2302         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
2303         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
2304         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
2305         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
2306         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
2307         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
2308         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
2309         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
2310         {0}
2311 };
2312 MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
2313
2314 static struct pci_driver intel_ntb_pci_driver = {
2315         .name = KBUILD_MODNAME,
2316         .id_table = intel_ntb_pci_tbl,
2317         .probe = intel_ntb_pci_probe,
2318         .remove = intel_ntb_pci_remove,
2319 };
2320
2321 static int __init intel_ntb_pci_driver_init(void)
2322 {
2323         pr_info("%s %s\n", NTB_DESC, NTB_VER);
2324
2325         if (debugfs_initialized())
2326                 debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2327
2328         return pci_register_driver(&intel_ntb_pci_driver);
2329 }
2330 module_init(intel_ntb_pci_driver_init);
2331
2332 static void __exit intel_ntb_pci_driver_exit(void)
2333 {
2334         pci_unregister_driver(&intel_ntb_pci_driver);
2335
2336         debugfs_remove_recursive(debugfs_dir);
2337 }
2338 module_exit(intel_ntb_pci_driver_exit);
2339