GNU Linux-libre 6.8.9-gnu
drivers/vdpa/ifcvf/ifcvf_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR   "Intel Corporation"
#define IFCVF_DRIVER_NAME       "ifcvf"

static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;

        if (vf->config_cb.callback)
                return vf->config_cb.callback(vf->config_cb.private);

        return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
        struct vring_info *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

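/*
 * Shared-vector handler: a single MSI-X vector serves every virtqueue,
 * so walk all the rings and invoke each registered callback.
 */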
static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;
        struct vring_info *vring;
        int i;

        for (i = 0; i < vf->nr_vring; i++) {
                vring = &vf->vring[i];
                if (vring->cb.callback)
                        vring->cb.callback(vring->cb.private);
        }

        return IRQ_HANDLED;
}

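/*
 * Device-shared handler: one vector serves the config interrupt and all
 * virtqueues. Per the virtio PCI spec, reading the ISR status register
 * both reports the cause and acks the interrupt. The ISR only
 * distinguishes config changes from queue events, so all vq callbacks
 * are dispatched regardless of which queue actually fired.
 */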
static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;
        u8 isr;

        isr = vp_ioread8(vf->isr);
        if (isr & VIRTIO_PCI_ISR_CONFIG)
                ifcvf_config_changed(irq, arg);

        return ifcvf_vqs_reused_intr_handler(irq, arg);
}

static void ifcvf_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int i;

        for (i = 0; i < vf->nr_vring; i++) {
                if (vf->vring[i].irq != -EINVAL) {
                        devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
                        vf->vring[i].irq = -EINVAL;
                }
        }
}

static void ifcvf_free_vqs_reused_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;

        if (vf->vqs_reused_irq != -EINVAL) {
                devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
                vf->vqs_reused_irq = -EINVAL;
        }
}

static void ifcvf_free_vq_irq(struct ifcvf_hw *vf)
{
        if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
                ifcvf_free_per_vq_irq(vf);
        else
                ifcvf_free_vqs_reused_irq(vf);
}

static void ifcvf_free_config_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;

        if (vf->config_irq == -EINVAL)
                return;

        /* If the irq is shared by all vqs and the config interrupt,
         * it was already freed in ifcvf_free_vq_irq(), so the config
         * irq only needs freeing when msix_vector_status !=
         * MSIX_VECTOR_DEV_SHARED.
         */
        if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
                devm_free_irq(&pdev->dev, vf->config_irq, vf);
                vf->config_irq = -EINVAL;
        }
}

static void ifcvf_free_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;

        ifcvf_free_vq_irq(vf);
        ifcvf_free_config_irq(vf);
        ifcvf_free_irq_vectors(pdev);
        vf->num_msix_vectors = 0;
}

/* ifcvf MSIX vector allocator: this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors on success, or a
 * negative value on failure.
 */
static int ifcvf_alloc_vectors(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int max_intr, ret;

        /* all queues and config interrupt */
        max_intr = vf->nr_vring + 1;
        ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);

        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }

        if (ret < max_intr)
                IFCVF_INFO(pdev,
                           "Requested %u vectors, however only %u allocated, lower performance\n",
                           max_intr, ret);

        return ret;
}

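/*
 * Per-vq mode: vq i owns MSI-X vector i, leaving vector nr_vring for
 * the config interrupt (requested in ifcvf_request_config_irq()).
 */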
static int ifcvf_request_per_vq_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int i, vector, ret, irq;

        vf->vqs_reused_irq = -EINVAL;
        for (i = 0; i < vf->nr_vring; i++) {
                snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i);
                vector = i;
                irq = pci_irq_vector(pdev, vector);
                ret = devm_request_irq(&pdev->dev, irq,
                                       ifcvf_vq_intr_handler, 0,
                                       vf->vring[i].msix_name,
                                       &vf->vring[i]);
                if (ret) {
                        IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
                        goto err;
                }

                vf->vring[i].irq = irq;
                ret = ifcvf_set_vq_vector(vf, i, vector);
                if (ret == VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
                        goto err;
                }
        }

        return 0;
err:
        ifcvf_free_irq(vf);

        return -EFAULT;
}

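/*
 * Shared-vq mode: all vqs share vector 0 and the config interrupt
 * gets vector 1, so the per-vq irq fields stay -EINVAL.
 */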
static int ifcvf_request_vqs_reused_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int i, vector, ret, irq;

        vector = 0;
        snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq\n", pci_name(pdev));
        irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, irq,
                               ifcvf_vqs_reused_intr_handler, 0,
                               vf->vring[0].msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
                goto err;
        }

        vf->vqs_reused_irq = irq;
        for (i = 0; i < vf->nr_vring; i++) {
                vf->vring[i].irq = -EINVAL;
                ret = ifcvf_set_vq_vector(vf, i, vector);
                if (ret == VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
                        goto err;
                }
        }

        return 0;
err:
        ifcvf_free_irq(vf);

        return -EFAULT;
}

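/*
 * Device-shared mode: a single vector serves the vqs and the config
 * interrupt alike; ifcvf_dev_intr_handler() demultiplexes via the ISR
 * status register.
 */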
static int ifcvf_request_dev_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int i, vector, ret, irq;

        vector = 0;
        snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq\n", pci_name(pdev));
        irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, irq,
                               ifcvf_dev_intr_handler, 0,
                               vf->vring[0].msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request irq for the device\n");
                goto err;
        }

        vf->vqs_reused_irq = irq;
        for (i = 0; i < vf->nr_vring; i++) {
                vf->vring[i].irq = -EINVAL;
                ret = ifcvf_set_vq_vector(vf, i, vector);
                if (ret == VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
                        goto err;
                }
        }

        vf->config_irq = irq;
        ret = ifcvf_set_config_vector(vf, vector);
        if (ret == VIRTIO_MSI_NO_VECTOR) {
                IFCVF_ERR(pdev, "No msix vector for device config\n");
                goto err;
        }

        return 0;
err:
        ifcvf_free_irq(vf);

        return -EFAULT;
}

static int ifcvf_request_vq_irq(struct ifcvf_hw *vf)
{
        int ret;

        if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
                ret = ifcvf_request_per_vq_irq(vf);
        else
                ret = ifcvf_request_vqs_reused_irq(vf);

        return ret;
}

static int ifcvf_request_config_irq(struct ifcvf_hw *vf)
{
        struct pci_dev *pdev = vf->pdev;
        int config_vector, ret;

        if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
                config_vector = vf->nr_vring;
        else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
                /* vector 0 for vqs and 1 for config interrupt */
                config_vector = 1;
        else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
                /* re-use the vqs vector */
                return 0;
        else
                return -EINVAL;

        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
                 pci_name(pdev));
        vf->config_irq = pci_irq_vector(pdev, config_vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
                goto err;
        }

        ret = ifcvf_set_config_vector(vf, config_vector);
        if (ret == VIRTIO_MSI_NO_VECTOR) {
                IFCVF_ERR(pdev, "No msix vector for device config\n");
                goto err;
        }

        return 0;
err:
        ifcvf_free_irq(vf);

        return -EFAULT;
}

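/*
 * Degrade the interrupt policy to fit however many vectors were
 * actually allocated: ideally one per vq plus one for config; if
 * short, one shared vector for all vqs plus one for config; with a
 * single vector, share it between everything.
 */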
static int ifcvf_request_irq(struct ifcvf_hw *vf)
{
        int nvectors, ret, max_intr;

        nvectors = ifcvf_alloc_vectors(vf);
        if (nvectors <= 0)
                return -EFAULT;

        vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
        max_intr = vf->nr_vring + 1;
        if (nvectors < max_intr)
                vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;

        if (nvectors == 1) {
                vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
                ret = ifcvf_request_dev_irq(vf);

                return ret;
        }

        ret = ifcvf_request_vq_irq(vf);
        if (ret)
                return ret;

        ret = ifcvf_request_config_irq(vf);
        if (ret)
                return ret;

        vf->num_msix_vectors = nvectors;

        return 0;
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
        return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

        return adapter->vf;
}

static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;
        u32 type = vf->dev_type;
        u64 features;

        if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
                features = ifcvf_get_dev_features(vf);
        else {
                features = 0;
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
        }

        return features;
}

static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        int ret;

        ret = ifcvf_verify_min_features(vf, features);
        if (ret)
                return ret;

        ifcvf_set_driver_features(vf, features);

        return 0;
}

static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        u64 features;

        features = ifcvf_get_driver_features(vf);

        return features;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_status(vf);
}

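/*
 * Irqs are requested lazily, on the transition into DRIVER_OK, when
 * the guest driver has presumably finished configuring the vqs and
 * their callbacks.
 */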
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
        struct ifcvf_hw *vf;
        u8 status_old;
        int ret;

        vf = vdpa_to_vf(vdpa_dev);
        status_old = ifcvf_get_status(vf);

        if (status_old == status)
                return;

        if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ret = ifcvf_request_irq(vf);
                if (ret) {
                        IFCVF_ERR(vf->pdev, "failed to request irq with error %d\n", ret);
                        return;
                }
        }

        ifcvf_set_status(vf, status);
}

static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        u8 status = ifcvf_get_status(vf);

        ifcvf_stop(vf);

        if (status & VIRTIO_CONFIG_S_DRIVER_OK)
                ifcvf_free_irq(vf);

        ifcvf_reset(vf);

        return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_max_vq_size(vf);
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        state->split.avail_index = ifcvf_get_vq_state(vf, qid);
        return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   const struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                                    u16 qid, bool ready)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_set_vq_ready(vf, qid, ready);
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_vq_ready(vf, qid);
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                                  u32 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_set_vq_num(vf, qid, num);
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                                     u64 desc_area, u64 driver_area,
                                     u64 device_area)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_address(vf, qid, desc_area, driver_area, device_area);
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_notify_queue(vf, qid);
}

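/*
 * Per the virtio spec, the device bumps config_generation whenever its
 * config space changes, so callers can detect torn reads across
 * multi-field accesses.
 */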
static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vp_ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->dev_type;
}

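/*
 * These VFs present the generic virtio PCI vendor ID (Red Hat/
 * Qumranet); the actual vendor, Intel, is reported through the
 * subsystem vendor ID (see ifcvf_pci_ids below).
 */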
static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;

        return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->config_size;
}

static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
        return 0;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_read_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset, const void *buf,
                                  unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_write_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                                     struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->config_cb.callback = cb->callback;
        vf->config_cb.private = cb->private;
}

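/*
 * A per-vq irq is only meaningful when every vq owns its own vector;
 * with a shared irq (vqs_reused_irq >= 0) report -EINVAL instead.
 */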
static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
                                 u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        if (vf->vqs_reused_irq < 0)
                return vf->vring[qid].irq;
        else
                return -EINVAL;
}

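/*
 * Expose the vq's doorbell region for direct mapping. A zero
 * notify_off_multiplier means all vqs share a single notify register;
 * fall back to a full page so the area can still be mapped at page
 * granularity.
 */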
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
                                                               u16 idx)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct vdpa_notification_area area;

        area.addr = vf->vring[idx].notify_pa;
        if (!vf->notify_off_multiplier)
                area.size = PAGE_SIZE;
        else
                area.size = vf->notify_off_multiplier;

        return area;
}

/*
 * IFCVF currently has no on-chip IOMMU, so set_map()/dma_map()/
 * dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_device_features = ifcvf_vdpa_get_device_features,
        .set_driver_features = ifcvf_vdpa_set_driver_features,
        .get_driver_features = ifcvf_vdpa_get_driver_features,
        .get_status     = ifcvf_vdpa_get_status,
        .set_status     = ifcvf_vdpa_set_status,
        .reset          = ifcvf_vdpa_reset,
        .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
        .get_vq_state   = ifcvf_vdpa_get_vq_state,
        .set_vq_state   = ifcvf_vdpa_set_vq_state,
        .set_vq_cb      = ifcvf_vdpa_set_vq_cb,
        .set_vq_ready   = ifcvf_vdpa_set_vq_ready,
        .get_vq_ready   = ifcvf_vdpa_get_vq_ready,
        .set_vq_num     = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
        .get_vq_irq     = ifcvf_vdpa_get_vq_irq,
        .kick_vq        = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id  = ifcvf_vdpa_get_device_id,
        .get_vendor_id  = ifcvf_vdpa_get_vendor_id,
        .get_vq_align   = ifcvf_vdpa_get_vq_align,
        .get_vq_group   = ifcvf_vdpa_get_vq_group,
        .get_config_size        = ifcvf_vdpa_get_config_size,
        .get_config     = ifcvf_vdpa_get_config,
        .set_config     = ifcvf_vdpa_set_config,
        .set_config_cb  = ifcvf_vdpa_set_config_cb,
        .get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
        {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
        {0},
};

static struct virtio_device_id id_table_blk[] = {
        {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
        {0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
        u32 dev_type;

        /* This driver drives both modern virtio devices and transitional
         * devices in modern mode.
         * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so
         * legacy devices and transitional devices in legacy mode will
         * not work for vDPA; this driver does not drive devices with
         * the legacy interface.
         */

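        /* Modern virtio PCI devices use device ID 0x1040 + the virtio
         * device ID; transitional devices (0x1000..0x103f) carry the
         * virtio device ID in the PCI subsystem device ID instead.
         */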
        if (pdev->device < 0x1040)
                dev_type = pdev->subsystem_device;
        else
                dev_type = pdev->device - 0x1040;

        return dev_type;
}

static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
                              const struct vdpa_dev_set_config *config)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct ifcvf_adapter *adapter;
        struct vdpa_device *vdpa_dev;
        struct pci_dev *pdev;
        struct ifcvf_hw *vf;
        u64 device_features;
        int ret;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        vf = &ifcvf_mgmt_dev->vf;
        pdev = vf->pdev;
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    &pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false);
        if (IS_ERR(adapter)) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return PTR_ERR(adapter);
        }

        ifcvf_mgmt_dev->adapter = adapter;
        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;
        adapter->vdpa.mdev = mdev;
        adapter->vf = vf;
        vdpa_dev = &adapter->vdpa;

        device_features = vf->hw_features;
        if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
                if (config->device_features & ~device_features) {
                        IFCVF_ERR(pdev, "The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
                                  config->device_features, device_features);
                        put_device(&adapter->vdpa.dev);
                        return -EINVAL;
                }
                device_features &= config->device_features;
        }
        vf->dev_features = device_features;

        if (name)
                ret = dev_set_name(&vdpa_dev->dev, "%s", name);
        else
                ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

        ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
        if (ret) {
                put_device(&adapter->vdpa.dev);
                IFCVF_ERR(pdev, "Failed to register to vDPA bus");
                return ret;
        }

        return 0;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        _vdpa_unregister_device(dev);
        ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
        .dev_add = ifcvf_vdpa_dev_add,
        .dev_del = ifcvf_vdpa_dev_del
};

static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct device *dev = &pdev->dev;
        struct ifcvf_hw *vf;
        u32 dev_type;
        int ret, i;

        ret = pcim_enable_device(pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to enable device\n");
                return ret;
        }
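        /* Map only the BARs this device actually uses (0, 2 and 4) */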
        ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
                                 IFCVF_DRIVER_NAME);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MMIO region\n");
                return ret;
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev, "No usable DMA configuration\n");
                return ret;
        }

        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to add devres for freeing irq vectors\n");
                return ret;
        }

        pci_set_master(pdev);
        ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
        if (!ifcvf_mgmt_dev) {
                IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
                return -ENOMEM;
        }

        vf = &ifcvf_mgmt_dev->vf;
        vf->dev_type = get_dev_type(pdev);
        vf->base = pcim_iomap_table(pdev);
        vf->pdev = pdev;

        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
                goto err;
        }

        for (i = 0; i < vf->nr_vring; i++)
                vf->vring[i].irq = -EINVAL;

        vf->hw_features = ifcvf_get_hw_features(vf);
        vf->config_size = ifcvf_get_config_size(vf);

        dev_type = get_dev_type(pdev);
        switch (dev_type) {
        case VIRTIO_ID_NET:
                ifcvf_mgmt_dev->mdev.id_table = id_table_net;
                break;
        case VIRTIO_ID_BLOCK:
                ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
                break;
        default:
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
                ret = -EOPNOTSUPP;
                goto err;
        }

        ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
        ifcvf_mgmt_dev->mdev.device = dev;
        ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
        ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
        ifcvf_mgmt_dev->mdev.config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);

        ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to initialize the management interfaces\n");
                goto err;
        }

        pci_set_drvdata(pdev, ifcvf_mgmt_dev);

        return 0;

err:
        kfree(ifcvf_mgmt_dev->vf.vring);
        kfree(ifcvf_mgmt_dev);
        return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = pci_get_drvdata(pdev);
        vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
        kfree(ifcvf_mgmt_dev->vf.vring);
        kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
        /* N3000 network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         N3000_DEVICE_ID,
                         PCI_VENDOR_ID_INTEL,
                         N3000_SUBSYS_DEVICE_ID) },
        /* C5000X-PL network device
         * F2000X-PL network device
         */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_NET,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_NET) },
        /* C5000X-PL block device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_BLOCK,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_BLOCK) },

        { 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
        .name     = IFCVF_DRIVER_NAME,
        .id_table = ifcvf_pci_ids,
        .probe    = ifcvf_probe,
        .remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");