GNU Linux-libre 6.8.9-gnu
[releases.git] / drivers / net / ethernet / amd / pds_core / core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2023 Advanced Micro Devices, Inc */
3
4 #include <linux/pci.h>
5 #include <linux/vmalloc.h>
6
7 #include "core.h"
8
9 static BLOCKING_NOTIFIER_HEAD(pds_notify_chain);
10
/**
 * pdsc_register_notify() - register for pds_core event notifications
 * @nb: caller's notifier block
 *
 * Events are delivered through pdsc_notify() on the shared blocking
 * notifier chain.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int pdsc_register_notify(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_register_notify);
16
/**
 * pdsc_unregister_notify() - remove a block from the pds_core event chain
 * @nb: notifier block previously passed to pdsc_register_notify()
 */
void pdsc_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_unregister_notify);
22
/* Deliver @event/@data to all registered notifier blocks; uses a
 * blocking chain, so callers must be in sleepable context.
 */
void pdsc_notify(unsigned long event, void *data)
{
	blocking_notifier_call_chain(&pds_notify_chain, event, data);
}
27
/**
 * pdsc_intr_free() - release an interrupt slot from pdsc_intr_alloc()
 * @pdsc:  core device
 * @index: interrupt index to free
 *
 * Masks and cleans the device-side interrupt control, frees the OS IRQ,
 * and returns the intr_info slot to its all-zero "unused" state.
 * Safe to call on an already-free slot (vector == 0 means unused).
 */
void pdsc_intr_free(struct pdsc *pdsc, int index)
{
	struct pdsc_intr_info *intr_info;

	/* Reject out-of-range indices rather than touch random memory */
	if (index >= pdsc->nintrs || index < 0) {
		WARN(true, "bad intr index %d\n", index);
		return;
	}

	intr_info = &pdsc->intr_info[index];
	/* vector == 0 marks an unused slot - nothing to free */
	if (!intr_info->vector)
		return;
	dev_dbg(pdsc->dev, "%s: idx %d vec %d name %s\n",
		__func__, index, intr_info->vector, intr_info->name);

	/* Quiesce the device side before releasing the OS IRQ */
	pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
	pds_core_intr_clean(&pdsc->intr_ctrl[index]);

	free_irq(intr_info->vector, intr_info->data);

	/* Mark the slot available again */
	memset(intr_info, 0, sizeof(*intr_info));
}
50
51 int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
52                     irq_handler_t handler, void *data)
53 {
54         struct pdsc_intr_info *intr_info;
55         unsigned int index;
56         int err;
57
58         /* Find the first available interrupt */
59         for (index = 0; index < pdsc->nintrs; index++)
60                 if (!pdsc->intr_info[index].vector)
61                         break;
62         if (index >= pdsc->nintrs) {
63                 dev_warn(pdsc->dev, "%s: no intr, index=%d nintrs=%d\n",
64                          __func__, index, pdsc->nintrs);
65                 return -ENOSPC;
66         }
67
68         pds_core_intr_clean_flags(&pdsc->intr_ctrl[index],
69                                   PDS_CORE_INTR_CRED_RESET_COALESCE);
70
71         intr_info = &pdsc->intr_info[index];
72
73         intr_info->index = index;
74         intr_info->data = data;
75         strscpy(intr_info->name, name, sizeof(intr_info->name));
76
77         /* Get the OS vector number for the interrupt */
78         err = pci_irq_vector(pdsc->pdev, index);
79         if (err < 0) {
80                 dev_err(pdsc->dev, "failed to get intr vector index %d: %pe\n",
81                         index, ERR_PTR(err));
82                 goto err_out_free_intr;
83         }
84         intr_info->vector = err;
85
86         /* Init the device's intr mask */
87         pds_core_intr_clean(&pdsc->intr_ctrl[index]);
88         pds_core_intr_mask_assert(&pdsc->intr_ctrl[index], 1);
89         pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
90
91         /* Register the isr with a name */
92         err = request_irq(intr_info->vector, handler, 0, intr_info->name, data);
93         if (err) {
94                 dev_err(pdsc->dev, "failed to get intr irq vector %d: %pe\n",
95                         intr_info->vector, ERR_PTR(err));
96                 goto err_out_free_intr;
97         }
98
99         return index;
100
101 err_out_free_intr:
102         pdsc_intr_free(pdsc, index);
103         return err;
104 }
105
106 static void pdsc_qcq_intr_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
107 {
108         if (!(qcq->flags & PDS_CORE_QCQ_F_INTR) ||
109             qcq->intx == PDS_CORE_INTR_INDEX_NOT_ASSIGNED)
110                 return;
111
112         pdsc_intr_free(pdsc, qcq->intx);
113         qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
114 }
115
116 static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
117 {
118         char name[PDSC_INTR_NAME_MAX_SZ];
119         int index;
120
121         if (!(qcq->flags & PDS_CORE_QCQ_F_INTR)) {
122                 qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
123                 return 0;
124         }
125
126         snprintf(name, sizeof(name), "%s-%d-%s",
127                  PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
128         index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
129         if (index < 0)
130                 return index;
131         qcq->intx = index;
132
133         return 0;
134 }
135
136 void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
137 {
138         struct device *dev = pdsc->dev;
139
140         if (!(qcq && qcq->pdsc))
141                 return;
142
143         pdsc_debugfs_del_qcq(qcq);
144
145         pdsc_qcq_intr_free(pdsc, qcq);
146
147         if (qcq->q_base)
148                 dma_free_coherent(dev, qcq->q_size,
149                                   qcq->q_base, qcq->q_base_pa);
150
151         if (qcq->cq_base)
152                 dma_free_coherent(dev, qcq->cq_size,
153                                   qcq->cq_base, qcq->cq_base_pa);
154
155         vfree(qcq->cq.info);
156         vfree(qcq->q.info);
157
158         memset(qcq, 0, sizeof(*qcq));
159 }
160
161 static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
162 {
163         struct pdsc_q_info *cur;
164         unsigned int i;
165
166         q->base = base;
167         q->base_pa = base_pa;
168
169         for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
170                 cur->desc = base + (i * q->desc_size);
171 }
172
173 static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
174 {
175         struct pdsc_cq_info *cur;
176         unsigned int i;
177
178         cq->base = base;
179         cq->base_pa = base_pa;
180
181         for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
182                 cur->comp = base + (i * cq->desc_size);
183 }
184
/**
 * pdsc_qcq_alloc() - allocate and set up a queue/completion-queue pair
 * @pdsc:         core device
 * @type:         device queue type (PDS_CORE_QTYPE_*)
 * @index:        queue index, appended to @name
 * @name:         base name for the queue
 * @flags:        PDS_CORE_QCQ_F_* behavior flags
 * @num_descs:    number of descriptors in both q and cq
 * @desc_size:    size of one q descriptor
 * @cq_desc_size: size of one cq descriptor
 * @pid:          id recorded in q.pid
 * @qcq:          caller-provided qcq struct to fill in
 *
 * Allocates the q/cq info arrays, an optional interrupt, and the DMA
 * descriptor rings, then maps the info entries onto the rings.  On
 * failure everything allocated here is unwound and @qcq is zeroed.
 *
 * Return: 0 on success, negative errno on failure.
 */
int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
		   const char *name, unsigned int flags, unsigned int num_descs,
		   unsigned int desc_size, unsigned int cq_desc_size,
		   unsigned int pid, struct pdsc_qcq *qcq)
{
	struct device *dev = pdsc->dev;
	void *q_base, *cq_base;
	dma_addr_t cq_base_pa;
	dma_addr_t q_base_pa;
	int err;

	qcq->q.info = vcalloc(num_descs, sizeof(*qcq->q.info));
	if (!qcq->q.info) {
		err = -ENOMEM;
		goto err_out;
	}

	qcq->pdsc = pdsc;
	qcq->flags = flags;
	INIT_WORK(&qcq->work, pdsc_work_thread);

	qcq->q.type = type;
	qcq->q.index = index;
	qcq->q.num_descs = num_descs;
	qcq->q.desc_size = desc_size;
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->q.pid = pid;
	snprintf(qcq->q.name, sizeof(qcq->q.name), "%s%u", name, index);

	err = pdsc_qcq_intr_alloc(pdsc, qcq);
	if (err)
		goto err_out_free_q_info;

	qcq->cq.info = vcalloc(num_descs, sizeof(*qcq->cq.info));
	if (!qcq->cq.info) {
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	qcq->cq.bound_intr = &pdsc->intr_info[qcq->intx];
	qcq->cq.num_descs = num_descs;
	qcq->cq.desc_size = cq_desc_size;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;

	if (flags & PDS_CORE_QCQ_F_NOTIFYQ) {
		/* q & cq need to be contiguous in case of notifyq */
		qcq->q_size = PDS_PAGE_SIZE +
			      ALIGN(num_descs * desc_size, PDS_PAGE_SIZE) +
			      ALIGN(num_descs * cq_desc_size, PDS_PAGE_SIZE);
		qcq->q_base = dma_alloc_coherent(dev,
						 qcq->q_size + qcq->cq_size,
						 &qcq->q_base_pa,
						 GFP_KERNEL);
		if (!qcq->q_base) {
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		/* The extra PDS_PAGE_SIZE in q_size leaves room to round
		 * both the virtual and bus addresses up to a page boundary
		 */
		q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
		q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
		pdsc_q_map(&qcq->q, q_base, q_base_pa);

		/* The cq lives in the same allocation, in the page-aligned
		 * space following the q descriptors
		 */
		cq_base = PTR_ALIGN(q_base +
				    ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
				    PDS_PAGE_SIZE);
		cq_base_pa = ALIGN(qcq->q_base_pa +
				   ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
				   PDS_PAGE_SIZE);

	} else {
		/* q DMA descriptors */
		qcq->q_size = PDS_PAGE_SIZE + (num_descs * desc_size);
		qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,
						 &qcq->q_base_pa,
						 GFP_KERNEL);
		if (!qcq->q_base) {
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
		q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
		pdsc_q_map(&qcq->q, q_base, q_base_pa);

		/* cq DMA descriptors */
		qcq->cq_size = PDS_PAGE_SIZE + (num_descs * cq_desc_size);
		qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
						  &qcq->cq_base_pa,
						  GFP_KERNEL);
		if (!qcq->cq_base) {
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(qcq->cq_base, PDS_PAGE_SIZE);
		cq_base_pa = ALIGN(qcq->cq_base_pa, PDS_PAGE_SIZE);
	}

	pdsc_cq_map(&qcq->cq, cq_base, cq_base_pa);
	qcq->cq.bound_q = &qcq->q;

	pdsc_debugfs_add_qcq(pdsc, qcq);

	return 0;

	/* Unwind in reverse order of the allocations above */
err_out_free_q:
	dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
err_out_free_cq_info:
	vfree(qcq->cq.info);
err_out_free_irq:
	pdsc_qcq_intr_free(pdsc, qcq);
err_out_free_q_info:
	vfree(qcq->q.info);
	memset(qcq, 0, sizeof(*qcq));
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}
302
/* Send the device INIT command describing our adminq/notifyq layout,
 * then use the device's response to finish local setup: map the kernel
 * doorbell page and record the hw queue types, indices, and doorbell
 * values for both queues.
 *
 * Returns 0 on success, negative errno on devcmd or mapping failure.
 */
static int pdsc_core_init(struct pdsc *pdsc)
{
	union pds_core_dev_comp comp = {};
	union pds_core_dev_cmd cmd = {
		.init.opcode = PDS_CORE_CMD_INIT,
	};
	struct pds_core_dev_init_data_out cido;
	struct pds_core_dev_init_data_in cidi;
	u32 dbid_count;
	u32 dbpage_num;
	size_t sz;
	int err;

	/* Describe our queue rings and interrupt to the device */
	cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
	cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
	cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
	cidi.flags = cpu_to_le32(PDS_CORE_QINIT_F_IRQ | PDS_CORE_QINIT_F_ENA);
	cidi.intr_index = cpu_to_le16(pdsc->adminqcq.intx);
	cidi.adminq_ring_size = ilog2(pdsc->adminqcq.q.num_descs);
	cidi.notifyq_ring_size = ilog2(pdsc->notifyqcq.q.num_descs);

	mutex_lock(&pdsc->devcmd_lock);

	/* Pass the init data in through the devcmd data registers */
	sz = min_t(size_t, sizeof(cidi), sizeof(pdsc->cmd_regs->data));
	memcpy_toio(&pdsc->cmd_regs->data, &cidi, sz);

	err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
	if (!err) {
		/* On success the device wrote its answers back into the
		 * same data registers
		 */
		sz = min_t(size_t, sizeof(cido), sizeof(pdsc->cmd_regs->data));
		memcpy_fromio(&cido, &pdsc->cmd_regs->data, sz);
	}

	mutex_unlock(&pdsc->devcmd_lock);
	if (err) {
		dev_err(pdsc->dev, "Device init command failed: %pe\n",
			ERR_PTR(err));
		return err;
	}

	pdsc->hw_index = le32_to_cpu(cido.core_hw_index);

	/* Map the kernel's doorbell page for this lif */
	dbid_count = le32_to_cpu(pdsc->dev_ident.ndbpgs_per_lif);
	dbpage_num = pdsc->hw_index * dbid_count;
	pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
	if (!pdsc->kern_dbpage) {
		dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
		return -ENOMEM;
	}

	/* Record the device's view of our queues */
	pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
	pdsc->adminqcq.q.hw_index = le32_to_cpu(cido.adminq_hw_index);
	pdsc->adminqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->adminqcq.q.hw_index);

	pdsc->notifyqcq.q.hw_type = cido.notifyq_hw_type;
	pdsc->notifyqcq.q.hw_index = le32_to_cpu(cido.notifyq_hw_index);
	pdsc->notifyqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->notifyqcq.q.hw_index);

	pdsc->last_eid = 0;

	return err;
}
364
/* Default entries for the VIF support table, indexed by
 * enum pds_core_vif_types; slots with no .name are unused.
 */
static struct pdsc_viftype pdsc_viftype_defaults[] = {
	[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
				.vif_id = PDS_DEV_TYPE_VDPA,
				.dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
	[PDS_DEV_TYPE_MAX] = {}
};
371
372 static int pdsc_viftypes_init(struct pdsc *pdsc)
373 {
374         enum pds_core_vif_types vt;
375
376         pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
377                                        GFP_KERNEL);
378         if (!pdsc->viftype_status)
379                 return -ENOMEM;
380
381         for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
382                 bool vt_support;
383
384                 if (!pdsc_viftype_defaults[vt].name)
385                         continue;
386
387                 /* Grab the defaults */
388                 pdsc->viftype_status[vt] = pdsc_viftype_defaults[vt];
389
390                 /* See what the Core device has for support */
391                 vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
392                 dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
393                         pdsc->viftype_status[vt].name,
394                         vt_support ? "" : "not ");
395
396                 pdsc->viftype_status[vt].supported = vt_support;
397         }
398
399         return 0;
400 }
401
/**
 * pdsc_setup() - bring up the core device's queues and support structs
 * @pdsc: core device
 * @init: true on first-time setup, false on fw-recovery setup
 *
 * Initializes the device, allocates the adminq and notifyq (the
 * notifyq shares the adminq's interrupt), sends the core INIT command,
 * and on first-time setup builds the VIF support table.  On any
 * failure the partial setup is torn down.
 *
 * Return: 0 on success, negative errno on failure.
 */
int pdsc_setup(struct pdsc *pdsc, bool init)
{
	int numdescs;
	int err;

	err = pdsc_dev_init(pdsc);
	if (err)
		return err;

	/* Scale the descriptor ring length based on number of CPUs and VFs */
	numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
	numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
	numdescs = roundup_pow_of_two(numdescs);
	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
			     PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
			     numdescs,
			     sizeof(union pds_core_adminq_cmd),
			     sizeof(union pds_core_adminq_comp),
			     0, &pdsc->adminqcq);
	if (err)
		goto err_out_teardown;

	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
			     PDS_CORE_QCQ_F_NOTIFYQ,
			     PDSC_NOTIFYQ_LENGTH,
			     sizeof(struct pds_core_notifyq_cmd),
			     sizeof(union pds_core_notifyq_comp),
			     0, &pdsc->notifyqcq);
	if (err)
		goto err_out_teardown;

	/* NotifyQ rides on the AdminQ interrupt */
	pdsc->notifyqcq.intx = pdsc->adminqcq.intx;

	/* Set up the Core with the AdminQ and NotifyQ info */
	err = pdsc_core_init(pdsc);
	if (err)
		goto err_out_teardown;

	/* Set up the VIFs */
	if (init) {
		err = pdsc_viftypes_init(pdsc);
		if (err)
			goto err_out_teardown;

		pdsc_debugfs_add_viftype(pdsc);
	}

	/* refcnt == 1 means the adminq is allocated and ready for use */
	refcount_set(&pdsc->adminq_refcnt, 1);
	clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
	return 0;

err_out_teardown:
	pdsc_teardown(pdsc, init);
	return err;
}
458
/**
 * pdsc_teardown() - undo pdsc_setup()
 * @pdsc:     core device
 * @removing: true when the driver is going away; also frees the
 *            viftype table, which is kept across recovery teardowns
 *
 * Resets the device (PF only), cancels pending adminq work, frees the
 * queues, interrupts, doorbell mapping and IRQ vectors, and marks the
 * firmware dead.
 */
void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
	int i;

	/* VFs don't issue the device reset themselves */
	if (!pdsc->pdev->is_virtfn)
		pdsc_devcmd_reset(pdsc);
	/* work.func is only set once the adminqcq has been allocated */
	if (pdsc->adminqcq.work.func)
		cancel_work_sync(&pdsc->adminqcq.work);
	pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
	pdsc_qcq_free(pdsc, &pdsc->adminqcq);

	if (removing) {
		kfree(pdsc->viftype_status);
		pdsc->viftype_status = NULL;
	}

	if (pdsc->intr_info) {
		for (i = 0; i < pdsc->nintrs; i++)
			pdsc_intr_free(pdsc, i);

		kfree(pdsc->intr_info);
		pdsc->intr_info = NULL;
		pdsc->nintrs = 0;
	}

	if (pdsc->kern_dbpage) {
		iounmap(pdsc->kern_dbpage);
		pdsc->kern_dbpage = NULL;
	}

	pci_free_irq_vectors(pdsc->pdev);
	set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}
492
/**
 * pdsc_start() - enable operation by unmasking the adminq interrupt
 * @pdsc: core device
 *
 * Return: 0 always.
 */
int pdsc_start(struct pdsc *pdsc)
{
	pds_core_intr_mask(&pdsc->intr_ctrl[pdsc->adminqcq.intx],
			   PDS_CORE_INTR_MASK_CLEAR);

	return 0;
}
500
501 void pdsc_stop(struct pdsc *pdsc)
502 {
503         int i;
504
505         if (!pdsc->intr_info)
506                 return;
507
508         /* Mask interrupts that are in use */
509         for (i = 0; i < pdsc->nintrs; i++)
510                 if (pdsc->intr_info[i].vector)
511                         pds_core_intr_mask(&pdsc->intr_ctrl[i],
512                                            PDS_CORE_INTR_MASK_SET);
513 }
514
515 static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
516 {
517         /* The driver initializes the adminq_refcnt to 1 when the adminq is
518          * allocated and ready for use. Other users/requesters will increment
519          * the refcnt while in use. If the refcnt is down to 1 then the adminq
520          * is not in use and the refcnt can be cleared and adminq freed. Before
521          * calling this function the driver will set PDSC_S_FW_DEAD, which
522          * prevent subsequent attempts to use the adminq and increment the
523          * refcnt to fail. This guarantees that this function will eventually
524          * exit.
525          */
526         while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
527                 dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
528                                     __func__);
529                 cpu_relax();
530         }
531 }
532
/**
 * pdsc_fw_down() - handle a firmware-down transition
 * @pdsc: core device
 *
 * Marks the firmware dead (warning and bailing out if it already was),
 * waits for the adminq to go idle, reports to devlink health and the
 * client notifier chain, then stops and tears down for recovery.
 * VFs only set the state bit; only the PF does the teardown.
 */
void pdsc_fw_down(struct pdsc *pdsc)
{
	union pds_core_notifyq_comp reset_event = {
		.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
		.reset.state = 0,
	};

	if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		dev_warn(pdsc->dev, "%s: already happening\n", __func__);
		return;
	}

	if (pdsc->pdev->is_virtfn)
		return;

	/* FW_DEAD is set above, so no new adminq users can appear and
	 * this wait is guaranteed to finish
	 */
	pdsc_adminq_wait_and_dec_once_unused(pdsc);

	/* Notify clients of fw_down */
	if (pdsc->fw_reporter)
		devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
	pdsc_notify(PDS_EVENT_RESET, &reset_event);

	pdsc_stop(pdsc);
	pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
558
/**
 * pdsc_fw_up() - recover after the firmware comes back
 * @pdsc: core device
 *
 * Re-runs setup and start in recovery mode, then reports healthy to
 * devlink and notifies clients with a reset event (state = 1).  VFs
 * only clear the FW_DEAD bit.  If recovery fails, the partial setup
 * is torn down again.
 */
void pdsc_fw_up(struct pdsc *pdsc)
{
	union pds_core_notifyq_comp reset_event = {
		.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
		.reset.state = 1,
	};
	int err;

	/* Nothing to recover if fw was never marked dead */
	if (!test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		dev_err(pdsc->dev, "%s: fw not dead\n", __func__);
		return;
	}

	if (pdsc->pdev->is_virtfn) {
		clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
		return;
	}

	err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
	if (err)
		goto err_out;

	err = pdsc_start(pdsc);
	if (err)
		goto err_out;

	/* Notify clients of fw_up */
	pdsc->fw_recoveries++;
	if (pdsc->fw_reporter)
		devlink_health_reporter_state_update(pdsc->fw_reporter,
						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
	pdsc_notify(PDS_EVENT_RESET, &reset_event);

	return;

err_out:
	pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
597
598 void pdsc_pci_reset_thread(struct work_struct *work)
599 {
600         struct pdsc *pdsc = container_of(work, struct pdsc, pci_reset_work);
601         struct pci_dev *pdev = pdsc->pdev;
602
603         pci_dev_get(pdev);
604         pci_reset_function(pdev);
605         pci_dev_put(pdev);
606 }
607
608 static void pdsc_check_pci_health(struct pdsc *pdsc)
609 {
610         u8 fw_status;
611
612         /* some sort of teardown already in progress */
613         if (!pdsc->info_regs)
614                 return;
615
616         fw_status = ioread8(&pdsc->info_regs->fw_status);
617
618         /* is PCI broken? */
619         if (fw_status != PDS_RC_BAD_PCI)
620                 return;
621
622         /* prevent deadlock between pdsc_reset_prepare and pdsc_health_thread */
623         queue_work(pdsc->wq, &pdsc->pci_reset_work);
624 }
625
626 void pdsc_health_thread(struct work_struct *work)
627 {
628         struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
629         unsigned long mask;
630         bool healthy;
631
632         mutex_lock(&pdsc->config_lock);
633
634         /* Don't do a check when in a transition state */
635         mask = BIT_ULL(PDSC_S_INITING_DRIVER) |
636                BIT_ULL(PDSC_S_STOPPING_DRIVER);
637         if (pdsc->state & mask)
638                 goto out_unlock;
639
640         healthy = pdsc_is_fw_good(pdsc);
641         dev_dbg(pdsc->dev, "%s: health %d fw_status %#02x fw_heartbeat %d\n",
642                 __func__, healthy, pdsc->fw_status, pdsc->last_hb);
643
644         if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
645                 if (healthy)
646                         pdsc_fw_up(pdsc);
647         } else {
648                 if (!healthy)
649                         pdsc_fw_down(pdsc);
650         }
651
652         pdsc_check_pci_health(pdsc);
653
654         pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
655
656 out_unlock:
657         mutex_unlock(&pdsc->config_lock);
658 }