GNU Linux-libre 5.19-rc6-gnu
drivers/crypto/cavium/nitrox/nitrox_mbx.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_dev.h"
#include "nitrox_mbx.h"

#define RING_TO_VFNO(_x, _y)    ((_x) / (_y))
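/*
 * Example: with ndev->iov.max_vf_queues == 2, mailbox interrupts on
 * rings 4 and 5 both resolve to VF number 2.
 */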

/*
 * mbx_msg_type - Mailbox message types
 */
enum mbx_msg_type {
        MBX_MSG_TYPE_NOP,
        MBX_MSG_TYPE_REQ,
        MBX_MSG_TYPE_ACK,
        MBX_MSG_TYPE_NACK,
};
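
/*
 * Handshake: a VF posts a MBX_MSG_TYPE_REQ and the PF answers with
 * MBX_MSG_TYPE_ACK (pf2vf_send_response() below); ACK/NACK messages
 * arriving from a VF are ignored in pf2vf_resp_handler().
 */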

/*
 * mbx_msg_opcode - Mailbox message opcodes
 */
enum mbx_msg_opcode {
        MSG_OP_VF_MODE = 1,
        MSG_OP_VF_UP,
        MSG_OP_VF_DOWN,
        MSG_OP_CHIPID_VFID,
        MSG_OP_MCODE_INFO = 11,
};
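
/*
 * Every message is a single 64-bit value (union mbox_msg, defined in
 * the driver headers) carrying type and opcode bitfields plus an
 * opcode-specific payload. A VF bringing up its queues, for instance,
 * sends a MBX_MSG_TYPE_REQ with opcode MSG_OP_VF_UP and its queue
 * count in the data field; see pf2vf_send_response() below.
 */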

struct pf2vf_work {
        struct nitrox_vfdev *vfdev;
        struct nitrox_device *ndev;
        struct work_struct pf2vf_resp;
};

static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring)
{
        u64 reg_addr;

        reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
        return nitrox_read_csr(ndev, reg_addr);
}

static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value,
                                    int ring)
{
        u64 reg_addr;

        reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
        nitrox_write_csr(ndev, reg_addr, value);
}
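
/*
 * Each mailbox ring has a data register pair: the PF reads the VF's
 * request from NPS_PKT_MBOX_VF_PF_PFDATAX(ring) and posts its reply
 * through NPS_PKT_MBOX_PF_VF_PFDATAX(ring).
 */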

static void pf2vf_send_response(struct nitrox_device *ndev,
                                struct nitrox_vfdev *vfdev)
{
        union mbox_msg msg;

        msg.value = vfdev->msg.value;

        switch (vfdev->msg.opcode) {
        case MSG_OP_VF_MODE:
                msg.data = ndev->mode;
                break;
        case MSG_OP_VF_UP:
                vfdev->nr_queues = vfdev->msg.data;
                atomic_set(&vfdev->state, __NDEV_READY);
                break;
        case MSG_OP_CHIPID_VFID:
                msg.id.chipid = ndev->idx;
                msg.id.vfid = vfdev->vfno;
                break;
        case MSG_OP_VF_DOWN:
                vfdev->nr_queues = 0;
                atomic_set(&vfdev->state, __NDEV_NOT_READY);
                break;
        case MSG_OP_MCODE_INFO:
                msg.data = 0;
                msg.mcode_info.count = 2;
                msg.mcode_info.info = MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5);
                msg.mcode_info.next_se_grp = 1;
                msg.mcode_info.next_ae_grp = 1;
                break;
        default:
                msg.type = MBX_MSG_TYPE_NOP;
                break;
        }

        if (msg.type == MBX_MSG_TYPE_NOP)
                return;

        /* send ACK to VF */
        msg.type = MBX_MSG_TYPE_ACK;
        pf2vf_write_mbox(ndev, msg.value, vfdev->ring);

        vfdev->msg.value = 0;
        atomic64_inc(&vfdev->mbx_resp);
}
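
/*
 * Worked example for MSG_OP_MCODE_INFO above: the reply advertises two
 * microcode entries, the AE type shifted past a five-bit SE slot. With
 * hypothetical values MCODE_TYPE_SE_SSL == 1 and MCODE_TYPE_AE == 3,
 * the info field would read back as (1 | (3 << 5)) == 0x61.
 */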

static void pf2vf_resp_handler(struct work_struct *work)
{
        struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
                                                     pf2vf_resp);
        struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev;
        struct nitrox_device *ndev = pf2vf_resp->ndev;

        switch (vfdev->msg.type) {
        case MBX_MSG_TYPE_REQ:
                /* process the request from VF */
                pf2vf_send_response(ndev, vfdev);
                break;
        case MBX_MSG_TYPE_ACK:
        case MBX_MSG_TYPE_NACK:
                break;
        }

        kfree(pf2vf_resp);
}
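
/*
 * pf2vf_work items are allocated with GFP_ATOMIC in the mailbox
 * interrupt path below and freed here by the work function itself once
 * the response is out, so no teardown path needs to track them.
 */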

void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
{
        DECLARE_BITMAP(csr, BITS_PER_TYPE(u64));
        struct nitrox_vfdev *vfdev;
        struct pf2vf_work *pfwork;
        u64 value, reg_addr;
        u32 i;
        int vfno;

        /* loop for VF(0..63) */
        reg_addr = NPS_PKT_MBOX_INT_LO;
        value = nitrox_read_csr(ndev, reg_addr);
        bitmap_from_u64(csr, value);
        for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
                /* get the vfno from ring */
                vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
                vfdev = ndev->iov.vfdev + vfno;
                vfdev->ring = i;
                /* fill the vf mailbox data */
                vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
                pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
                if (!pfwork)
                        continue;

                pfwork->vfdev = vfdev;
                pfwork->ndev = ndev;
                INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
                queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
                /* clear the corresponding vf bit */
                nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
        }

        /* loop for VF(64..127) */
        reg_addr = NPS_PKT_MBOX_INT_HI;
        value = nitrox_read_csr(ndev, reg_addr);
        bitmap_from_u64(csr, value);
        for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
                /* get the vfno from ring */
                vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
                vfdev = ndev->iov.vfdev + vfno;
                vfdev->ring = (i + 64);
                /* fill the vf mailbox data */
                vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);

                pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
                if (!pfwork)
                        continue;

                pfwork->vfdev = vfdev;
                pfwork->ndev = ndev;
                INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
                queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
                /* clear the corresponding vf bit */
                nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
        }
}
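
/*
 * The LO and HI loops above differ only in the CSR scanned and the
 * ring offset. A minimal refactoring sketch that both could share;
 * hypothetical helper, shown for illustration only, not part of the
 * driver:
 */
#if 0
static void pf2vf_scan_mbox_ints(struct nitrox_device *ndev, u64 reg_addr,
                                 int ring_off)
{
        DECLARE_BITMAP(csr, BITS_PER_TYPE(u64));
        struct nitrox_vfdev *vfdev;
        struct pf2vf_work *pfwork;
        u32 i;

        bitmap_from_u64(csr, nitrox_read_csr(ndev, reg_addr));
        for_each_set_bit(i, csr, BITS_PER_TYPE(u64)) {
                vfdev = ndev->iov.vfdev +
                        RING_TO_VFNO(i + ring_off, ndev->iov.max_vf_queues);
                vfdev->ring = i + ring_off;
                /* latch the VF request and respond from process context */
                vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);

                pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
                if (!pfwork)
                        continue;

                pfwork->vfdev = vfdev;
                pfwork->ndev = ndev;
                INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
                queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
                /* clear the serviced vf bit */
                nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
        }
}
#endif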

int nitrox_mbox_init(struct nitrox_device *ndev)
{
        struct nitrox_vfdev *vfdev;
        int i;

        ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs,
                                  sizeof(struct nitrox_vfdev), GFP_KERNEL);
        if (!ndev->iov.vfdev)
                return -ENOMEM;

        for (i = 0; i < ndev->iov.num_vfs; i++) {
                vfdev = ndev->iov.vfdev + i;
                vfdev->vfno = i;
        }

        /* allocate pf2vf response workqueue */
        ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
        if (!ndev->iov.pf2vf_wq) {
                kfree(ndev->iov.vfdev);
                /* avoid a double free if cleanup runs after failed init */
                ndev->iov.vfdev = NULL;
                return -ENOMEM;
        }
        /* enable pf2vf mailbox interrupts */
        enable_pf2vf_mbox_interrupts(ndev);

        return 0;
}
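
/*
 * Ordering note: enable_pf2vf_mbox_interrupts() is the last step of
 * init, so iov.vfdev and the workqueue exist before the first mailbox
 * interrupt can fire; cleanup below disables the interrupts first for
 * the same reason.
 */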

void nitrox_mbox_cleanup(struct nitrox_device *ndev)
{
        /* disable pf2vf mailbox interrupts */
        disable_pf2vf_mbox_interrupts(ndev);
        /* destroy workqueue */
        if (ndev->iov.pf2vf_wq)
                destroy_workqueue(ndev->iov.pf2vf_wq);

        kfree(ndev->iov.vfdev);
        ndev->iov.pf2vf_wq = NULL;
        ndev->iov.vfdev = NULL;
}