/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
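
/* Register an upper-layer protocol (ULP) driver -- currently only the RoCE
 * driver uses this -- with the base Ethernet driver.  For the RoCE ULP, a
 * minimum number of statistics contexts is reserved out of the function's
 * pool before the ops table is published with rcu_assign_pointer().
 */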
static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
			     struct bnxt_ulp_ops *ulp_ops, void *handle)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
		return -EBUSY;
	}
	if (ulp_id == BNXT_ROCE_ULP) {
		unsigned int max_stat_ctxs;

		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
		if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
		    bp->num_stat_ctxs == max_stat_ctxs)
			return -ENOMEM;
		bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
					    BNXT_MIN_ROCE_STAT_CTXS);
	}

	atomic_set(&ulp->ref_count, 0);
	ulp->handle = handle;
	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

	if (ulp_id == BNXT_ROCE_ULP) {
		/* Configure the VNIC for RoCE if the device is already up */
		if (test_bit(BNXT_STATE_OPEN, &bp->state))
			bnxt_hwrm_vnic_cfg(bp, 0);
	}

	return 0;
}
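
/* Undo bnxt_register_dev(): give back reserved resources, free any MSI-X
 * vectors still held by the ULP, clear the ops pointer, and wait briefly
 * for outstanding references to drop.
 */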
static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;
	int i = 0;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (!rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
		return -EINVAL;
	}
	if (ulp_id == BNXT_ROCE_ULP) {
		unsigned int max_stat_ctxs;

		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
		bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
		if (ulp->msix_requested)
			edev->en_ops->bnxt_free_msix(edev, ulp_id);
	}
	if (ulp->max_async_event_id)
		bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);

	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
	synchronize_rcu();
	ulp->max_async_event_id = 0;
	ulp->async_events_bmap = NULL;
	/* Wait up to one second for the reference count to drop to zero */
	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
		msleep(100);
		i++;
	}
	return 0;
}
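
/* Grant the RoCE ULP MSI-X vectors from the top of the function's IRQ
 * range.  Each entry carries the Linux IRQ vector, the completion ring
 * index, and the ring's doorbell offset (0x80 bytes per ring).  Returns
 * the number of vectors actually granted, which may be less than
 * num_msix.
 */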
static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
			      struct bnxt_msix_entry *ent, int num_msix)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	int max_idx, max_cp_rings;
	int avail_msix, i, idx;

	ASSERT_RTNL();
	if (ulp_id != BNXT_ROCE_ULP)
		return -EINVAL;

	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
		return -ENODEV;

	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
	max_idx = min_t(int, bp->total_irqs, max_cp_rings);
	avail_msix = max_idx - bp->cp_nr_rings;
	if (!avail_msix)
		return -ENOMEM;
	if (avail_msix > num_msix)
		avail_msix = num_msix;

	idx = max_idx - avail_msix;
	for (i = 0; i < avail_msix; i++) {
		ent[i].vector = bp->irq_tbl[idx + i].vector;
		ent[i].ring_idx = idx + i;
		ent[i].db_offset = (idx + i) * 0x80;
	}
	bnxt_set_max_func_irqs(bp, max_idx - avail_msix);
	bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
	edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
	return avail_msix;
}
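
/* Return the RoCE ULP's MSI-X vectors and completion rings to the pool. */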
static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	int max_cp_rings, msix_requested;

	ASSERT_RTNL();
	if (ulp_id != BNXT_ROCE_ULP)
		return -EINVAL;

	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
	msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
	bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
	edev->ulp_tbl[ulp_id].msix_requested = 0;
	bnxt_set_max_func_irqs(bp, bp->total_irqs);
	return 0;
}
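
/* Deduct the resources currently held by a registered ULP from the
 * function's completion ring and statistics context maximums.
 */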
void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
{
	ASSERT_RTNL();
	if (bnxt_ulp_registered(bp->edev, ulp_id)) {
		struct bnxt_en_dev *edev = bp->edev;
		unsigned int msix_req, max;

		msix_req = edev->ulp_tbl[ulp_id].msix_requested;
		max = bnxt_get_max_func_cp_rings(bp);
		bnxt_set_max_func_cp_rings(bp, max - msix_req);
		max = bnxt_get_max_func_stat_ctxs(bp);
		bnxt_set_max_func_stat_ctxs(bp, max - 1);
	}
}
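
/* Send an HWRM message to firmware on behalf of a ULP.  The call is
 * serialized by the HWRM command lock, and up to resp_max_len bytes of
 * the response are copied back to the caller's buffer.
 */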
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
			 struct bnxt_fw_msg *fw_msg)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct input *req;
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	req = fw_msg->msg;
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
	rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
				fw_msg->timeout);
	if (!rc) {
		struct output *resp = bp->hwrm_cmd_resp_addr;
		u32 len = le16_to_cpu(resp->resp_len);

		/* Don't overflow the caller's response buffer */
		if (fw_msg->resp_max_len < len)
			len = fw_msg->resp_max_len;

		memcpy(fw_msg->resp, resp, len);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
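
/* Reference counting used to keep a ULP entry alive while one of its
 * callbacks runs outside the RCU read-side critical section.
 */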
static void bnxt_ulp_get(struct bnxt_ulp *ulp)
{
	atomic_inc(&ulp->ref_count);
}

static void bnxt_ulp_put(struct bnxt_ulp *ulp)
{
	atomic_dec(&ulp->ref_count);
}
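
/* Ask every registered ULP to quiesce, e.g. before a reset or close. */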
void bnxt_ulp_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_stop)
			continue;
		ops->ulp_stop(ulp->handle);
	}
}
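
/* Restart every registered ULP, the counterpart of bnxt_ulp_stop(). */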
void bnxt_ulp_start(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_start)
			continue;
		ops->ulp_start(ulp->handle);
	}
}
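
/* Notify every interested ULP of an SR-IOV configuration change. */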
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		rcu_read_lock();
		ops = rcu_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_sriov_config) {
			rcu_read_unlock();
			continue;
		}
		/* Pin the ULP so the callback can run outside the RCU
		 * read-side critical section.
		 */
		bnxt_ulp_get(ulp);
		rcu_read_unlock();
		ops->ulp_sriov_config(ulp->handle, num_vfs);
		bnxt_ulp_put(ulp);
	}
}
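
/* Give every registered ULP a chance to clean up at shutdown time. */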
void bnxt_ulp_shutdown(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_shutdown)
			continue;
		ops->ulp_shutdown(ulp->handle);
	}
}
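
/* Dispatch a firmware async event completion to every ULP that has
 * registered for that event id in its async_events_bmap.
 */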
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	int i;

	if (!edev)
		return;

	rcu_read_lock();
	for (i = 0; i < BNXT_MAX_ULP; i++) {
		struct bnxt_ulp *ulp = &edev->ulp_tbl[i];

		ops = rcu_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_async_notifier)
			continue;
		if (!ulp->async_events_bmap ||
		    event_id > ulp->max_async_event_id)
			continue;

		/* Read max_async_event_id first before testing the bitmap. */
		smp_rmb();
		if (test_bit(event_id, ulp->async_events_bmap))
			ops->ulp_async_notifier(ulp->handle, cmpl);
	}
	rcu_read_unlock();
}
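
/* Record which firmware async events the ULP wants and register the event
 * list with firmware.  The write barrier pairs with the read barrier in
 * bnxt_ulp_async_events().
 */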
static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
				      unsigned long *events_bmap, u16 max_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	ulp->async_events_bmap = events_bmap;
	/* Make sure bnxt_ulp_async_events() sees this order */
	smp_wmb();
	ulp->max_async_event_id = max_id;
	bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
	return 0;
}
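
/* Ops table handed to ULPs via the bnxt_en_dev returned by
 * bnxt_ulp_probe().
 */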
static const struct bnxt_en_ops bnxt_en_ops_tbl = {
	.bnxt_register_device		= bnxt_register_dev,
	.bnxt_unregister_device		= bnxt_unregister_dev,
	.bnxt_request_msix		= bnxt_req_msix_vecs,
	.bnxt_free_msix			= bnxt_free_msix_vecs,
	.bnxt_send_fw_msg		= bnxt_send_msg,
	.bnxt_register_fw_async_events	= bnxt_register_async_events,
};
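
/* Entry point for the RoCE driver: return the bnxt_en_dev for a bnxt
 * netdev, allocating it on first use.
 */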
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_en_dev *edev;

	edev = bp->edev;
	if (!edev) {
		edev = kzalloc(sizeof(*edev), GFP_KERNEL);
		if (!edev)
			return ERR_PTR(-ENOMEM);
		edev->en_ops = &bnxt_en_ops_tbl;
		if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
			edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
		if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
			edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
		edev->net = dev;
		edev->pdev = bp->pdev;
		bp->edev = edev;
	}
	return bp->edev;
}
EXPORT_SYMBOL(bnxt_ulp_probe);