/*
 * Copyright (c) 2016, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/mlx5/driver.h>
36 #include "mlx5_core.h"
40 struct mlx5_core_dev *mdev;
41 spinlock_t lock; /* protect vxlan table */
42 /* max_num_ports is usuallly 4, 16 buckets is more than enough */
43 DECLARE_HASHTABLE(htable, 4);
45 struct mutex sync_lock; /* sync add/del port HW operations */
48 struct mlx5_vxlan_port {
49 struct hlist_node hlist;
54 static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
56 return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
59 static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
61 u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
62 u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
64 MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
65 MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
66 MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
67 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
70 static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
72 u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
73 u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
75 MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
76 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
77 MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
78 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
81 static struct mlx5_vxlan_port*
82 mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
84 struct mlx5_vxlan_port *vxlanp;
86 hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) {
87 if (vxlanp->udp_port == port)
94 struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
96 struct mlx5_vxlan_port *vxlanp;
98 if (!mlx5_vxlan_allowed(vxlan))
101 spin_lock_bh(&vxlan->lock);
102 vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
103 spin_unlock_bh(&vxlan->lock);
108 int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
110 struct mlx5_vxlan_port *vxlanp;
113 vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
115 atomic_inc(&vxlanp->refcount);
119 mutex_lock(&vxlan->sync_lock);
120 if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
121 mlx5_core_info(vxlan->mdev,
122 "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
123 port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
128 ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
132 vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
135 goto err_delete_port;
138 vxlanp->udp_port = port;
139 atomic_set(&vxlanp->refcount, 1);
141 spin_lock_bh(&vxlan->lock);
142 hash_add(vxlan->htable, &vxlanp->hlist, port);
143 spin_unlock_bh(&vxlan->lock);
146 mutex_unlock(&vxlan->sync_lock);
150 mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
153 mutex_unlock(&vxlan->sync_lock);
157 int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
159 struct mlx5_vxlan_port *vxlanp;
163 mutex_lock(&vxlan->sync_lock);
165 spin_lock_bh(&vxlan->lock);
166 vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
172 if (atomic_dec_and_test(&vxlanp->refcount)) {
173 hash_del(&vxlanp->hlist);
178 spin_unlock_bh(&vxlan->lock);
181 mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
186 mutex_unlock(&vxlan->sync_lock);
191 struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
193 struct mlx5_vxlan *vxlan;
195 if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
196 return ERR_PTR(-ENOTSUPP);
198 vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
200 return ERR_PTR(-ENOMEM);
203 mutex_init(&vxlan->sync_lock);
204 spin_lock_init(&vxlan->lock);
205 hash_init(vxlan->htable);
207 /* Hardware adds 4789 by default */
208 mlx5_vxlan_add_port(vxlan, 4789);
213 void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
215 struct mlx5_vxlan_port *vxlanp;
216 struct hlist_node *tmp;
219 if (!mlx5_vxlan_allowed(vxlan))
222 /* Lockless since we are the only hash table consumers*/
223 hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
224 hash_del(&vxlanp->hlist);
225 mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);