// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ip_vs_app.c: Application module support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that the ip_vs_app module handles the reverse direction (incoming
 * requests and outgoing responses).
 *
 *		IP_MASQ_APP application masquerading module
 *
 * Author:	Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <net/ip_vs.h>

EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);

static DEFINE_MUTEX(__ip_vs_app_mutex);

/*
 *	Get an ip_vs_app object
 */
static inline int ip_vs_app_get(struct ip_vs_app *app)
{
        return try_module_get(app->module);
}

static inline void ip_vs_app_put(struct ip_vs_app *app)
{
        module_put(app->module);
}

static void ip_vs_app_inc_destroy(struct ip_vs_app *inc)
{
        kfree(inc->timeout_table);
        kfree(inc);
}

static void ip_vs_app_inc_rcu_free(struct rcu_head *head)
{
        struct ip_vs_app *inc = container_of(head, struct ip_vs_app, rcu_head);

        ip_vs_app_inc_destroy(inc);
}

/*
 *	Allocate/initialize app incarnation and register it in proto apps.
 */
static int
ip_vs_app_inc_new(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
                  __u16 port)
{
        struct ip_vs_protocol *pp;
        struct ip_vs_app *inc;
        int ret;

        if (!(pp = ip_vs_proto_get(proto)))
                return -EPROTONOSUPPORT;

        if (!pp->unregister_app)
                return -EOPNOTSUPP;

        inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
        if (!inc)
                return -ENOMEM;
        INIT_LIST_HEAD(&inc->p_list);
        INIT_LIST_HEAD(&inc->incs_list);
        inc->app = app;
        inc->port = htons(port);
        atomic_set(&inc->usecnt, 0);

        if (app->timeouts) {
                inc->timeout_table =
                        ip_vs_create_timeout_table(app->timeouts,
                                                   app->timeouts_size);
                if (!inc->timeout_table) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        ret = pp->register_app(ipvs, inc);
        if (ret)
                goto out;

        list_add(&inc->a_list, &app->incs_list);
        IP_VS_DBG(9, "%s App %s:%u registered\n",
                  pp->name, inc->name, ntohs(inc->port));

        return 0;

  out:
        ip_vs_app_inc_destroy(inc);
        return ret;
}

/*
 *	Release app incarnation
 */
static void
ip_vs_app_inc_release(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
        struct ip_vs_protocol *pp;

        if (!(pp = ip_vs_proto_get(inc->protocol)))
                return;

        if (pp->unregister_app)
                pp->unregister_app(ipvs, inc);

        IP_VS_DBG(9, "%s App %s:%u unregistered\n",
                  pp->name, inc->name, ntohs(inc->port));

        list_del(&inc->a_list);

        call_rcu(&inc->rcu_head, ip_vs_app_inc_rcu_free);
}

/*
 *	Get reference to app inc (only called from softirq)
 */
int ip_vs_app_inc_get(struct ip_vs_app *inc)
{
        int result;

        result = ip_vs_app_get(inc->app);
        if (result)
                atomic_inc(&inc->usecnt);
        return result;
}

/*
 *	Put the app inc (only called from timer or net softirq)
 */
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
        atomic_dec(&inc->usecnt);
        ip_vs_app_put(inc->app);
}

/*
 *	Register an application incarnation in protocol applications
 */
int
register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
                       __u16 port)
{
        int result;

        mutex_lock(&__ip_vs_app_mutex);

        result = ip_vs_app_inc_new(ipvs, app, proto, port);

        mutex_unlock(&__ip_vs_app_mutex);

        return result;
}

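/*
 * Usage sketch (illustrative, not part of this file): a helper module is
 * expected to fill in a struct ip_vs_app and register it plus one
 * incarnation per service port. The field values and the hook names
 * my_app_pkt_out/my_app_pkt_in are hypothetical; ip_vs_ftp.c is the real
 * in-tree user of this API.
 *
 *	static struct ip_vs_app my_app = {
 *		.name     = "myproto",
 *		.protocol = IPPROTO_TCP,
 *		.module   = THIS_MODULE,
 *		.pkt_out  = my_app_pkt_out,  // mangle server->client payload
 *		.pkt_in   = my_app_pkt_in,   // mangle client->server payload
 *	};
 *
 *	app = register_ip_vs_app(ipvs, &my_app);
 *	if (IS_ERR(app))
 *		return PTR_ERR(app);
 *	ret = register_ip_vs_app_inc(ipvs, app, IPPROTO_TCP, 21);
 */
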
/* Register application for netns */
struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
{
        struct ip_vs_app *a;
        int err = 0;

        mutex_lock(&__ip_vs_app_mutex);

        /* increase the module use count */
        if (!ip_vs_use_count_inc()) {
                err = -ENOENT;
                goto out_unlock;
        }

        list_for_each_entry(a, &ipvs->app_list, a_list) {
                if (!strcmp(app->name, a->name)) {
                        err = -EEXIST;
                        /* decrease the module use count */
                        ip_vs_use_count_dec();
                        goto out_unlock;
                }
        }
        a = kmemdup(app, sizeof(*app), GFP_KERNEL);
        if (!a) {
                err = -ENOMEM;
                /* decrease the module use count */
                ip_vs_use_count_dec();
                goto out_unlock;
        }
        INIT_LIST_HEAD(&a->incs_list);
        list_add(&a->a_list, &ipvs->app_list);

out_unlock:
        mutex_unlock(&__ip_vs_app_mutex);

        return err ? ERR_PTR(err) : a;
}

/*
 *	ip_vs_app unregistration routine
 *	We are sure there are no app incarnations attached to services
 *	Caller should use synchronize_rcu() or rcu_barrier()
 */
void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
{
        struct ip_vs_app *a, *anxt, *inc, *nxt;

        mutex_lock(&__ip_vs_app_mutex);

        list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
                if (app && strcmp(app->name, a->name))
                        continue;
                list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
                        ip_vs_app_inc_release(ipvs, inc);
                }

                list_del(&a->a_list);
                kfree(a);

                /* decrease the module use count */
                ip_vs_use_count_dec();
        }

        mutex_unlock(&__ip_vs_app_mutex);
}

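/*
 * Usage note (illustrative): incarnations are freed via call_rcu(), so a
 * module that unregisters its app on unload should wait for the pending
 * RCU callbacks before its code and data go away, per the comment above,
 * e.g. (my_app is hypothetical):
 *
 *	unregister_ip_vs_app(ipvs, &my_app);
 *	rcu_barrier();	// wait for ip_vs_app_inc_rcu_free() calls to finish
 */
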
/*
 *	Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
 */
int ip_vs_bind_app(struct ip_vs_conn *cp,
                   struct ip_vs_protocol *pp)
{
        return pp->app_conn_bind(cp);
}

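/*
 * Note: for TCP and UDP, app_conn_bind is implemented by the protocol
 * handlers (tcp_app_conn_bind()/udp_app_conn_bind()). Roughly, they hash
 * cp->vport into the per-netns app table and, on a port match, take a
 * reference with ip_vs_app_inc_get() and set cp->app. A loose sketch of
 * the TCP side (an approximation; see ip_vs_proto_tcp.c for the real code):
 *
 *	hash = tcp_app_hashkey(cp->vport);
 *	list_for_each_entry_rcu(inc, &ipvs->tcp_apps[hash], p_list) {
 *		if (inc->port == cp->vport) {
 *			if (!ip_vs_app_inc_get(inc))
 *				break;
 *			cp->app = inc;
 *			if (inc->init_conn)
 *				inc->init_conn(inc, cp);
 *			break;
 *		}
 *	}
 */
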
/*
 *	Unbind cp from application incarnation (called by cp destructor)
 */
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
        struct ip_vs_app *inc = cp->app;

        if (!inc)
                return;

        if (inc->unbind_conn)
                inc->unbind_conn(inc, cp);
        if (inc->done_conn)
                inc->done_conn(inc, cp);
        ip_vs_app_inc_put(inc);
        cp->app = NULL;
}

/*
 *	Fixes th->seq based on ip_vs_seq info.
 */
static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
        __u32 seq = ntohl(th->seq);

        /*
         *	Adjust seq with delta-offset for all packets after
         *	the most recent resized pkt seq and with previous_delta offset
         *	for all packets before most recent resized pkt seq.
         */
        if (vseq->delta || vseq->previous_delta) {
                if (after(seq, vseq->init_seq)) {
                        th->seq = htonl(seq + vseq->delta);
                        IP_VS_DBG(9, "%s(): added delta (%d) to seq\n",
                                  __func__, vseq->delta);
                } else {
                        th->seq = htonl(seq + vseq->previous_delta);
                        IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n",
                                  __func__, vseq->previous_delta);
                }
        }
}

/*
 *	Fixes th->ack_seq based on ip_vs_seq info.
 */
static inline void
vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
        __u32 ack_seq = ntohl(th->ack_seq);

        /*
         * Adjust ack_seq with delta-offset for the packets AFTER the
         * most recent resized pkt; for packets before the most recent
         * resized pkt, use previous_delta.
         */
        if (vseq->delta || vseq->previous_delta) {
                /* since ack_seq is the number of the octet expected to be
                   received next, compare it with init_seq+delta */
                if (after(ack_seq, vseq->init_seq + vseq->delta)) {
                        th->ack_seq = htonl(ack_seq - vseq->delta);
                        IP_VS_DBG(9, "%s(): subtracted delta "
                                  "(%d) from ack_seq\n", __func__, vseq->delta);
                } else {
                        th->ack_seq = htonl(ack_seq - vseq->previous_delta);
                        IP_VS_DBG(9, "%s(): subtracted "
                                  "previous_delta (%d) from ack_seq\n",
                                  __func__, vseq->previous_delta);
                }
        }
}

/*
 *	Updates ip_vs_seq if pkt has been resized
 *	Assumes already checked proto==IPPROTO_TCP and diff!=0.
 */
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
                                 unsigned int flag, __u32 seq, int diff)
{
        /* spinlock is to keep updating cp->flags atomic */
        spin_lock_bh(&cp->lock);
        if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
                vseq->previous_delta = vseq->delta;
                vseq->delta += diff;
                vseq->init_seq = seq;
                cp->flags |= flag;
        }
        spin_unlock_bh(&cp->lock);
}

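/*
 * Worked example (numbers invented for illustration): suppose the app's
 * pkt_out hook rewrites a payload at seq 1000 and grows it by 10 bytes
 * (diff = 10). vs_seq_update() then records init_seq = 1000, delta = 10,
 * previous_delta = 0. A later packet with seq 1500 is after init_seq, so
 * vs_fix_seq() sends it out as seq 1510, while a retransmit with seq 900
 * gets previous_delta (0) applied and leaves unchanged. In the reverse
 * direction, ACKs above init_seq + delta are shifted back down by 10 in
 * vs_fix_ack_seq(), so the other end never notices the inserted bytes.
 */
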
static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
                                  struct ip_vs_app *app,
                                  struct ip_vs_iphdr *ipvsh)
{
        int diff;
        const unsigned int tcp_offset = ip_hdrlen(skb);
        struct tcphdr *th;
        __u32 seq;

        if (skb_ensure_writable(skb, tcp_offset + sizeof(*th)))
                return 0;

        th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

        /*
         *	Remember seq number in case this pkt gets resized
         */
        seq = ntohl(th->seq);

        /*
         *	Fix seq stuff if flagged as so.
         */
        if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
                vs_fix_seq(&cp->out_seq, th);
        if (cp->flags & IP_VS_CONN_F_IN_SEQ)
                vs_fix_ack_seq(&cp->in_seq, th);

        /*
         *	Call private output hook function
         */
        if (app->pkt_out == NULL)
                return 1;

        if (!app->pkt_out(app, cp, skb, &diff, ipvsh))
                return 0;

        /*
         *	Update ip_vs seq stuff if len has changed.
         */
        if (diff != 0)
                vs_seq_update(cp, &cp->out_seq,
                              IP_VS_CONN_F_OUT_SEQ, seq, diff);

        return 1;
}

/*
 *	Output pkt hook. Will call bound ip_vs_app specific function
 *	called by ipvs packet handler, assumes previously checked cp!=NULL
 *	returns false if it can't handle packet (oom)
 */
int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
                      struct ip_vs_iphdr *ipvsh)
{
        struct ip_vs_app *app;

        /*
         *	check if application module is bound to
         *	this ip_vs_conn.
         */
        if ((app = cp->app) == NULL)
                return 1;

        /* TCP is complicated */
        if (cp->protocol == IPPROTO_TCP)
                return app_tcp_pkt_out(cp, skb, app, ipvsh);

        /*
         *	Call private output hook function
         */
        if (app->pkt_out == NULL)
                return 1;

        return app->pkt_out(app, cp, skb, NULL, ipvsh);
}

static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
                                 struct ip_vs_app *app,
                                 struct ip_vs_iphdr *ipvsh)
{
        int diff;
        const unsigned int tcp_offset = ip_hdrlen(skb);
        struct tcphdr *th;
        __u32 seq;

        if (skb_ensure_writable(skb, tcp_offset + sizeof(*th)))
                return 0;

        th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

        /*
         *	Remember seq number in case this pkt gets resized
         */
        seq = ntohl(th->seq);

        /*
         *	Fix seq stuff if flagged as so.
         */
        if (cp->flags & IP_VS_CONN_F_IN_SEQ)
                vs_fix_seq(&cp->in_seq, th);
        if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
                vs_fix_ack_seq(&cp->out_seq, th);

        /*
         *	Call private input hook function
         */
        if (app->pkt_in == NULL)
                return 1;

        if (!app->pkt_in(app, cp, skb, &diff, ipvsh))
                return 0;

        /*
         *	Update ip_vs seq stuff if len has changed.
         */
        if (diff != 0)
                vs_seq_update(cp, &cp->in_seq,
                              IP_VS_CONN_F_IN_SEQ, seq, diff);

        return 1;
}

/*
 *	Input pkt hook. Will call bound ip_vs_app specific function
 *	called by ipvs packet handler, assumes previously checked cp!=NULL.
 *	returns false if can't handle packet (oom).
 */
int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
                     struct ip_vs_iphdr *ipvsh)
{
        struct ip_vs_app *app;

        /*
         *	check if application module is bound to
         *	this ip_vs_conn.
         */
        if ((app = cp->app) == NULL)
                return 1;

        /* TCP is complicated */
        if (cp->protocol == IPPROTO_TCP)
                return app_tcp_pkt_in(cp, skb, app, ipvsh);

        /*
         *	Call private input hook function
         */
        if (app->pkt_in == NULL)
                return 1;

        return app->pkt_in(app, cp, skb, NULL, ipvsh);
}

#ifdef CONFIG_PROC_FS
/*
 *	/proc/net/ip_vs_app entry function
 */

static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
{
        struct ip_vs_app *app, *inc;

        list_for_each_entry(app, &ipvs->app_list, a_list) {
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        if (pos-- == 0)
                                return inc;
                }
        }
        return NULL;
}

static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        struct netns_ipvs *ipvs = net_ipvs(net);

        mutex_lock(&__ip_vs_app_mutex);

        return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct ip_vs_app *inc, *app;
        struct list_head *e;
        struct net *net = seq_file_net(seq);
        struct netns_ipvs *ipvs = net_ipvs(net);

        ++*pos;
        if (v == SEQ_START_TOKEN)
                return ip_vs_app_idx(ipvs, 0);

        inc = v;
        app = inc->app;

        if ((e = inc->a_list.next) != &app->incs_list)
                return list_entry(e, struct ip_vs_app, a_list);

        /* go on to next application */
        for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
                app = list_entry(e, struct ip_vs_app, a_list);
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        return inc;
                }
        }
        return NULL;
}

static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
        mutex_unlock(&__ip_vs_app_mutex);
}

static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "prot port usecnt name\n");
        else {
                const struct ip_vs_app *inc = v;

                seq_printf(seq, "%-3s %-7u %-6d %-17s\n",
                           ip_vs_proto_name(inc->protocol),
                           ntohs(inc->port),
                           atomic_read(&inc->usecnt),
                           inc->name);
        }
        return 0;
}

static const struct seq_operations ip_vs_app_seq_ops = {
        .start = ip_vs_app_seq_start,
        .next  = ip_vs_app_seq_next,
        .stop  = ip_vs_app_seq_stop,
        .show  = ip_vs_app_seq_show,
};
#endif

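/*
 * Example /proc/net/ip_vs_app output (values are illustrative, e.g. with
 * the ftp helper bound to port 21; column widths follow the format above):
 *
 *	prot port usecnt name
 *	TCP 21      3      ftp
 */
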
int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
{
        INIT_LIST_HEAD(&ipvs->app_list);
#ifdef CONFIG_PROC_FS
        if (!proc_create_net("ip_vs_app", 0, ipvs->net->proc_net,
                             &ip_vs_app_seq_ops,
                             sizeof(struct seq_net_private)))
                return -ENOMEM;
#endif
        return 0;
}

void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
{
        unregister_ip_vs_app(ipvs, NULL /* all */);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("ip_vs_app", ipvs->net->proc_net);
#endif
}