Commit cdaa432

Author: CKI KWF Bot (committed)
Merge: xfrm: stable backport for 10.2 phase 1
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/1524
JIRA: https://issues.redhat.com/browse/RHEL-115581

Backport of upstream commits:
  9c607d4 ("xfrm: Use nested-BH locking for nat_keepalive_sk_ipv[46]")
  63c1f19 ("espintcp: fix skb leaks")
  0283636 ("espintcp: remove encap socket caching to avoid reference leak")
  e7a37c9 ("xfrm: use kfree_sensitive() for SA secret zeroization")
  95cfe23 ("xfrm: Skip redundant statistics update for crypto offload")
  94f3980 ("xfrm: Duplicate SPI Handling")
  cd8ae32 ("xfrm: xfrm_alloc_spi shouldn't use 0 as SPI")

Signed-off-by: Sabrina Dubroca <sdubroca@redhat.com>
Approved-by: Florian Westphal <fwestpha@redhat.com>
Approved-by: Xin Long <lxin@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: CKI GitLab Kmaint Pipeline Bot <26919896-cki-kmaint-pipeline-bot@users.noreply.gitlab.com>
2 parents bead764 + 607f0cc commit cdaa432

File tree

6 files changed (+92, -140 lines changed)


include/net/xfrm.h

Lines changed: 0 additions & 1 deletion
@@ -241,7 +241,6 @@ struct xfrm_state {
 
 	/* Data for encapsulator */
 	struct xfrm_encap_tmpl	*encap;
-	struct sock __rcu	*encap_sk;
 
 	/* NAT keepalive */
 	u32			nat_keepalive_interval; /* seconds */

net/ipv4/esp4.c

Lines changed: 7 additions & 46 deletions
@@ -120,47 +120,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
 }
 
 #ifdef CONFIG_INET_ESPINTCP
-struct esp_tcp_sk {
-	struct sock *sk;
-	struct rcu_head rcu;
-};
-
-static void esp_free_tcp_sk(struct rcu_head *head)
-{
-	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);
-
-	sock_put(esk->sk);
-	kfree(esk);
-}
-
 static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
 {
 	struct xfrm_encap_tmpl *encap = x->encap;
 	struct net *net = xs_net(x);
-	struct esp_tcp_sk *esk;
 	__be16 sport, dport;
-	struct sock *nsk;
 	struct sock *sk;
 
-	sk = rcu_dereference(x->encap_sk);
-	if (sk && sk->sk_state == TCP_ESTABLISHED)
-		return sk;
-
 	spin_lock_bh(&x->lock);
 	sport = encap->encap_sport;
 	dport = encap->encap_dport;
-	nsk = rcu_dereference_protected(x->encap_sk,
-					lockdep_is_held(&x->lock));
-	if (sk && sk == nsk) {
-		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
-		if (!esk) {
-			spin_unlock_bh(&x->lock);
-			return ERR_PTR(-ENOMEM);
-		}
-		RCU_INIT_POINTER(x->encap_sk, NULL);
-		esk->sk = sk;
-		call_rcu(&esk->rcu, esp_free_tcp_sk);
-	}
 	spin_unlock_bh(&x->lock);
 
 	sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4,
@@ -173,20 +142,6 @@ static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
 		return ERR_PTR(-EINVAL);
 	}
 
-	spin_lock_bh(&x->lock);
-	nsk = rcu_dereference_protected(x->encap_sk,
-					lockdep_is_held(&x->lock));
-	if (encap->encap_sport != sport ||
-	    encap->encap_dport != dport) {
-		sock_put(sk);
-		sk = nsk ?: ERR_PTR(-EREMCHG);
-	} else if (sk == nsk) {
-		sock_put(sk);
-	} else {
-		rcu_assign_pointer(x->encap_sk, sk);
-	}
-	spin_unlock_bh(&x->lock);
-
 	return sk;
 }
 
@@ -199,8 +154,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
 
 	sk = esp_find_tcp_sk(x);
 	err = PTR_ERR_OR_ZERO(sk);
-	if (err)
+	if (err) {
+		kfree_skb(skb);
 		goto out;
+	}
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk))
@@ -209,6 +166,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
 		err = espintcp_push_skb(sk, skb);
 	bh_unlock_sock(sk);
 
+	sock_put(sk);
+
 out:
 	rcu_read_unlock();
 	return err;
@@ -392,6 +351,8 @@ static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
 	if (IS_ERR(sk))
 		return ERR_CAST(sk);
 
+	sock_put(sk);
+
 	*lenp = htons(len);
 	esph = (struct ip_esp_hdr *)(lenp + 1);
 
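Note: with the encap_sk cache gone, esp_find_tcp_sk() returns either an ERR_PTR() or a socket on which inet_lookup_established() took a reference, and every caller now has to drop that reference itself. The following is an abbreviated sketch of how esp_output_tcp_finish() reads once these hunks are applied, not the verbatim upstream function; the queue-vs-push branch follows the unchanged context lines around the hunks.

/* Sketch: the lookup takes a socket reference, the caller releases it. */
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();
	sk = esp_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err) {
		kfree_skb(skb);		/* on error we still own the skb */
		goto out;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);			/* drop the lookup's reference */
out:
	rcu_read_unlock();
	return err;
}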

net/ipv6/esp6.c

Lines changed: 7 additions & 46 deletions
@@ -137,47 +137,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
 }
 
 #ifdef CONFIG_INET6_ESPINTCP
-struct esp_tcp_sk {
-	struct sock *sk;
-	struct rcu_head rcu;
-};
-
-static void esp_free_tcp_sk(struct rcu_head *head)
-{
-	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);
-
-	sock_put(esk->sk);
-	kfree(esk);
-}
-
 static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
 {
 	struct xfrm_encap_tmpl *encap = x->encap;
 	struct net *net = xs_net(x);
-	struct esp_tcp_sk *esk;
 	__be16 sport, dport;
-	struct sock *nsk;
 	struct sock *sk;
 
-	sk = rcu_dereference(x->encap_sk);
-	if (sk && sk->sk_state == TCP_ESTABLISHED)
-		return sk;
-
 	spin_lock_bh(&x->lock);
 	sport = encap->encap_sport;
 	dport = encap->encap_dport;
-	nsk = rcu_dereference_protected(x->encap_sk,
-					lockdep_is_held(&x->lock));
-	if (sk && sk == nsk) {
-		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
-		if (!esk) {
-			spin_unlock_bh(&x->lock);
-			return ERR_PTR(-ENOMEM);
-		}
-		RCU_INIT_POINTER(x->encap_sk, NULL);
-		esk->sk = sk;
-		call_rcu(&esk->rcu, esp_free_tcp_sk);
-	}
 	spin_unlock_bh(&x->lock);
 
 	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
@@ -190,20 +159,6 @@ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
 		return ERR_PTR(-EINVAL);
 	}
 
-	spin_lock_bh(&x->lock);
-	nsk = rcu_dereference_protected(x->encap_sk,
-					lockdep_is_held(&x->lock));
-	if (encap->encap_sport != sport ||
-	    encap->encap_dport != dport) {
-		sock_put(sk);
-		sk = nsk ?: ERR_PTR(-EREMCHG);
-	} else if (sk == nsk) {
-		sock_put(sk);
-	} else {
-		rcu_assign_pointer(x->encap_sk, sk);
-	}
-	spin_unlock_bh(&x->lock);
-
 	return sk;
 }
 
@@ -216,8 +171,10 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
 
 	sk = esp6_find_tcp_sk(x);
 	err = PTR_ERR_OR_ZERO(sk);
-	if (err)
+	if (err) {
+		kfree_skb(skb);
 		goto out;
+	}
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk))
@@ -226,6 +183,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
 		err = espintcp_push_skb(sk, skb);
 	bh_unlock_sock(sk);
 
+	sock_put(sk);
+
 out:
 	rcu_read_unlock();
 	return err;
@@ -422,6 +381,8 @@ static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
 	if (IS_ERR(sk))
 		return ERR_CAST(sk);
 
+	sock_put(sk);
+
 	*lenp = htons(len);
 	esph = (struct ip_esp_hdr *)(lenp + 1);
 
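Both lookup helpers report failure through the kernel's error-pointer convention used throughout these hunks (ERR_PTR()/IS_ERR()/PTR_ERR()/PTR_ERR_OR_ZERO()): a single pointer return value either points at a real object or encodes a negative errno in the topmost 4095 addresses. Below is a minimal userspace rendering of that idiom, for illustration only; the helpers mirror include/linux/err.h but are reimplemented here, and the toy lookup is hypothetical.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline long PTR_ERR_OR_ZERO(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}

/* Toy lookup that fails the same way esp6_find_tcp_sk() does. */
static void *find_object(int ok)
{
	static int object = 42;

	return ok ? (void *)&object : ERR_PTR(-EINVAL);
}

int main(void)
{
	void *p = find_object(0);
	long err = PTR_ERR_OR_ZERO(p);

	if (err)
		printf("lookup failed: %ld\n", err);	/* prints -22 on Linux */
	else
		printf("lookup succeeded\n");
	return 0;
}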

net/xfrm/espintcp.c

Lines changed: 3 additions & 1 deletion
@@ -171,8 +171,10 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
 	struct espintcp_ctx *ctx = espintcp_getctx(sk);
 
 	if (skb_queue_len(&ctx->out_queue) >=
-	    READ_ONCE(net_hotdata.max_backlog))
+	    READ_ONCE(net_hotdata.max_backlog)) {
+		kfree_skb(skb);
 		return -ENOBUFS;
+	}
 
 	__skb_queue_tail(&ctx->out_queue, skb);
 
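The rule behind this hunk (and the kfree_skb() additions in esp4.c/esp6.c above) is that once a function has taken ownership of an skb, every exit path must either pass the buffer on or free it; returning an error while silently dropping the pointer leaks the memory. A hedged sketch of that convention follows; the function name and limit parameter are illustrative, not part of the patch.

/* Illustrative only: a consumer that either enqueues the skb or frees it. */
static int example_queue_skb(struct sk_buff_head *queue, struct sk_buff *skb,
			     u32 limit)
{
	if (skb_queue_len(queue) >= limit) {
		kfree_skb(skb);		/* error path still consumes the skb */
		return -ENOBUFS;
	}

	/* Lockless queue variant; callers serialize externally, as
	 * espintcp_queue_out() does under the socket lock.
	 */
	__skb_queue_tail(queue, skb);	/* ownership moves to the queue */
	return 0;
}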

net/xfrm/xfrm_nat_keepalive.c

Lines changed: 20 additions & 10 deletions
@@ -9,9 +9,13 @@
 #include <net/ip6_checksum.h>
 #include <net/xfrm.h>
 
-static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv4);
+static DEFINE_PER_CPU(struct sock_bh_locked, nat_keepalive_sk_ipv4) = {
+	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
 #if IS_ENABLED(CONFIG_IPV6)
-static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv6);
+static DEFINE_PER_CPU(struct sock_bh_locked, nat_keepalive_sk_ipv6) = {
+	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
 #endif
 
 struct nat_keepalive {
@@ -56,10 +60,12 @@ static int nat_keepalive_send_ipv4(struct sk_buff *skb,
 
 	skb_dst_set(skb, &rt->dst);
 
-	sk = *this_cpu_ptr(&nat_keepalive_sk_ipv4);
+	local_lock_nested_bh(&nat_keepalive_sk_ipv4.bh_lock);
+	sk = this_cpu_read(nat_keepalive_sk_ipv4.sock);
 	sock_net_set(sk, net);
 	err = ip_build_and_send_pkt(skb, sk, fl4.saddr, fl4.daddr, NULL, tos);
 	sock_net_set(sk, &init_net);
+	local_unlock_nested_bh(&nat_keepalive_sk_ipv4.bh_lock);
 	return err;
 }
 
@@ -89,15 +95,19 @@ static int nat_keepalive_send_ipv6(struct sk_buff *skb,
 	fl6.fl6_sport = ka->encap_sport;
 	fl6.fl6_dport = ka->encap_dport;
 
-	sk = *this_cpu_ptr(&nat_keepalive_sk_ipv6);
+	local_lock_nested_bh(&nat_keepalive_sk_ipv6.bh_lock);
+	sk = this_cpu_read(nat_keepalive_sk_ipv6.sock);
 	sock_net_set(sk, net);
 	dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL);
-	if (IS_ERR(dst))
+	if (IS_ERR(dst)) {
+		local_unlock_nested_bh(&nat_keepalive_sk_ipv6.bh_lock);
 		return PTR_ERR(dst);
+	}
 
 	skb_dst_set(skb, dst);
 	err = ipv6_stub->ip6_xmit(sk, skb, &fl6, skb->mark, NULL, 0, 0);
 	sock_net_set(sk, &init_net);
+	local_unlock_nested_bh(&nat_keepalive_sk_ipv6.bh_lock);
 	return err;
 }
 #endif
@@ -202,7 +212,7 @@ static void nat_keepalive_work(struct work_struct *work)
 				      (ctx.next_run - ctx.now) * HZ);
 }
 
-static int nat_keepalive_sk_init(struct sock * __percpu *socks,
+static int nat_keepalive_sk_init(struct sock_bh_locked __percpu *socks,
 				 unsigned short family)
 {
 	struct sock *sk;
@@ -214,22 +224,22 @@ static int nat_keepalive_sk_init(struct sock * __percpu *socks,
 		if (err < 0)
 			goto err;
 
-		*per_cpu_ptr(socks, i) = sk;
+		per_cpu_ptr(socks, i)->sock = sk;
 	}
 
 	return 0;
 err:
 	for_each_possible_cpu(i)
-		inet_ctl_sock_destroy(*per_cpu_ptr(socks, i));
+		inet_ctl_sock_destroy(per_cpu_ptr(socks, i)->sock);
 	return err;
 }
 
-static void nat_keepalive_sk_fini(struct sock * __percpu *socks)
+static void nat_keepalive_sk_fini(struct sock_bh_locked __percpu *socks)
 {
 	int i;
 
 	for_each_possible_cpu(i)
-		inet_ctl_sock_destroy(*per_cpu_ptr(socks, i));
+		inet_ctl_sock_destroy(per_cpu_ptr(socks, i)->sock);
 }
 
 void xfrm_nat_keepalive_state_updated(struct xfrm_state *x)
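The conversion above is the usual pattern for per-CPU control sockets under the nested-BH locking effort: the bare per-CPU struct sock * becomes a struct sock_bh_locked, and each use is bracketed by local_lock_nested_bh()/local_unlock_nested_bh(), which takes a real per-CPU lock on PREEMPT_RT kernels and is essentially a lockdep annotation otherwise. A condensed sketch of the access pattern follows; the variable and function names are illustrative, not from the patch, and the transmit step is elided.

/* Illustrative per-CPU socket guarded by a nested-BH local lock, mirroring
 * the nat_keepalive_sk_ipv4/6 conversion above.
 */
static DEFINE_PER_CPU(struct sock_bh_locked, example_sk) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static void example_send(struct net *net, struct sk_buff *skb)
{
	struct sock *sk;

	local_lock_nested_bh(&example_sk.bh_lock);	/* serialize per-CPU socket use */
	sk = this_cpu_read(example_sk.sock);
	sock_net_set(sk, net);
	/* ... transmit skb with sk here, e.g. via ip_build_and_send_pkt() ... */
	sock_net_set(sk, &init_net);
	local_unlock_nested_bh(&example_sk.bh_lock);
}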
