
Commit 42d9c80

Author: CKI KWF Bot (committed)

Merge: tcp: stable backport from upstream for 10.2 phase 1

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/1558
JIRA: https://issues.redhat.com/browse/RHEL-115580
CVE: CVE-2025-39955

A bunch of stable fixes from upstream, addressing non critical issues.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Approved-by: Florian Westphal <fwestpha@redhat.com>
Approved-by: Davide Caratti <dcaratti@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: CKI GitLab Kmaint Pipeline Bot <26919896-cki-kmaint-pipeline-bot@users.noreply.gitlab.com>

2 parents: 7bbc1ea + 1203b79

5 files changed: 43 additions & 18 deletions

net/ipv4/tcp.c

Lines changed: 10 additions & 4 deletions
@@ -3048,8 +3048,8 @@ bool tcp_check_oom(const struct sock *sk, int shift)
 
 void __tcp_close(struct sock *sk, long timeout)
 {
+        bool data_was_unread = false;
         struct sk_buff *skb;
-        int data_was_unread = 0;
         int state;
 
         WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
@@ -3068,11 +3068,12 @@ void __tcp_close(struct sock *sk, long timeout)
          * reader process may not have drained the data yet!
          */
         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-                u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
+                u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
                 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-                        len--;
-                data_was_unread += len;
+                        end_seq--;
+                if (after(end_seq, tcp_sk(sk)->copied_seq))
+                        data_was_unread = true;
                 __kfree_skb(skb);
         }
 
@@ -3276,6 +3277,7 @@ int tcp_disconnect(struct sock *sk, int flags)
         struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
         int old_state = sk->sk_state;
+        struct request_sock *req;
         u32 seq;
 
         if (old_state != TCP_CLOSE)
@@ -3390,6 +3392,10 @@ int tcp_disconnect(struct sock *sk, int flags)
 
 
         /* Clean up fastopen related fields */
+        req = rcu_dereference_protected(tp->fastopen_rsk,
+                                        lockdep_sock_is_held(sk));
+        if (req)
+                reqsk_fastopen_remove(sk, req, false);
         tcp_free_fastopen_req(tp);
         inet_clear_bit(DEFER_CONNECT, sk);
         tp->fastopen_client_fail = 0;
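
The __tcp_close() hunk above now reports unread data only when an skb's end_seq lies beyond tp->copied_seq, using the kernel's wrap-safe sequence comparison. As a quick illustration of that comparison, here is a minimal user-space sketch whose before()/after() helpers are modeled on the ones in include/net/tcp.h; the sequence values are hypothetical and chosen to straddle the 32-bit wrap:

/* Standalone sketch of wrap-safe TCP sequence comparison (not kernel code). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;      /* signed distance handles wrap */
}

#define after(seq2, seq1)       before(seq1, seq2)

int main(void)
{
        uint32_t copied_seq = 0xfffffff0u;      /* receiver has consumed up to here */
        uint32_t end_seq    = 0x00000010u;      /* skb ends just past the 32-bit wrap */

        /* "Unread data" in the sense of the hunk above: end_seq is after
         * copied_seq even though it is numerically smaller.
         */
        printf("data_was_unread: %s\n",
               after(end_seq, copied_seq) ? "true" : "false");
        return 0;
}

The point of the switch is visible in the diff itself: only data beyond what the reader has already copied counts as unread, rather than summing the length of every skb still sitting in the receive queue.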

net/ipv4/tcp_fastopen.c

Lines changed: 3 additions & 0 deletions
@@ -3,6 +3,7 @@
 #include <linux/tcp.h>
 #include <linux/rcupdate.h>
 #include <net/tcp.h>
+#include <net/busy_poll.h>
 
 void tcp_fastopen_init_key_once(struct net *net)
 {
@@ -279,6 +280,8 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
 
         refcount_set(&req->rsk_refcnt, 2);
 
+        sk_mark_napi_id_set(child, skb);
+
         /* Now finish processing the fastopen child socket. */
         tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);
 
net/ipv4/tcp_input.c

Lines changed: 28 additions & 14 deletions
@@ -2482,20 +2482,33 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 {
         const struct sock *sk = (const struct sock *)tp;
 
-        if (tp->retrans_stamp &&
-            tcp_tsopt_ecr_before(tp, tp->retrans_stamp))
-                return true; /* got echoed TS before first retransmission */
-
-        /* Check if nothing was retransmitted (retrans_stamp==0), which may
-         * happen in fast recovery due to TSQ. But we ignore zero retrans_stamp
-         * in TCP_SYN_SENT, since when we set FLAG_SYN_ACKED we also clear
-         * retrans_stamp even if we had retransmitted the SYN.
+        /* Received an echoed timestamp before the first retransmission? */
+        if (tp->retrans_stamp)
+                return tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
+
+        /* We set tp->retrans_stamp upon the first retransmission of a loss
+         * recovery episode, so normally if tp->retrans_stamp is 0 then no
+         * retransmission has happened yet (likely due to TSQ, which can cause
+         * fast retransmits to be delayed). So if snd_una advanced while
+         * (tp->retrans_stamp is 0 then apparently a packet was merely delayed,
+         * not lost. But there are exceptions where we retransmit but then
+         * clear tp->retrans_stamp, so we check for those exceptions.
          */
-        if (!tp->retrans_stamp && /* no record of a retransmit/SYN? */
-            sk->sk_state != TCP_SYN_SENT) /* not the FLAG_SYN_ACKED case? */
-                return true; /* nothing was retransmitted */
 
-        return false;
+        /* (1) For non-SACK connections, tcp_is_non_sack_preventing_reopen()
+         * clears tp->retrans_stamp when snd_una == high_seq.
+         */
+        if (!tcp_is_sack(tp) && !before(tp->snd_una, tp->high_seq))
+                return false;
+
+        /* (2) In TCP_SYN_SENT tcp_clean_rtx_queue() clears tp->retrans_stamp
+         * when setting FLAG_SYN_ACKED is set, even if the SYN was
+         * retransmitted.
+         */
+        if (sk->sk_state == TCP_SYN_SENT)
+                return false;
+
+        return true;    /* tp->retrans_stamp is zero; no retransmit yet */
 }
 
 /* Undo procedures. */
@@ -4959,8 +4972,9 @@ static void tcp_ofo_queue(struct sock *sk)
 
                 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
                         __u32 dsack = dsack_high;
+
                         if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
-                                dsack_high = TCP_SKB_CB(skb)->end_seq;
+                                dsack = TCP_SKB_CB(skb)->end_seq;
                         tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
                 }
                 p = rb_next(p);
@@ -5028,6 +5042,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
                 return;
         }
 
+        tcp_measure_rcv_mss(sk, skb);
         /* Disable header prediction. */
         tp->pred_flags = 0;
         inet_csk_schedule_ack(sk);
@@ -7335,7 +7350,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                                             &foc, TCP_SYNACK_FASTOPEN, skb);
                 /* Add the child socket directly into the accept queue */
                 if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
-                        reqsk_fastopen_remove(fastopen_sk, req, false);
                         bh_unlock_sock(fastopen_sk);
                         sock_put(fastopen_sk);
                         goto drop_and_free;
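
In the tcp_ofo_queue() hunk above, the clamped value now goes into the local dsack rather than back into dsack_high, so the bound used for later overlapping skbs is left intact. Here is a toy user-space sketch of the new per-skb clamping; the sequence numbers are hypothetical and plain comparisons stand in for the wrap-safe before():

/* Toy sketch of the new tcp_ofo_queue() DSACK clamping (not kernel code). */
#include <stdint.h>
#include <stdio.h>

struct toy_skb { uint32_t seq, end_seq; };

int main(void)
{
        struct toy_skb q[] = { { 100, 150 }, { 150, 220 } };   /* out-of-order skbs */
        uint32_t dsack_high = 200;      /* hypothetical end of the duplicate range */

        for (unsigned int i = 0; i < 2; i++) {
                if (q[i].seq < dsack_high) {            /* before() in the real code */
                        uint32_t dsack = dsack_high;

                        if (q[i].end_seq < dsack_high)  /* clamp for this skb only */
                                dsack = q[i].end_seq;
                        printf("skb %u: report DSACK [%u, %u), dsack_high still %u\n",
                               i, q[i].seq, dsack, dsack_high);
                }
        }
        return 0;
}

With these values the sketch reports [100, 150) and then [150, 200). The removed line instead shrank dsack_high itself (to 150 here) while reporting the unclamped range for the current skb; the new code reports each skb's own overlap and keeps the bound unchanged.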

net/ipv4/tcp_offload.c

Lines changed: 1 addition & 0 deletions
@@ -355,6 +355,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
                 flush |= skb->ip_summed != p->ip_summed;
                 flush |= skb->csum_level != p->csum_level;
                 flush |= NAPI_GRO_CB(p)->count >= 64;
+                skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
 
                 if (flush || skb_gro_receive_list(p, skb))
                         mss = 1;

net/ipv4/udp_offload.c

Lines changed: 1 addition & 0 deletions
@@ -604,6 +604,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
                         NAPI_GRO_CB(skb)->flush = 1;
                         return NULL;
                 }
+                skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
                 ret = skb_gro_receive_list(p, skb);
         } else {
                 skb_gro_postpull_rcsum(skb, uh,
