From 81e115e19ad2af53215ef3febd0581f214875f43 Mon Sep 17 00:00:00 2001 From: Emmankoko Date: Sat, 4 Jan 2025 18:20:11 +0000 Subject: [PATCH 1/9] Initiate NPF in ALTQ altq communicates to pf through pf_altq queue structures. the same is done for npf and is used to replace pf's pf_altq. npf_altq fields are compatible with struct pf_altq as they represent a complete queue structure. a needs-flag is set on npf to produce a npf.h during config in the kernel directory which will set optional compilation for npf related code in altq. it is also handled at runtime in the npf module. --- sys/altq/altq_cbq.c | 22 ++++----- sys/altq/altq_hfsc.c | 22 ++++----- sys/altq/altq_priq.c | 22 ++++----- sys/altq/altq_subr.c | 24 ++++----- sys/modules/npf/npf.h | 1 + sys/net/npf/files.npf | 2 +- sys/net/npf/npf_altq.h | 107 +++++++++++++++++++++++++++++++++++++++++ 7 files changed, 154 insertions(+), 46 deletions(-) create mode 100644 sys/modules/npf/npf.h create mode 100644 sys/net/npf/npf_altq.h diff --git a/sys/altq/altq_cbq.c b/sys/altq/altq_cbq.c index 583a3a2c1c90b..9b508a456e3db 100644 --- a/sys/altq/altq_cbq.c +++ b/sys/altq/altq_cbq.c @@ -37,7 +37,7 @@ __KERNEL_RCSID(0, "$NetBSD: altq_cbq.c,v 1.42 2025/01/08 13:00:04 joe Exp $"); #ifdef _KERNEL_OPT #include "opt_altq.h" #include "opt_inet.h" -#include "pf.h" +#include "npf.h" #endif #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */ @@ -59,8 +59,8 @@ __KERNEL_RCSID(0, "$NetBSD: altq_cbq.c,v 1.42 2025/01/08 13:00:04 joe Exp $"); #include #include -#if NPF > 0 -#include +#if NNPF > 0 +#include #endif #include #include @@ -242,9 +242,9 @@ get_class_stats(class_stats_t *statsp, struct rm_class *cl) #endif } -#if NPF > 0 +#if NNPF > 0 int -cbq_pfattach(struct pf_altq *a) +cbq_pfattach(struct npf_altq *a) { struct ifnet *ifp; int s, error; @@ -259,7 -259,7 @@ cbq_pfattach(struct pf_altq *a) } int -cbq_add_altq(struct pf_altq *a) +cbq_add_altq(struct npf_altq *a) { cbq_state_t *cbqp; struct ifnet *ifp; @@ -285,7
+285,7 @@ cbq_add_altq(struct pf_altq *a) } int -cbq_remove_altq(struct pf_altq *a) +cbq_remove_altq(struct npf_altq *a) { cbq_state_t *cbqp; @@ -308,7 +308,7 @@ cbq_remove_altq(struct pf_altq *a) #define NSEC_TO_PSEC(s) ((uint64_t)(s) * 1000) int -cbq_add_queue(struct pf_altq *a) +cbq_add_queue(struct npf_altq *a) { struct rm_class *borrow, *parent; cbq_state_t *cbqp; @@ -415,7 +415,7 @@ cbq_add_queue(struct pf_altq *a) } int -cbq_remove_queue(struct pf_altq *a) +cbq_remove_queue(struct npf_altq *a) { struct rm_class *cl; cbq_state_t *cbqp; @@ -451,7 +451,7 @@ cbq_remove_queue(struct pf_altq *a) } int -cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) +cbq_getqstats(struct npf_altq *a, void *ubuf, int *nbytes) { cbq_state_t *cbqp; struct rm_class *cl; @@ -475,7 +475,7 @@ cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) *nbytes = sizeof(stats); return (0); } -#endif /* NPF > 0 */ +#endif /* NNPF > 0 */ /* * int diff --git a/sys/altq/altq_hfsc.c b/sys/altq/altq_hfsc.c index 46cd68b65cb8e..dde154d422891 100644 --- a/sys/altq/altq_hfsc.c +++ b/sys/altq/altq_hfsc.c @@ -48,7 +48,7 @@ __KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.31 2025/01/08 13:00:04 joe Exp $"); #ifdef _KERNEL_OPT #include "opt_altq.h" #include "opt_inet.h" -#include "pf.h" +#include "npf.h" #endif #ifdef ALTQ_HFSC /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */ @@ -70,8 +70,8 @@ __KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.31 2025/01/08 13:00:04 joe Exp $"); #include #include -#if NPF > 0 -#include +#if NNPF > 0 +#include #endif #include #include @@ -173,9 +173,9 @@ altqdev_decl(hfsc); static struct hfsc_if *hif_list = NULL; #endif /* ALTQ3_COMPAT */ -#if NPF > 0 +#if NNPF > 0 int -hfsc_pfattach(struct pf_altq *a) +hfsc_pfattach(struct npf_altq *a) { struct ifnet *ifp; int s, error; @@ -190,7 +190,7 @@ hfsc_pfattach(struct pf_altq *a) } int -hfsc_add_altq(struct pf_altq *a) +hfsc_add_altq(struct npf_altq *a) { struct hfsc_if *hif; struct ifnet *ifp; @@ -219,7 +219,7 @@ 
hfsc_add_altq(struct pf_altq *a) } int -hfsc_remove_altq(struct pf_altq *a) +hfsc_remove_altq(struct npf_altq *a) { struct hfsc_if *hif; @@ -238,7 +238,7 @@ hfsc_remove_altq(struct pf_altq *a) } int -hfsc_add_queue(struct pf_altq *a) +hfsc_add_queue(struct npf_altq *a) { struct hfsc_if *hif; struct hfsc_class *cl, *parent; @@ -281,7 +281,7 @@ hfsc_add_queue(struct pf_altq *a) } int -hfsc_remove_queue(struct pf_altq *a) +hfsc_remove_queue(struct npf_altq *a) { struct hfsc_if *hif; struct hfsc_class *cl; @@ -296,7 +296,7 @@ hfsc_remove_queue(struct pf_altq *a) } int -hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) +hfsc_getqstats(struct npf_altq *a, void *ubuf, int *nbytes) { struct hfsc_if *hif; struct hfsc_class *cl; @@ -320,7 +320,7 @@ hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) *nbytes = sizeof(stats); return 0; } -#endif /* NPF > 0 */ +#endif /* NNPF > 0 */ /* * bring the interface back to the initial state by discarding diff --git a/sys/altq/altq_priq.c b/sys/altq/altq_priq.c index 80a4151f5f85a..f3d2f80e5aa09 100644 --- a/sys/altq/altq_priq.c +++ b/sys/altq/altq_priq.c @@ -36,7 +36,7 @@ __KERNEL_RCSID(0, "$NetBSD: altq_priq.c,v 1.29 2025/01/08 13:00:04 joe Exp $"); #ifdef _KERNEL_OPT #include "opt_altq.h" #include "opt_inet.h" -#include "pf.h" +#include "npf.h" #endif #ifdef ALTQ_PRIQ /* priq is enabled by ALTQ_PRIQ option in opt_altq.h */ @@ -56,8 +56,8 @@ __KERNEL_RCSID(0, "$NetBSD: altq_priq.c,v 1.29 2025/01/08 13:00:04 joe Exp $"); #include #include -#if NPF > 0 -#include +#if NNPF > 0 +#include #endif #include #include @@ -105,9 +105,9 @@ altqdev_decl(priq); static struct priq_if *pif_list = NULL; #endif /* ALTQ3_COMPAT */ -#if NPF > 0 +#if NNPF > 0 int -priq_pfattach(struct pf_altq *a) +priq_pfattach(struct npf_altq *a) { struct ifnet *ifp; int s, error; @@ -122,7 +122,7 @@ priq_pfattach(struct pf_altq *a) } int -priq_add_altq(struct pf_altq *a) +priq_add_altq(struct npf_altq *a) { struct priq_if *pif; struct ifnet *ifp; @@ 
-146,7 +146,7 @@ priq_add_altq(struct pf_altq *a) } int -priq_remove_altq(struct pf_altq *a) +priq_remove_altq(struct npf_altq *a) { struct priq_if *pif; @@ -161,7 +161,7 @@ priq_remove_altq(struct pf_altq *a) } int -priq_add_queue(struct pf_altq *a) +priq_add_queue(struct npf_altq *a) { struct priq_if *pif; struct priq_class *cl; @@ -188,7 +188,7 @@ priq_add_queue(struct pf_altq *a) } int -priq_remove_queue(struct pf_altq *a) +priq_remove_queue(struct npf_altq *a) { struct priq_if *pif; struct priq_class *cl; @@ -203,7 +203,7 @@ priq_remove_queue(struct pf_altq *a) } int -priq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) +priq_getqstats(struct npf_altq *a, void *ubuf, int *nbytes) { struct priq_if *pif; struct priq_class *cl; @@ -227,7 +227,7 @@ priq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) *nbytes = sizeof(stats); return 0; } -#endif /* NPF > 0 */ +#endif /* NNPF > 0 */ /* * bring the interface back to the initial state by discarding diff --git a/sys/altq/altq_subr.c b/sys/altq/altq_subr.c index 53e54b9c8948b..cba90252455dc 100644 --- a/sys/altq/altq_subr.c +++ b/sys/altq/altq_subr.c @@ -33,7 +33,7 @@ __KERNEL_RCSID(0, "$NetBSD: altq_subr.c,v 1.34 2025/01/08 13:00:04 joe Exp $"); #ifdef _KERNEL_OPT #include "opt_altq.h" #include "opt_inet.h" -#include "pf.h" +#include "npf.h" #endif #include @@ -62,8 +62,8 @@ __KERNEL_RCSID(0, "$NetBSD: altq_subr.c,v 1.34 2025/01/08 13:00:04 joe Exp $"); #include #include -#if NPF > 0 -#include +#if NNPF > 0 +#include #endif #include #ifdef ALTQ3_COMPAT @@ -400,13 +400,13 @@ tbr_get(struct ifaltq *ifq, struct tb_profile *profile) return 0; } -#if NPF > 0 +#if NNPF > 0 /* * attach a discipline to the interface. if one already exists, it is * overridden. */ int -altq_pfattach(struct pf_altq *a) +altq_pfattach(struct npf_altq *a) { int error = 0; @@ -441,7 +441,7 @@ altq_pfattach(struct pf_altq *a) * discipline. 
*/ int -altq_pfdetach(struct pf_altq *a) +altq_pfdetach(struct npf_altq *a) { struct ifnet *ifp; int s, error = 0; @@ -467,7 +467,7 @@ altq_pfdetach(struct pf_altq *a) * add a discipline or a queue */ int -altq_add(struct pf_altq *a) +altq_add(struct npf_altq *a) { int error = 0; @@ -506,7 +506,7 @@ altq_add(struct pf_altq *a) * remove a discipline or a queue */ int -altq_remove(struct pf_altq *a) +altq_remove(struct npf_altq *a) { int error = 0; @@ -540,7 +540,7 @@ altq_remove(struct pf_altq *a) * add a queue to the discipline */ int -altq_add_queue(struct pf_altq *a) +altq_add_queue(struct npf_altq *a) { int error = 0; @@ -571,7 +571,7 @@ altq_add_queue(struct pf_altq *a) * remove a queue from the discipline */ int -altq_remove_queue(struct pf_altq *a) +altq_remove_queue(struct npf_altq *a) { int error = 0; @@ -602,7 +602,7 @@ altq_remove_queue(struct pf_altq *a) * get queue statistics */ int -altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) +altq_getqstats(struct npf_altq *a, void *ubuf, int *nbytes) { int error = 0; @@ -628,7 +628,7 @@ altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) return error; } -#endif /* NPF > 0 */ +#endif /* NNPF > 0 */ /* * read and write diffserv field in IPv4 or IPv6 header diff --git a/sys/modules/npf/npf.h b/sys/modules/npf/npf.h new file mode 100644 index 0000000000000..8945345e2890c --- /dev/null +++ b/sys/modules/npf/npf.h @@ -0,0 +1 @@ +#define NNPF 1 \ No newline at end of file diff --git a/sys/net/npf/files.npf b/sys/net/npf/files.npf index 80727f79f5d39..c6346153abc69 100644 --- a/sys/net/npf/files.npf +++ b/sys/net/npf/files.npf @@ -10,7 +10,7 @@ defpseudo npf: ifnet, libnv # Core -file net/npf/npf.c npf +file net/npf/npf.c npf needs-flag file net/npf/npf_conf.c npf file net/npf/npf_ctl.c npf file net/npf/npf_handler.c npf diff --git a/sys/net/npf/npf_altq.h b/sys/net/npf/npf_altq.h new file mode 100644 index 0000000000000..bd46ae3e48f7a --- /dev/null +++ b/sys/net/npf/npf_altq.h @@ -0,0 +1,107 @@ +/* 
NetBSD */ +/*- + * Copyright (c) 2024 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Emmanuel Nyarko. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include +#ifndef NPF_ALTQ_H_ +#define NPF_ALTQ_H_ + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif +/* queueing flags */ +#ifndef NPF_QNAME_SIZE +#define NPF_QNAME_SIZE 64 +#endif +#ifndef TAGID_MAX +#define TAGID_MAX 50000 +#endif +#ifndef NPF_TAG_NAME_SIZE +#define NPF_TAG_NAME_SIZE 64 +#endif + +/* + * options defined on the cbq, priq and hfsc when configuring them + */ +struct npf_cbq_opts { + uint32_t minburst; + uint32_t maxburst; + uint32_t pktsize; + uint32_t maxpktsize; + uint32_t ns_per_byte; + uint32_t maxidle; + int minidle; + uint32_t offtime; + int flags; +}; + +struct npf_priq_opts { + int flags; +}; + +struct npf_hfsc_opts { + /* real-time service curve */ + uint32_t rtsc_m1; /* slope of the 1st segment in bps */ + uint32_t rtsc_d; /* the x-projection of m1 in msec */ + uint32_t rtsc_m2; /* slope of the 2nd segment in bps */ + /* link-sharing service curve */ + uint32_t lssc_m1; + uint32_t lssc_d; + uint32_t lssc_m2; + /* upper-limit service curve */ + uint32_t ulsc_m1; + uint32_t ulsc_d; + uint32_t ulsc_m2; + int flags; +}; + +/* entries for our tail queue for our altqs */ +struct npf_altq { + char ifname[IFNAMSIZ]; + void *altq_disc; /* discipline-specific state */ + TAILQ_ENTRY(npf_altq) entries; + /* scheduler spec */ + uint8_t scheduler; /* scheduler type */ + uint16_t tbrsize; /* tokenbucket regulator size */ + uint32_t ifbandwidth; /* interface bandwidth */ + /* queue spec */ + char qname[NPF_QNAME_SIZE]; /* queue name */ + char parent[NPF_QNAME_SIZE]; /* parent name */ + uint32_t parent_qid; /* parent queue id */ + uint32_t bandwidth; /* queue bandwidth */ + uint8_t priority; /* priority */ + uint16_t qlimit; /* queue size limit */ + uint16_t flags; /* misc flags */ + union { + struct npf_cbq_opts cbq_opts; + struct npf_priq_opts priq_opts; + struct npf_hfsc_opts hfsc_opts; + } pq_u; + u_int32_t qid; /* return value */ +}; +#endif /* NPF_ALTQ_H_ */ From 8a05f6d99c59f274adb7748cd38d5ee93bb459df Mon Sep 17 00:00:00 2001 From: Emmankoko 
Date: Tue, 7 Jan 2025 11:18:14 +0000 Subject: [PATCH 2/9] ALTQ grammar in npf.conf --- usr.sbin/npf/npfctl/npf_parse.y | 410 +++++++++++++++++++++++++++++++- usr.sbin/npf/npfctl/npf_scan.l | 21 +- usr.sbin/npf/npfctl/npfctl.h | 72 ++++++ 3 files changed, 496 insertions(+), 7 deletions(-) diff --git a/usr.sbin/npf/npfctl/npf_parse.y b/usr.sbin/npf/npfctl/npf_parse.y index d77f462cd8c58..4c710cacd1a0d 100644 --- a/usr.sbin/npf/npfctl/npf_parse.y +++ b/usr.sbin/npf/npfctl/npf_parse.y @@ -3,7 +3,8 @@ * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation - * by Martin Husemann, Christos Zoulas and Mindaugas Rasiukevicius. + * by Martin Husemann, Christos Zoulas, Mindaugas Rasiukevicius and + * Emmanuel Nyarko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -38,6 +39,11 @@ #include #endif +#include +#include +#include +#include + #include "npfctl.h" #define YYSTACKSIZE 4096 @@ -48,6 +54,9 @@ const char * yyfilename; extern int yylineno, yycolumn; extern int yylex(int); +struct node_hfsc_opts hfsc_opts; +struct queue_opts queue_opts; + void yyerror(const char *fmt, ...) { @@ -167,7 +176,21 @@ yyerror(const char *fmt, ...) %token TCP %token TO %token TREE +%token ALTQ +%token CBQ +%token PRIQ +%token HFSC +%token BANDWIDTH +%token TBRSIZE +%token LINKSHARE +%token REALTIME +%token UPPERLIMIT +%token QUEUE +%token PRIORITY +%token QLIMIT +%token RTABLE %token TYPE + %token ICMP %token ICMP6 @@ -181,8 +204,9 @@ yyerror(const char *fmt, ...) %token PARAM %token TABLE_ID %token VAR_ID +%token BW_SPEC -%type addr some_name table_store dynamic_ifaddrs +%type addr some_name table_store dynamic_ifaddrs bw_spec %type proc_param_val opt_apply ifname on_ifname ifref %type port opt_final number afamily opt_family %type block_or_pass rule_dir group_dir block_opts @@ -199,6 +223,15 @@ yyerror(const char *fmt, ...) 
%type filt_opts all_or_filt_opts %type rawproto %type group_opts +%type queue_opts queue_opt queue_opts_l +%type rule_queue +%type qassign qassign_list qassign_item +%type scheduler +%type cbqflags_list cbqflags_item +%type priqflags_list priqflags_item +%type hfscopts_list hfscopts_item hfsc_opts +%type bandwidth +%type queue_flags %union { char * str; @@ -209,6 +242,12 @@ yyerror(const char *fmt, ...) filt_opts_t filtopts; opt_proto_t optproto; rule_group_t rulegroup; + struct node_queue *queue; + struct node_queue_opt queue_options; + struct node_queue_bw queue_bwspec; + struct node_qassign qassign; + struct queue_opts queue_opts; + struct node_hfsc_opts hfsc_opts; } %% @@ -232,9 +271,346 @@ line | rproc | alg | set + | altq + | queuespec | ; +altq : ALTQ on_ifname queue_opts QUEUE qassign { + struct npf_altq a; + + if ($3.scheduler.qtype == ALTQT_NONE) { + yyerror("no scheduler specified!"); + YYERROR; + } + memset(&a, 0, sizeof(a)); + a.scheduler = $3.scheduler.qtype; + a.qlimit = $3.qlimit; + a.tbrsize = $3.tbrsize; + if ($5 == NULL) { + yyerror("no child queues specified"); + YYERROR; + } + if (expand_altq(&a, $2, $5, $3.queue_bwspec, + &$3.scheduler)) + YYERROR; + } + ; + +queuespec : QUEUE IDENTIFIER on_ifname queue_opts qassign { + struct npf_altq a; + + memset(&a, 0, sizeof(a)); + if (strlcpy(a.qname, $2, sizeof(a.qname)) >= + sizeof(a.qname)) { + yyerror("queue name too long (max " + "%d chars)", NPF_QNAME_SIZE-1); + free($2); + YYERROR; + } + free($2); + if ($4.tbrsize) { + yyerror("cannot specify tbrsize for queue"); + YYERROR; + } + if ($4.priority > 255) { + yyerror("priority out of range: max 255"); + YYERROR; + } + a.priority = $4.priority; + a.qlimit = $4.qlimit; + a.scheduler = $4.scheduler.qtype; + if (expand_queue(&a, $3, $5, $4.queue_bwspec, + &$4.scheduler)) { + yyerror("errors in queue definition"); + YYERROR; + } + } + ; + +queue_opts : { + memset(&queue_opts, 0, sizeof(queue_opts)); + queue_opts.priority = DEFAULT_PRIORITY; + 
queue_opts.qlimit = DEFAULT_QLIMIT; + queue_opts.scheduler.qtype = ALTQT_NONE; + queue_opts.queue_bwspec.bw_percent = 100; + } + queue_opts_l + { $$ = queue_opts; } + | /* empty */ { + memset(&queue_opts, 0, sizeof(queue_opts)); + queue_opts.priority = DEFAULT_PRIORITY; + queue_opts.qlimit = DEFAULT_QLIMIT; + queue_opts.scheduler.qtype = ALTQT_NONE; + queue_opts.queue_bwspec.bw_percent = 100; + $$ = queue_opts; + } + ; + +queue_opts_l : queue_opts_l queue_opt + | queue_opt + ; + +queue_opt : BANDWIDTH bandwidth { + if (queue_opts.marker & QOM_BWSPEC) { + yyerror("bandwidth cannot be respecified"); + YYERROR; + } + queue_opts.marker |= QOM_BWSPEC; + queue_opts.queue_bwspec = $2; + } + | PRIORITY number { + if (queue_opts.marker & QOM_PRIORITY) { + yyerror("priority cannot be respecified"); + YYERROR; + } + if ($2 > 255) { + yyerror("priority out of range: max 255"); + YYERROR; + } + queue_opts.marker |= QOM_PRIORITY; + queue_opts.priority = $2; + } + | QLIMIT number { + if (queue_opts.marker & QOM_QLIMIT) { + yyerror("qlimit cannot be respecified"); + YYERROR; + } + if ($2 > 65535) { + yyerror("qlimit out of range: max 65535"); + YYERROR; + } + queue_opts.marker |= QOM_QLIMIT; + queue_opts.qlimit = $2; + } + | scheduler { + if (queue_opts.marker & QOM_SCHEDULER) { + yyerror("scheduler cannot be respecified"); + YYERROR; + } + queue_opts.marker |= QOM_SCHEDULER; + queue_opts.scheduler = $1; + } + | TBRSIZE number { + if (queue_opts.marker & QOM_TBRSIZE) { + yyerror("tbrsize cannot be respecified"); + YYERROR; + } + if ($2 > 65535) { + yyerror("tbrsize too big: max 65535"); + YYERROR; + } + queue_opts.marker |= QOM_TBRSIZE; + queue_opts.tbrsize = $2; + } + ; + +bandwidth : bw_spec { + struct node_queue_bw bw; + + if (npfctl_eval_bw(&bw, $1)) { + YYERROR; + free($1); + } + $$ = bw; + } + ; + +bw_spec : BW_SPEC {$$ = $1; } + ; + +scheduler : CBQ { + $$.qtype = ALTQT_CBQ; + $$.data.cbq_opts.flags = 0; + } + | CBQ PAR_OPEN cbqflags_list PAR_CLOSE { + $$.qtype = ALTQT_CBQ; 
+ $$.data.cbq_opts.flags = $3; + } + | PRIQ { + $$.qtype = ALTQT_PRIQ; + $$.data.priq_opts.flags = 0; + } + | PRIQ PAR_OPEN priqflags_list PAR_CLOSE { + $$.qtype = ALTQT_PRIQ; + $$.data.priq_opts.flags = $3; + } + | HFSC { + $$.qtype = ALTQT_HFSC; + memset(&$$.data.hfsc_opts, 0, + sizeof($$.data.hfsc_opts)); + } + | HFSC PAR_OPEN hfsc_opts PAR_CLOSE { + $$.qtype = ALTQT_HFSC; + $$.data.hfsc_opts = $3; + } + ; + +cbqflags_list : cbqflags_item { $$ |= $1; } + | cbqflags_list COMMA cbqflags_item { $$ |= $3; } + ; + +cbqflags_item : IDENTIFIER { +#ifdef CBQCLF_BORROW + if (!strcmp($1, "borrow")) + $$ = CBQCLF_BORROW; + else +#endif + if (!strcmp($1, "red")) + $$ = CBQCLF_RED; + else if (!strcmp($1, "ecn")) + $$ = CBQCLF_RED|CBQCLF_ECN; + else if (!strcmp($1, "rio")) + $$ = CBQCLF_RIO; + else { + yyerror("unknown cbq flag \"%s\"", $1); + free($1); + YYERROR; + } + free($1); + } + | DEFAULT { $$ = CBQCLF_DEFCLASS; } + ; + +priqflags_list : priqflags_item { $$ |= $1; } + | priqflags_list COMMA priqflags_item { $$ |= $3; } + ; + +priqflags_item : IDENTIFIER { + if (!strcmp($1, "red")) + $$ = PRCF_RED; + else if (!strcmp($1, "ecn")) + $$ = PRCF_RED|PRCF_ECN; + else if (!strcmp($1, "rio")) + $$ = PRCF_RIO; + else { + yyerror("unknown priq flag \"%s\"", $1); + free($1); + YYERROR; + } + free($1); + } + | DEFAULT { $$ = PRCF_DEFAULTCLASS; } + ; + +hfsc_opts : { + memset(&hfsc_opts, 0, + sizeof(hfsc_opts)); + } + hfscopts_list { + $$ = hfsc_opts; + } + ; + +hfscopts_list : hfscopts_item + | hfscopts_list COMMA hfscopts_item + ; + +hfscopts_item : LINKSHARE bandwidth { + if (hfsc_opts.linkshare.used) { + yyerror("linkshare already specified"); + YYERROR; + } + hfsc_opts.linkshare.m2 = $2; + hfsc_opts.linkshare.used = 1; + } + | LINKSHARE PAR_OPEN bandwidth COMMA number COMMA bandwidth PAR_CLOSE + { + if (hfsc_opts.linkshare.used) { + yyerror("linkshare already specified"); + YYERROR; + } + hfsc_opts.linkshare.m1 = $3; + hfsc_opts.linkshare.d = $5; + hfsc_opts.linkshare.m2 = $7; +
hfsc_opts.linkshare.used = 1; + } + | REALTIME bandwidth { + if (hfsc_opts.realtime.used) { + yyerror("realtime already specified"); + YYERROR; + } + hfsc_opts.realtime.m2 = $2; + hfsc_opts.realtime.used = 1; + } + | REALTIME PAR_OPEN bandwidth COMMA number COMMA bandwidth PAR_CLOSE + { + if (hfsc_opts.realtime.used) { + yyerror("realtime already specified"); + YYERROR; + } + hfsc_opts.realtime.m1 = $3; + hfsc_opts.realtime.d = $5; + hfsc_opts.realtime.m2 = $7; + hfsc_opts.realtime.used = 1; + } + | UPPERLIMIT bandwidth { + if (hfsc_opts.upperlimit.used) { + yyerror("upperlimit already specified"); + YYERROR; + } + hfsc_opts.upperlimit.m2 = $2; + hfsc_opts.upperlimit.used = 1; + } + | UPPERLIMIT PAR_OPEN bandwidth COMMA number COMMA bandwidth PAR_CLOSE + { + if (hfsc_opts.upperlimit.used) { + yyerror("upperlimit already specified"); + YYERROR; + } + hfsc_opts.upperlimit.m1 = $3; + hfsc_opts.upperlimit.d = $5; + hfsc_opts.upperlimit.m2 = $7; + hfsc_opts.upperlimit.used = 1; + } + | IDENTIFIER { + if (!strcmp($1, "red")) + hfsc_opts.flags |= HFCF_RED; + else if (!strcmp($1, "ecn")) + hfsc_opts.flags |= HFCF_RED|HFCF_ECN; + else if (!strcmp($1, "rio")) + hfsc_opts.flags |= HFCF_RIO; + else { + yyerror("unknown hfsc flag \"%s\"", $1); + free($1); + YYERROR; + } + free($1); + } + | DEFAULT { hfsc_opts.flags |= HFCF_DEFAULTCLASS; } + ; + +qassign : /* empty */ { $$ = NULL; } + | qassign_item { $$ = $1; } + | CURLY_OPEN qassign_list CURLY_CLOSE { $$ = $2; } + ; + +qassign_list : qassign_item { $$ = $1; } + | qassign_list COMMA qassign_item { + $1->tail->next = $3; + $1->tail = $3; + $$ = $1; + } + ; + +qassign_item : IDENTIFIER { + $$ = calloc(1, sizeof(struct node_queue)); + + if ($$ == NULL) + err(1, "qassign_item: calloc"); + if (strlcpy($$->queue, $1, sizeof($$->queue)) >= + sizeof($$->queue)) { + yyerror("queue name '%s' too long (max " + "%lu chars)", $1, sizeof($$->queue) -1); + free($1); + free($$); + YYERROR; + } + free($1); + $$->next = NULL; + $$->tail = $$; + 
} + ; + alg : ALG STRING { @@ -544,20 +920,21 @@ rule_group /* * Rule and misc. + * Make rule with queue optional */ rule : block_or_pass opt_stateful rule_dir opt_final on_ifname - opt_family opt_proto all_or_filt_opts opt_apply + opt_family opt_proto all_or_filt_opts opt_apply rule_queue { npfctl_build_rule($1 | $2 | $3 | $4, $5, - $6, $7, &$8, NULL, $9); + $6, $7, &$8, NULL, $9, $10); } | block_or_pass opt_stateful rule_dir opt_final on_ifname - PCAP_FILTER STRING opt_apply + PCAP_FILTER STRING opt_apply rule_queue { npfctl_build_rule($1 | $2 | $3 | $4, $5, - AF_UNSPEC, NULL, NULL, $7, $8); + AF_UNSPEC, NULL, NULL, $7, $8, $9); } ; @@ -678,6 +1055,27 @@ opt_apply | { $$ = NULL; } ; +rule_queue + : /* Empty */ + { + $$.qname = NULL; + $$.pqname = NULL; + } + | QUEUE STRING + { + $$.qname = $2; + } + | QUEUE PAR_OPEN STRING PAR_CLOSE + { + $$.qname = $3; + } + | QUEUE PAR_OPEN STRING COMMA STRING PAR_CLOSE + { + $$.qname = $3; + $$.pqname = $5; + } + ; + block_opts : RETURNRST { $$ = NPF_RULE_RETRST; } | RETURNICMP { $$ = NPF_RULE_RETICMP; } diff --git a/usr.sbin/npf/npfctl/npf_scan.l b/usr.sbin/npf/npfctl/npf_scan.l index 28bc5e9665742..7c272aea75c2c 100644 --- a/usr.sbin/npf/npfctl/npf_scan.l +++ b/usr.sbin/npf/npfctl/npf_scan.l @@ -3,7 +3,7 @@ * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation - * by Martin Husemann. 
+ * by Martin Husemann and Emmanuel Nyarko * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -96,6 +96,7 @@ DID [a-zA-Z_][a-zA-Z_0-9-]* SPID [a-zA-Z][a-zA-Z_0-9.]* NUMBER [0-9]+ HEXDIG [0-9a-fA-F]+ +BW_UNIT [A-Za-z%]+ %% %{ @@ -174,6 +175,19 @@ icmp-type return ICMPTYPE; code return CODE; any return ANY; +altq return ALTQ; +cbq return CBQ; +priq return PRIQ; +hfsc return HFSC; +bandwidth return BANDWIDTH; +tbrsize return TBRSIZE; +linkshare return LINKSHARE; +realtime return REALTIME; +upperlimit return UPPERLIMIT; +queue return QUEUE; +priority return PRIORITY; +qlimit return QLIMIT; + "/" return SLASH; "{" return CURLY_OPEN; "}" return CURLY_CLOSE; @@ -183,6 +197,11 @@ any return ANY; "=" return EQ; "!" return EXCL_MARK; +{NUMBER}[\.]*[0-9]*{BW_UNIT} { + yylval.str = estrndup(yytext, yyleng); + return BW_SPEC; +} + "0x"{HEXDIG} { char *endp, *buf = ecalloc(1, yyleng + 1); buf[yyleng] = 0; diff --git a/usr.sbin/npf/npfctl/npfctl.h b/usr.sbin/npf/npfctl/npfctl.h index 4a17517488079..a99d3bd59328a 100644 --- a/usr.sbin/npf/npfctl/npfctl.h +++ b/usr.sbin/npf/npfctl/npfctl.h @@ -48,6 +48,78 @@ #define NPF_CONF_PATH "/etc/npf.conf" #define NPF_DB_PATH "/var/db/npf.db" +#ifndef DEFAULT_QLIMIT +#define DEFAULT_QLIMIT 50 +#endif +#ifndef DEFAULT_PRIORITY +#define DEFAULT_PRIORITY 1 +#endif + +struct node_queue_bw { + uint32_t bw_absolute; + uint16_t bw_percent; +}; + +struct node_hfsc_sc { + struct node_queue_bw m1; /* slope of 1st segment; bps */ + u_int d; /* x-projection of m1; msec */ + struct node_queue_bw m2; /* slope of 2nd segment; bps */ + uint8_t used; +}; + +struct node_hfsc_opts { + struct node_hfsc_sc realtime; + struct node_hfsc_sc linkshare; + struct node_hfsc_sc upperlimit; + int flags; +}; + +struct
queue_opts { + int marker; +/* use flags for which option is set*/ +#define QOM_BWSPEC 0x01 +#define QOM_SCHEDULER 0x02 +#define QOM_PRIORITY 0x04 +#define QOM_TBRSIZE 0x08 +#define QOM_QLIMIT 0x10 + struct node_queue_bw queue_bwspec; + struct node_queue_opt scheduler; + int priority; + int tbrsize; + int qlimit; +}; + +struct node_queue { + char queue[NPF_QNAME_SIZE]; + char parent[NPF_QNAME_SIZE]; + char ifname[IFNAMSIZ]; + int scheduler; + struct node_queue *next; + struct node_queue *tail; +}; + +struct node_qassign { + char *qname; + char *pqname; +}; + +/* + * generalized service curve used for admission control + */ +struct segment { + LIST_ENTRY(segment) _next; + double x, y, d, m; +}; + typedef struct fam_addr_mask { sa_family_t fam_family; npf_addr_t fam_addr; From b22548e311496315e975999fdd875ab5f158bf0c Mon Sep 17 00:00:00 2001 From: Emmankoko Date: Mon, 3 Feb 2025 14:18:34 +0000 Subject: [PATCH 3/9] define altq struct for ioctl communication includes the queue structure field and a trace of the number of queues being loaded into the kernel and make npf_altq available in include directory for use in altq framework --- sys/net/npf/Makefile | 2 +- sys/net/npf/npf.h | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/sys/net/npf/Makefile b/sys/net/npf/Makefile index 898c7f90575cf..d3ce52463323d 100644 --- a/sys/net/npf/Makefile +++ b/sys/net/npf/Makefile @@ -4,6 +4,6 @@ # INCSDIR= /usr/include/net -INCS= npf.h +INCS= npf.h npf_altq.h .include diff --git a/sys/net/npf/npf.h b/sys/net/npf/npf.h index d9d5f77ea0d51..048d3d8a80c3d 100644 --- a/sys/net/npf/npf.h +++ b/sys/net/npf/npf.h @@ -202,6 +202,8 @@ bool npf_autounload_p(void); #endif /* _KERNEL */ +#include "npf_altq.h" + #define NPF_SRC 0 #define NPF_DST 1 @@ -309,6 +311,12 @@ typedef struct npf_ioctl_table { } nct_data; } npf_ioctl_table_t; +/* altq struct for ioctl */ +struct npfioc_altq { + u_int32_t nq; + struct npf_altq altq; +}; + /* * IOCTL operations. 
*/ From 0e2c46cc6b306b7bbe42f0948dcdc22ce8858e63 Mon Sep 17 00:00:00 2001 From: Emmankoko Date: Mon, 3 Feb 2025 17:12:29 +0000 Subject: [PATCH 4/9] definition of altq_expand which sends npfioc_altq to kernel it is implemented to compute all params of queues for any kind of scheduler --- sys/net/npf/npf.h | 3 + sys/net/npf/npf_altq.h | 8 + sys/net/npf/npf_os.c | 1 + usr.sbin/npf/npfctl/Makefile | 6 +- usr.sbin/npf/npfctl/npfctl.c | 2 +- usr.sbin/npf/npfctl/npfctl.h | 25 + usr.sbin/npf/npfctl/npfctl_altq.c | 1291 +++++++++++++++++++++++++++++ 7 files changed, 1332 insertions(+), 4 deletions(-) create mode 100644 usr.sbin/npf/npfctl/npfctl_altq.c diff --git a/sys/net/npf/npf.h b/sys/net/npf/npf.h index 048d3d8a80c3d..737e11db437e3 100644 --- a/sys/net/npf/npf.h +++ b/sys/net/npf/npf.h @@ -330,6 +330,9 @@ struct npfioc_altq { #define IOC_NPF_RULE _IOWR('N', 107, nvlist_ref_t) #define IOC_NPF_CONN_LOOKUP _IOWR('N', 108, nvlist_ref_t) #define IOC_NPF_TABLE_REPLACE _IOWR('N', 109, nvlist_ref_t) +#define IOC_NPF_BEGIN_ALTQ _IO('N', 112) +#define IOC_NPF_ADD_ALTQ _IOWR('N', 110, struct npfioc_altq) +#define IOC_NPF_GET_ALTQS _IOWR('N', 111, struct npfioc_altq) /* * NPF error report. 
diff --git a/sys/net/npf/npf_altq.h b/sys/net/npf/npf_altq.h index bd46ae3e48f7a..bfdc0970c8456 100644 --- a/sys/net/npf/npf_altq.h +++ b/sys/net/npf/npf_altq.h @@ -104,4 +104,12 @@ struct npf_altq { } pq_u; u_int32_t qid; /* return value */ }; + +TAILQ_HEAD(npf_altqqueue, npf_altq); + +extern int npf_get_altqs(void *); +extern void npf_altq_init(void); +extern int npf_begin_altq(void); +extern int npf_add_altq(void *); + #endif /* NPF_ALTQ_H_ */ diff --git a/sys/net/npf/npf_os.c b/sys/net/npf/npf_os.c index 022178236d50b..a74f256b983fd 100644 --- a/sys/net/npf/npf_os.c +++ b/sys/net/npf/npf_os.c @@ -274,6 +274,7 @@ npf_dev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l) return npfctl_table(npf, data); case IOC_NPF_STATS: return npf_stats_export(npf, data); + case IOC_NPF_LOAD: case IOC_NPF_SAVE: case IOC_NPF_RULE: diff --git a/usr.sbin/npf/npfctl/Makefile b/usr.sbin/npf/npfctl/Makefile index e840b1241fe7c..a399c3246ef3f 100644 --- a/usr.sbin/npf/npfctl/Makefile +++ b/usr.sbin/npf/npfctl/Makefile @@ -7,14 +7,14 @@ MAN= npfctl.8 npf.conf.5 BINDIR= /sbin SRCS= npfctl.c npf_cmd.c npf_var.c npf_data.c npf_build.c -SRCS+= npf_bpf_comp.c npf_show.c npf_extmod.c +SRCS+= npf_bpf_comp.c npf_show.c npf_extmod.c npfctl_altq.c CPPFLAGS+= -I${.CURDIR} SRCS+= npf_scan.l npf_parse.y YHEADER= 1 -LDADD+= -lnpf -lpcap -lutil -ly -DPADD+= ${LIBNPF} ${LIBUTIL} ${LIBPCAP} ${LIBUTIL} ${LIBY} +LDADD+= -lnpf -lpcap -lutil -ly -lm +DPADD+= ${LIBNPF} ${LIBUTIL} ${LIBPCAP} ${LIBUTIL} ${LIBY} ${LIBM} WARNS= 5 diff --git a/usr.sbin/npf/npfctl/npfctl.c b/usr.sbin/npf/npfctl/npfctl.c index c0537c9ad04cc..2701d0775af94 100644 --- a/usr.sbin/npf/npfctl/npfctl.c +++ b/usr.sbin/npf/npfctl/npfctl.c @@ -339,7 +339,7 @@ npfctl_load(int fd) return errno; } -static int +int npfctl_open_dev(const char *path) { struct stat st; diff --git a/usr.sbin/npf/npfctl/npfctl.h b/usr.sbin/npf/npfctl/npfctl.h index a99d3bd59328a..dfc7b3fe3e1fb 100644 --- a/usr.sbin/npf/npfctl/npfctl.h +++ 
b/usr.sbin/npf/npfctl/npfctl.h @@ -303,6 +303,31 @@ void npfctl_build_table(const char *, u_int, const char *); void npfctl_setparam(const char *, int); +/* ALTQ related */ +int npfctl_test_altqsupport(int); +extern int npfctl_open_dev(const char *); +int npfctl_eval_bw(struct node_queue_bw *, char *); +int expand_altq(struct npf_altq *, const char *, struct node_queue *, + struct node_queue_bw bwspec, struct node_queue_opt *); +int expand_queue(struct npf_altq *, const char *, struct node_queue *, + struct node_queue_bw, struct node_queue_opt *); +u_long get_ifmtu(char *); +uint32_t get_ifspeed(char *); +uint32_t npf_eval_bwspec(struct node_queue_bw *, uint32_t); +void npfaltq_store(struct npf_altq *); +int npfctl_add_altq(struct npf_altq *); +int npf_eval_queue_opts(struct npf_altq *, struct node_queue_opt *, + uint32_t); +int eval_npfaltq(struct npf_altq *, struct node_queue_bw *, + struct node_queue_opt *); +int eval_npfqueue(struct npf_altq *, struct node_queue_bw *, + struct node_queue_opt *); +struct npf_altq *qname_to_npfaltq(const char *, const char *); +uint32_t qname_to_qid(const char *); +struct npf_altq *npfaltq_lookup(const char *ifname); +char *rate2str(double); +int check_commit_altq(void); + /* * For the systems which do not define TH_ECE and TW_CRW. */ diff --git a/usr.sbin/npf/npfctl/npfctl_altq.c b/usr.sbin/npf/npfctl/npfctl_altq.c new file mode 100644 index 0000000000000..f9ea1fc827120 --- /dev/null +++ b/usr.sbin/npf/npfctl/npfctl_altq.c @@ -0,0 +1,1291 @@ +/*- + * Copyright (c) 2024 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Emmanuel Nyarko. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "npf.h" +#include +#include +#include +#include +#include "npfctl.h" + +LIST_HEAD(gen_sc, segment) rtsc, lssc; +static int npf_add_root_queue(struct npf_altq, const char *, + char *, struct node_queue_opt *); +static int eval_npfqueue_cbq(struct npf_altq *); +static int cbq_compute_idletime(struct npf_altq *); +static int check_commit_cbq(struct npf_altq *); +static int eval_npfqueue_priq(struct npf_altq *); +static int check_commit_priq(struct npf_altq *); +static int eval_npfqueue_hfsc(struct npf_altq *); +static int check_commit_hfsc(struct npf_altq *); +static void altq_append_queues(struct npf_altq pa, const char *, + char *, struct node_queue *); +static void queue_append_queues(struct npf_altq *, struct node_queue *, + struct node_queue *); +static int scheduler_check(struct npf_altq *, struct node_queue *, + struct node_queue *, struct node_queue_bw); +static void gsc_add_sc(struct gen_sc *, struct service_curve *); +static int is_gsc_under_sc(struct gen_sc *, + struct service_curve *); +static void gsc_destroy(struct gen_sc *); +static struct segment *gsc_getentry(struct gen_sc *, double); +static int gsc_add_seg(struct gen_sc *, double, double, double, + double); +static double sc_x2y(struct service_curve *, double); +int ifdisc_lookup(struct npf_altq *); + + +int npfdev; +int altqsupport; + +TAILQ_HEAD(altqs, npf_altq) altqs = TAILQ_HEAD_INITIALIZER(altqs); +#define is_sc_null(sc) (((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0)) + +struct node_queue *queues = NULL; + + +#define FREE_LIST(T,r) \ + do { \ + T *p, *node = r; \ + while (node != NULL) { \ + p = node; \ + node = node->next; \ + free(p); \ + } \ + } while (0) + +#define LOOP_THROUGH(T,n,r,C) \ + do { \ + T *n; \ + if (r == NULL) { \ + r = calloc(1, sizeof(*r)); \ + if (r == NULL) \ + err(EXIT_FAILURE, "LOOP: 
calloc"); \ + r->next = NULL; \ + } \ + n = r; \ + while (n != NULL) { \ + do { \ + C; \ + } while (0); \ + n = n->next; \ + } \ + } while (0) + +int +npfctl_test_altqsupport(int dev) +{ + struct npfioc_altq pa; + if (ioctl(dev, IOC_NPF_GET_ALTQS, &pa) == -1) { + if (errno == ENODEV) { + warnx("No ALTQ support in kernel\n" + "ALTQ related functions disabled\n"); + return 0; + } else + err(EXIT_FAILURE, "IOC_GET_ALTQS"); + } + return 1; +} + +/* evaluate bandwidth */ +int +npfctl_eval_bw(struct node_queue_bw *bw, char *bw_spec) +{ + double bps; + char *cp; + bw->bw_percent = 0; + bps = strtod(bw_spec, &cp); + if (cp != NULL) { + if (!strcmp(cp, "b")) + ; /* nothing */ + else if (!strcmp(cp, "Kb")) + bps *= 1000; + else if (!strcmp(cp, "Mb")) + bps *= 1000 * 1000; + else if (!strcmp(cp, "Gb")) + bps *= 1000 * 1000 * 1000; + else if (!strcmp(cp, "%")) { + if (bps < 0 || bps > 100) { + yyerror("bandwidth spec " + "out of range"); + return -1; + } + bw->bw_percent = bps; + bps = 0; + } else { + yyerror("unknown unit %s", cp); + return -1; + } + } + bw->bw_absolute = (uint32_t)bps; + return 0; +} + +/* create root queue for cbq or hfsc */ +static int +npf_add_root_queue(struct npf_altq pa, const char * ifname, + char qname[], struct node_queue_opt *opts) +{ + struct node_queue_bw bw; + struct npf_altq pb; + int errs = 0; + + /* + * we cannot use sizeof(qname) directly here as it will give sizeof(char*) + * so the copyably bytes is manually hack the using sizeof(char) * qname max size + */ + memset(&pb, 0, sizeof(pb)); + if (strlcpy(qname, "root_", (sizeof(char) * NPF_QNAME_SIZE)) >= + (sizeof(char) * NPF_QNAME_SIZE)) + errx(EXIT_FAILURE, "add_root: strlcpy"); + if (strlcat(qname, ifname, (sizeof(char) * NPF_QNAME_SIZE)) >= + (sizeof(char) * NPF_QNAME_SIZE)) + errx(EXIT_FAILURE, "add_root: strlcat"); + if (strlcpy(pb.qname, qname, + sizeof(pb.qname)) >= sizeof(pb.qname)) + errx(EXIT_FAILURE, "add_root: strlcpy"); + if (strlcpy(pb.ifname, ifname, + sizeof(pb.ifname)) >= 
sizeof(pb.ifname)) + errx(EXIT_FAILURE, "add_root: strlcpy"); + pb.qlimit = pa.qlimit; + pb.scheduler = pa.scheduler; + bw.bw_absolute = pa.ifbandwidth; + bw.bw_percent = 0; + if (eval_npfqueue(&pb, &bw, opts)) + errs++; + else + if (npfctl_add_altq(&pb)) + errs++; + return errs; +} + +/* + * child queues set on altq decl will be appended to global queue here: + * altq on .... queue {a,b ,c } + */ +static void +altq_append_queues(struct npf_altq pa, const char *ifname, + char qname[], struct node_queue *queue) +{ + struct node_queue *n; + n = calloc(1, sizeof(*n)); + if (n == NULL) + err(EXIT_FAILURE, "append_queue: calloc"); + if (pa.scheduler == ALTQT_CBQ || + pa.scheduler == ALTQT_HFSC) + if (strlcpy(n->parent, qname, + sizeof(n->parent)) >= + sizeof(n->parent)) + errx(EXIT_FAILURE, "append_queue: strlcpy"); + if (strlcpy(n->queue, queue->queue, + sizeof(n->queue)) >= sizeof(n->queue)) + errx(EXIT_FAILURE, "append_queue: strlcpy"); + if (strlcpy(n->ifname, ifname, + sizeof(n->ifname)) >= sizeof(n->ifname)) + errx(EXIT_FAILURE, "append_queue: strlcpy"); + n->scheduler = pa.scheduler; + n->next = NULL; + n->tail = n; + if (queues == NULL) + queues = n; + else { + queues->tail->next = n; + queues->tail = n; + } +} + +/* + * child queues set on new defining child queues appended on a global queue: + * queue .... 
queue {a, b, b} + */ +static void +queue_append_queues(struct npf_altq *a, struct node_queue *tqueue, + struct node_queue *nq) +{ + struct node_queue *n; + n = calloc(1, + sizeof(*n)); + if (n == NULL) + err(EXIT_FAILURE, "expand_queue: calloc"); + if (strlcpy(n->parent, a->qname, + sizeof(n->parent)) >= + sizeof(n->parent)) + errx(EXIT_FAILURE, "expand_queue strlcpy"); + if (strlcpy(n->queue, nq->queue, + sizeof(n->queue)) >= + sizeof(n->queue)) + errx(EXIT_FAILURE, "expand_queue strlcpy"); + if (strlcpy(n->ifname, tqueue->ifname, + sizeof(n->ifname)) >= + sizeof(n->ifname)) + errx(EXIT_FAILURE, "expand_queue strlcpy"); + n->scheduler = tqueue->scheduler; + n->next = NULL; + n->tail = n; + if (queues == NULL) + queues = n; + else { + queues->tail->next = n; + queues->tail = n; + } +} + +static int +scheduler_check(struct npf_altq *pa, struct node_queue *tqueue, + struct node_queue *nqueues, struct node_queue_bw bwspec) +{ + if (pa->scheduler != ALTQT_NONE && + pa->scheduler != tqueue->scheduler) { + yyerror("exactly one scheduler type " + "per interface allowed"); + return -1; + } + pa->scheduler = tqueue->scheduler; + /* scheduler dependent error checking */ + switch (pa->scheduler) { + case ALTQT_PRIQ: + if (nqueues != NULL) { + yyerror("priq queues cannot " + "have child queues"); + return -1; + } + if (bwspec.bw_absolute > 0 || + bwspec.bw_percent < 100) { + yyerror("priq doesn't take " + "bandwidth"); + return -1; + } + break; + default: + break; + } + return 0; +} + +int +expand_altq(struct npf_altq *a, const char *ifname, + struct node_queue *nqueues, struct node_queue_bw bwspec, + struct node_queue_opt *opts) +{ + struct npf_altq pa; + char qname[NPF_QNAME_SIZE]; + int errs = 0; + npfdev = npfctl_open_dev(NPF_DEV_PATH); + + memcpy(&pa, a, sizeof(pa)); + if (strlcpy(pa.ifname, ifname, + sizeof(pa.ifname)) >= sizeof(pa.ifname)) + errx(1, "expand_altq: strlcpy"); + if (ifdisc_lookup(&pa)) { + yyerror("only one scheduler per interface.\n altq already defined 
on %s", pa.ifname); + errs++; + } else { + if (eval_npfaltq(&pa, &bwspec, opts)) + errs++; + else + if (ioctl(npfdev, IOC_NPF_BEGIN_ALTQ) == 0) { + if (npfctl_add_altq(&pa)){ + yyerror("cannot add parent queue"); + errs++; + } + } else + errx(EXIT_FAILURE, "cannot begin altq: altq_begin"); + + if (pa.scheduler == ALTQT_CBQ || + pa.scheduler == ALTQT_HFSC) { + /* now create a root queue */ + if (npf_add_root_queue(pa, ifname, qname, opts)) + errx(EXIT_FAILURE, "cannot add root queue"); + } + LOOP_THROUGH(struct node_queue, queue, nqueues, + altq_append_queues(pa, ifname, qname, queue)); + } + FREE_LIST(struct node_queue, nqueues); + + return errs; +} + +int +expand_queue(struct npf_altq *a, const char *ifname, + struct node_queue *nqueues, struct node_queue_bw bwspec, + struct node_queue_opt *opts) +{ + struct node_queue *nq; + struct npf_altq pa; + uint8_t found = 0; + uint8_t errs = 0; + + if (queues == NULL) { + yyerror("queue %s has no parent", a->qname); + FREE_LIST(struct node_queue, nqueues); + return 1; + } + LOOP_THROUGH(struct node_queue, tqueue, queues, + if (!strncmp(a->qname, tqueue->queue, NPF_QNAME_SIZE) && + (ifname == 0 || + (!strncmp(ifname, tqueue->ifname, IFNAMSIZ)) || + (strncmp(ifname, tqueue->ifname, IFNAMSIZ)))){ + /* found ourself in the child queues */ + found++; + memcpy(&pa, a, sizeof(pa)); + if (scheduler_check(&pa, tqueue, nqueues, bwspec) == -1) + goto out; + + if (strlcpy(pa.ifname, tqueue->ifname, + sizeof(pa.ifname)) >= sizeof(pa.ifname)) + errx(1, "expand_queue: strlcpy"); + if (strlcpy(pa.parent, tqueue->parent, + sizeof(pa.parent)) >= sizeof(pa.parent)) + errx(1, "expand_queue: strlcpy"); + if (eval_npfqueue(&pa, &bwspec, opts)) + errs++; + else + if (npfctl_add_altq(&pa)) + errs++; + for (nq = nqueues; nq != NULL; nq = nq->next) { + if (!strcmp(a->qname, nq->queue)) { + yyerror("queue cannot have " + "itself as child"); + errs++; + continue; + } + queue_append_queues(a, tqueue, nq); + } + } + ); +out: + FREE_LIST(struct 
node_queue, nqueues); + if (!found) { + yyerror("queue %s has no parent", a->qname); + errs++; + } + if (errs) + return 1; + else + return 0; +} + +/* + * eval_npfaltq computes the discipline parameters. + */ +int +eval_npfaltq(struct npf_altq *pa, struct node_queue_bw *bw, + struct node_queue_opt *opts) +{ + u_int rate, size, errors = 0; + if (bw->bw_absolute > 0) + pa->ifbandwidth = bw->bw_absolute; + else + if ((rate = get_ifspeed(pa->ifname)) == 0) { + fprintf(stderr, "interface %s does not know its bandwidth, " + "please specify an absolute bandwidth\n", + pa->ifname); + errors++; + } else if ((pa->ifbandwidth = npf_eval_bwspec(bw, rate)) == 0) + pa->ifbandwidth = rate; + errors += npf_eval_queue_opts(pa, opts, pa->ifbandwidth); + /* if tbrsize is not specified, use heuristics */ + if (pa->tbrsize == 0) { + rate = pa->ifbandwidth; + if (rate <= 1 * 1000 * 1000) + size = 1; + else if (rate <= 10 * 1000 * 1000) + size = 4; + else if (rate <= 200 * 1000 * 1000) + size = 8; + else + size = 24; + size = size * get_ifmtu(pa->ifname); + if (size > 0xffff) + size = 0xffff; + pa->tbrsize = size; + } + return errors; +} + +int +npf_eval_queue_opts(struct npf_altq *pa, struct node_queue_opt *opts, + uint32_t ref_bw) +{ + int errors = 0; + switch (pa->scheduler) { + case ALTQT_CBQ: + pa->pq_u.cbq_opts = opts->data.cbq_opts; + break; + case ALTQT_PRIQ: + pa->pq_u.priq_opts = opts->data.priq_opts; + break; + case ALTQT_HFSC: + pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags; + if (opts->data.hfsc_opts.linkshare.used) { + pa->pq_u.hfsc_opts.lssc_m1 = + npf_eval_bwspec(&opts->data.hfsc_opts.linkshare.m1, + ref_bw); + pa->pq_u.hfsc_opts.lssc_m2 = + npf_eval_bwspec(&opts->data.hfsc_opts.linkshare.m2, + ref_bw); + pa->pq_u.hfsc_opts.lssc_d = + opts->data.hfsc_opts.linkshare.d; + } + if (opts->data.hfsc_opts.realtime.used) { + pa->pq_u.hfsc_opts.rtsc_m1 = + npf_eval_bwspec(&opts->data.hfsc_opts.realtime.m1, + ref_bw); + pa->pq_u.hfsc_opts.rtsc_m2 = + 
npf_eval_bwspec(&opts->data.hfsc_opts.realtime.m2, + ref_bw); + pa->pq_u.hfsc_opts.rtsc_d = + opts->data.hfsc_opts.realtime.d; + } + if (opts->data.hfsc_opts.upperlimit.used) { + pa->pq_u.hfsc_opts.ulsc_m1 = + npf_eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1, + ref_bw); + pa->pq_u.hfsc_opts.ulsc_m2 = + npf_eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2, + ref_bw); + pa->pq_u.hfsc_opts.ulsc_d = + opts->data.hfsc_opts.upperlimit.d; + } + break; + default: + warnx("eval_queue_opts: unknown scheduler type %u", + opts->qtype); + errors++; + break; + } + return errors; +} + +int +npfctl_add_altq(struct npf_altq *a) +{ + struct npfioc_altq *npaltq; + if ((npaltq = malloc(sizeof(*npaltq))) == NULL) + err(EXIT_FAILURE, "malloc"); + memcpy(&npaltq->altq, a, sizeof(npaltq->altq)); + if (ioctl(npfdev, IOC_NPF_ADD_ALTQ, npaltq)) { + if (errno == ENXIO) + errx(1, "qtype not configured"); + else if (errno == ENODEV) + errx(1, "%s: driver does not support " + "altq", a->ifname); + else + err(EXIT_FAILURE, "NPFADDALTQ"); + } + npfaltq_store(&npaltq->altq); + free(npaltq); + return 0; +} + +void +npfaltq_store(struct npf_altq *a) +{ + struct npf_altq *altq; + if ((altq = malloc(sizeof(*altq))) == NULL) + err(EXIT_FAILURE, "malloc"); + memcpy(altq, a, sizeof(*altq)); + TAILQ_INSERT_TAIL(&altqs, altq, entries); + /* check altq presence in config */ +} + +uint32_t +npf_eval_bwspec(struct node_queue_bw *bw, uint32_t ref_bw) +{ + if (bw->bw_absolute > 0) + return (bw->bw_absolute); + if (bw->bw_percent > 0) + return (ref_bw / 100 * bw->bw_percent); + return 0; +} + +uint32_t +get_ifspeed(char *ifname) +{ + int s; + struct ifdatareq ifdr; + struct if_data *ifrdat; + if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0) + err(EXIT_FAILURE, "getifspeed: socket"); + memset(&ifdr, 0, sizeof(ifdr)); + if (strlcpy(ifdr.ifdr_name, ifname, sizeof(ifdr.ifdr_name)) >= + sizeof(ifdr.ifdr_name)) + errx(1, "getifspeed: strlcpy"); + if (ioctl(s, SIOCGIFDATA, &ifdr) == -1) + err(EXIT_FAILURE, "getifspeed: 
SIOCGIFDATA"); + ifrdat = &ifdr.ifdr_data; + if (close(s) == -1) + err(EXIT_FAILURE, "getifspeed: close"); + return ((uint32_t)ifrdat->ifi_baudrate); +} + +u_long +get_ifmtu(char *ifname) +{ + int s; + struct ifreq ifr; + if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0) + err(EXIT_FAILURE, "socket"); + bzero(&ifr, sizeof(ifr)); + if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >= + sizeof(ifr.ifr_name)) + errx(1, "getifmtu: strlcpy"); + if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1) + err(EXIT_FAILURE, "SIOCGIFMTU"); + if (close(s) == -1) + err(EXIT_FAILURE, "close"); + if (ifr.ifr_mtu > 0) + return (ifr.ifr_mtu); + else { + warnx("could not get mtu for %s, assuming 1500", ifname); + return 1500; + } +} + +/* + * eval_npfqueue computes the queue parameters. + */ +int +eval_npfqueue(struct npf_altq *pa, struct node_queue_bw *bw, + struct node_queue_opt *opts) +{ + /* should be merged with expand_queue */ + struct npf_altq *if_pa, *parent, *altq; + uint32_t bwsum; + int error = 0; + /* find the corresponding interface and copy fields used by queues */ + if ((if_pa = npfaltq_lookup(pa->ifname)) == NULL) { + fprintf(stderr, "altq not defined on %s\n", pa->ifname); + return 1; + } + pa->scheduler = if_pa->scheduler; + pa->ifbandwidth = if_pa->ifbandwidth; + if (qname_to_npfaltq(pa->qname, pa->ifname) != NULL) { + fprintf(stderr, "queue %s already exists on interface %s\n", + pa->qname, pa->ifname); + return 1; + } + pa->qid = qname_to_qid(pa->qname); + parent = NULL; + if (pa->parent[0] != 0) { + parent = qname_to_npfaltq(pa->parent, pa->ifname); + if (parent == NULL) { + fprintf(stderr, "parent %s not found for %s\n", + pa->parent, pa->qname); + return 1; + } + pa->parent_qid = parent->qid; + } + if (pa->qlimit == 0) + pa->qlimit = DEFAULT_QLIMIT; + if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC) { + pa->bandwidth = npf_eval_bwspec(bw, + parent == NULL ? 
0 : parent->bandwidth); + if (pa->bandwidth > pa->ifbandwidth) { + fprintf(stderr, "bandwidth for %s higher than " + "interface\n", pa->qname); + return 1; + } + /* check the sum of the child bandwidth is under parent's */ + if (parent != NULL) { + if (pa->bandwidth > parent->bandwidth) { + warnx("bandwidth for %s higher than parent", + pa->qname); + return 1; + } + bwsum = 0; + TAILQ_FOREACH(altq, &altqs, entries) { + if (strncmp(altq->ifname, pa->ifname, + IFNAMSIZ) == 0 && + altq->qname[0] != 0 && + strncmp(altq->parent, pa->parent, + NPF_QNAME_SIZE) == 0) + bwsum += altq->bandwidth; + } + bwsum += pa->bandwidth; + if (bwsum > parent->bandwidth) { + warnx("the sum of the child bandwidth higher" + " than parent \"%s\"", parent->qname); + } + } + } + if (npf_eval_queue_opts(pa, opts, parent == NULL? 0 : parent->bandwidth)) + return 1; + switch (pa->scheduler) { + case ALTQT_CBQ: + error = eval_npfqueue_cbq(pa); + break; + case ALTQT_PRIQ: + error = eval_npfqueue_priq(pa); + break; + case ALTQT_HFSC: + error = eval_npfqueue_hfsc(pa); + break; + default: + break; + } + return error; +} + +struct npf_altq * +qname_to_npfaltq(const char *qname, const char *ifname) +{ + struct npf_altq *altq; + TAILQ_FOREACH(altq, &altqs, entries) { + if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 && + strncmp(qname, altq->qname, NPF_QNAME_SIZE) == 0) + return altq; + } + return NULL; +} + +uint32_t +qname_to_qid(const char *qname) +{ + struct npf_altq *altq; + /* + * We guarantee that same named queues on different interfaces + * have the same qid, so we do NOT need to limit matching on + * one interface! 
+ */ + TAILQ_FOREACH(altq, &altqs, entries) { + if (strncmp(qname, altq->qname, NPF_QNAME_SIZE) == 0) + return (altq->qid); + } + return 0; +} + +/*define only one discipline on one interface */ +int +ifdisc_lookup(struct npf_altq * altq) +{ + struct npf_altq *a; + if ((a = TAILQ_FIRST(&altqs)) != NULL) { + if ((a = npfaltq_lookup(altq->ifname)) != NULL) { + if (a->scheduler != altq->scheduler) { + return -1; + } + } + } + return 0; +} + +struct npf_altq * +npfaltq_lookup(const char *ifname) +{ + struct npf_altq *altq; + TAILQ_FOREACH(altq, &altqs, entries) { + if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 && + altq->qname[0] == 0) + return altq; + } + return NULL; +} + +/* + * CBQ support functions + */ +#define RM_FILTER_GAIN 5 /* log2 of gain, e.g., 5 => 31/32 */ +#define RM_NS_PER_SEC (1000000000) +static int +eval_npfqueue_cbq(struct npf_altq *pa) +{ + struct npf_cbq_opts *opts; + u_int ifmtu; + if (pa->priority >= CBQ_MAXPRI) { + warnx("priority out of range: max %d", CBQ_MAXPRI - 1); + return -1; + } + ifmtu = get_ifmtu(pa->ifname); + opts = &pa->pq_u.cbq_opts; + if (opts->pktsize == 0) { /* use default */ + opts->pktsize = ifmtu; + if (opts->pktsize > MCLBYTES) /* do what TCP does */ + opts->pktsize &= ~MCLBYTES; + } else if (opts->pktsize > ifmtu) + opts->pktsize = ifmtu; + if (opts->maxpktsize == 0) /* use default */ + opts->maxpktsize = ifmtu; + else if (opts->maxpktsize > ifmtu) + opts->pktsize = ifmtu; + if (opts->pktsize > opts->maxpktsize) + opts->pktsize = opts->maxpktsize; + if (pa->parent[0] == 0) + opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR); + cbq_compute_idletime(pa); + return 0; +} + +/* + * compute ns_per_byte, maxidle, minidle, and offtime + */ +static int +cbq_compute_idletime(struct npf_altq *pa) +{ + struct npf_cbq_opts *opts; + double maxidle_s, maxidle, minidle; + double offtime, nsPerByte, ifnsPerByte, ptime, cptime; + double z, g, f, gton, gtom; + u_int minburst, maxburst; + opts = &pa->pq_u.cbq_opts; + ifnsPerByte = (1.0 / 
(double)pa->ifbandwidth) * RM_NS_PER_SEC * 8; + minburst = opts->minburst; + maxburst = opts->maxburst; + if (pa->bandwidth == 0) + f = 0.0001; /* small enough? */ + else + f = ((double) pa->bandwidth / (double) pa->ifbandwidth); + nsPerByte = ifnsPerByte / f; + ptime = (double)opts->pktsize * ifnsPerByte; + cptime = ptime * (1.0 - f) / f; + if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) { + /* + * this causes integer overflow in kernel! + * (bandwidth < 6Kbps when max_pkt_size=1500) + */ + if (pa->bandwidth != 0) { + warnx("queue bandwidth must be larger than %s", + rate2str(ifnsPerByte * (double)opts->maxpktsize / + (double)INT_MAX * (double)pa->ifbandwidth)); + fprintf(stderr, "cbq: queue %s is too slow!\n", + pa->qname); + } + nsPerByte = (double)(INT_MAX / opts->maxpktsize); + } + if (maxburst == 0) { /* use default */ + if (cptime > 10.0 * 1000000) + maxburst = 4; + else + maxburst = 16; + } + if (minburst == 0) /* use default */ + minburst = 2; + if (minburst > maxburst) + minburst = maxburst; + z = (double)(1 << RM_FILTER_GAIN); + g = (1.0 - 1.0 / z); + gton = pow(g, (double)maxburst); + gtom = pow(g, (double)(minburst-1)); + maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton)); + maxidle_s = (1.0 - g); + if (maxidle > maxidle_s) + maxidle = ptime * maxidle; + else + maxidle = ptime * maxidle_s; + offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom); + minidle = -((double)opts->maxpktsize * (double)nsPerByte); + /* scale parameters */ + maxidle = ((maxidle * 8.0) / nsPerByte) * + pow(2.0, (double)RM_FILTER_GAIN); + offtime = (offtime * 8.0) / nsPerByte * + pow(2.0, (double)RM_FILTER_GAIN); + minidle = ((minidle * 8.0) / nsPerByte) * + pow(2.0, (double)RM_FILTER_GAIN); + maxidle = maxidle / 1000.0; + offtime = offtime / 1000.0; + minidle = minidle / 1000.0; + opts->minburst = minburst; + opts->maxburst = maxburst; + opts->ns_per_byte = (u_int)nsPerByte; + opts->maxidle = (u_int)fabs(maxidle); + opts->minidle = (int)minidle; + 
opts->offtime = (u_int)fabs(offtime); + return 0; +} + +/* + * PRIQ support functions + */ +static int +eval_npfqueue_priq(struct npf_altq *pa) +{ + struct npf_altq *altq; + if (pa->priority >= PRIQ_MAXPRI) { + warnx("priority out of range: max %d", PRIQ_MAXPRI - 1); + return -1; + } + /* the priority should be unique for the interface */ + TAILQ_FOREACH(altq, &altqs, entries) { + if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) == 0 && + altq->qname[0] != 0 && altq->priority == pa->priority) { + warnx("%s and %s have the same priority", + altq->qname, pa->qname); + return -1; + } + } + return 0; +} + +/* + * HFSC support functions + */ +static int +eval_npfqueue_hfsc(struct npf_altq *pa) +{ + struct npf_altq *altq, *parent; + struct npf_hfsc_opts *opts; + struct service_curve sc; + opts = &pa->pq_u.hfsc_opts; + if (pa->parent[0] == 0) { + /* root queue */ + opts->lssc_m1 = pa->ifbandwidth; + opts->lssc_m2 = pa->ifbandwidth; + opts->lssc_d = 0; + return 0; + } + LIST_INIT(&rtsc); + LIST_INIT(&lssc); + /* if link_share is not specified, use bandwidth */ + if (opts->lssc_m2 == 0) + opts->lssc_m2 = pa->bandwidth; + if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) || + (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) || + (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) { + warnx("m2 is zero for %s", pa->qname); + return -1; + } + if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) || + (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) || + (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) { + warnx("m1 must be zero for convex curve: %s", pa->qname); + return -1; + } + /* + * admission control: + * for the real-time service curve, the sum of the service curves + * should not exceed 80% of the interface bandwidth. 20% is reserved + * not to over-commit the actual interface bandwidth. + * for the linkshare service curve, the sum of the child service + * curve should not exceed the parent service curve. 
+ * for the upper-limit service curve, the assigned bandwidth should + * be smaller than the interface bandwidth, and the upper-limit should + * be larger than the real-time service curve when both are defined. + */ + parent = qname_to_npfaltq(pa->parent, pa->ifname); + if (parent == NULL) + errx(1, "parent %s not found for %s", pa->parent, pa->qname); + TAILQ_FOREACH(altq, &altqs, entries) { + if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) + continue; + if (altq->qname[0] == 0) /* this is for interface */ + continue; + /* if the class has a real-time service curve, add it. */ + if (opts->rtsc_m2 != 0 && altq->pq_u.hfsc_opts.rtsc_m2 != 0) { + sc.m1 = altq->pq_u.hfsc_opts.rtsc_m1; + sc.d = altq->pq_u.hfsc_opts.rtsc_d; + sc.m2 = altq->pq_u.hfsc_opts.rtsc_m2; + gsc_add_sc(&rtsc, &sc); + } + if (strncmp(altq->parent, pa->parent, NPF_QNAME_SIZE) != 0) + continue; + /* if the class has a linkshare service curve, add it. */ + if (opts->lssc_m2 != 0 && altq->pq_u.hfsc_opts.lssc_m2 != 0) { + sc.m1 = altq->pq_u.hfsc_opts.lssc_m1; + sc.d = altq->pq_u.hfsc_opts.lssc_d; + sc.m2 = altq->pq_u.hfsc_opts.lssc_m2; + gsc_add_sc(&lssc, &sc); + } + } + /* check the real-time service curve. reserve 20% of interface bw */ + if (opts->rtsc_m2 != 0) { + /* add this queue to the sum */ + sc.m1 = opts->rtsc_m1; + sc.d = opts->rtsc_d; + sc.m2 = opts->rtsc_m2; + gsc_add_sc(&rtsc, &sc); + /* compare the sum with 80% of the interface */ + sc.m1 = 0; + sc.d = 0; + sc.m2 = pa->ifbandwidth / 100 * 80; + if (!is_gsc_under_sc(&rtsc, &sc)) { + warnx("real-time sc exceeds 80%% of the interface " + "bandwidth (%s)", rate2str((double)sc.m2)); + goto err_ret; + } + } + /* check the linkshare service curve. 
*/ + if (opts->lssc_m2 != 0) { + /* add this queue to the child sum */ + sc.m1 = opts->lssc_m1; + sc.d = opts->lssc_d; + sc.m2 = opts->lssc_m2; + gsc_add_sc(&lssc, &sc); + /* compare the sum of the children with parent's sc */ + sc.m1 = parent->pq_u.hfsc_opts.lssc_m1; + sc.d = parent->pq_u.hfsc_opts.lssc_d; + sc.m2 = parent->pq_u.hfsc_opts.lssc_m2; + if (!is_gsc_under_sc(&lssc, &sc)) { + warnx("linkshare sc exceeds parent's sc"); + goto err_ret; + } + } + /* check the upper-limit service curve. */ + if (opts->ulsc_m2 != 0) { + if (opts->ulsc_m1 > pa->ifbandwidth || + opts->ulsc_m2 > pa->ifbandwidth) { + warnx("upper-limit larger than interface bandwidth"); + goto err_ret; + } + if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) { + warnx("upper-limit sc smaller than real-time sc"); + goto err_ret; + } + } + gsc_destroy(&rtsc); + gsc_destroy(&lssc); + return 0; +err_ret: + gsc_destroy(&rtsc); + gsc_destroy(&lssc); + return -1; +} + +#define R2S_BUFS 8 +#define RATESTR_MAX 16 +char * +rate2str(double rate) +{ + char *buf; + static char r2sbuf[R2S_BUFS][RATESTR_MAX]; /* ring buffer */ + static int idx = 0; + int i; + static const char unit[] = " KMG"; + buf = r2sbuf[idx++]; + if (idx == R2S_BUFS) + idx = 0; + for (i = 0; rate >= 1000 && i <= 3; i++) + rate /= 1000; + if ((int)(rate * 100) % 100) + snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]); + else + snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]); + return buf; +} + +/* + * admission control using generalized service curve + */ +/* add a new service curve to a generalized service curve */ +static void +gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc) +{ + if (is_sc_null(sc)) + return; + if (sc->d != 0) + gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1); + gsc_add_seg(gsc, (double)sc->d, 0.0, HUGE_VAL, (double)sc->m2); +} + +/* + * check whether all points of a generalized service curve have + * their y-coordinates no larger than a given two-piece linear + * service curve. 
+ */ +static int +is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc) +{ + struct segment *s, *last, *end; + double y; + if (is_sc_null(sc)) { + if (LIST_EMPTY(gsc)) + return 1; + LIST_FOREACH(s, gsc, _next) { + if (s->m != 0) + return 0; + } + return 1; + } + /* + * gsc has a dummy entry at the end with x = HUGE_VAL. + * loop through up to this dummy entry. + */ + end = gsc_getentry(gsc, HUGE_VAL); + if (end == NULL) + return 1; + last = NULL; + for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) { + if (s->y > sc_x2y(sc, s->x)) + return 0; + last = s; + } + /* last now holds the real last segment */ + if (last == NULL) + return 1; + if (last->m > sc->m2) + return 0; + if (last->x < sc->d && last->m > sc->m1) { + y = last->y + (sc->d - last->x) * last->m; + if (y > sc_x2y(sc, sc->d)) + return 0; + } + return 1; +} + +static void +gsc_destroy(struct gen_sc *gsc) +{ + struct segment *s; + while ((s = LIST_FIRST(gsc)) != NULL) { + LIST_REMOVE(s, _next); + free(s); + } +} + +/* + * return a segment entry starting at x. + * if gsc has no entry starting at x, a new entry is created at x. + */ +static struct segment * +gsc_getentry(struct gen_sc *gsc, double x) +{ + struct segment *new, *prev, *s; + prev = NULL; + LIST_FOREACH(s, gsc, _next) { + if (s->x == x) + return s; /* matching entry found */ + else if (s->x < x) + prev = s; + else + break; + } + /* we have to create a new entry */ + if ((new = calloc(1, sizeof(*new))) == NULL) + return NULL; + new->x = x; + if (x == HUGE_VAL || s == NULL) + new->d = 0; + else if (s->x == HUGE_VAL) + new->d = HUGE_VAL; + else + new->d = s->x - x; + if (prev == NULL) { + /* insert the new entry at the head of the list */ + new->y = 0; + new->m = 0; + LIST_INSERT_HEAD(gsc, new, _next); + } else { + /* + * the start point intersects with the segment pointed by + * prev. 
divide prev into 2 segments + */ + if (x == HUGE_VAL) { + prev->d = HUGE_VAL; + if (prev->m == 0) + new->y = prev->y; + else + new->y = HUGE_VAL; + } else { + prev->d = x - prev->x; + new->y = prev->d * prev->m + prev->y; + } + new->m = prev->m; + LIST_INSERT_AFTER(prev, new, _next); + } + return new; +} + +/* add a segment to a generalized service curve */ +static int +gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m) +{ + struct segment *start, *end, *s; + double x2; + if (d == HUGE_VAL) + x2 = HUGE_VAL; + else + x2 = x + d; + start = gsc_getentry(gsc, x); + end = gsc_getentry(gsc, x2); + if (start == NULL || end == NULL) + return -1; + for (s = start; s != end; s = LIST_NEXT(s, _next)) { + s->m += m; + s->y += y + (s->x - x) * m; + } + end = gsc_getentry(gsc, HUGE_VAL); + for (; s != end; s = LIST_NEXT(s, _next)) { + s->y += m * d; + } + return 0; +} + +/* get y-projection of a service curve */ +static double +sc_x2y(struct service_curve *sc, double x) +{ + double y; + if (x <= (double)sc->d) + /* y belongs to the 1st segment */ + y = x * (double)sc->m1; + else + /* y belongs to the 2nd segment */ + y = (double)sc->d * (double)sc->m1 + + (x - (double)sc->d) * (double)sc->m2; + return y; +} + +/* + * check_commit_altq does consistency check for each interface + */ +int +check_commit_altq(void) +{ + struct npf_altq *altq; + int error = 0; + /* call the discipline check for each interface. 
*/ + TAILQ_FOREACH(altq, &altqs, entries) { + if (altq->qname[0] == 0) { + switch (altq->scheduler) { + case ALTQT_CBQ: + error = check_commit_cbq(altq); + break; + case ALTQT_PRIQ: + error = check_commit_priq(altq); + break; + case ALTQT_HFSC: + error = check_commit_hfsc(altq); + break; + default: + break; + } + } + } + return error; +} + +static int +check_commit_cbq(struct npf_altq *pa) +{ + struct npf_altq *altq; + int root_class, default_class; + int error = 0; + /* + * check if cbq has one root queue and one default queue + * for this interface + */ + root_class = default_class = 0; + TAILQ_FOREACH(altq, &altqs, entries) { + if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) + continue; + if (altq->qname[0] == 0) /* this is for interface */ + continue; + if (altq->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS) + root_class++; + if (altq->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS) + default_class++; + } + if (root_class != 1) { + warnx("should have one root queue on %s", pa->ifname); + error++; + } + if (default_class != 1) { + warnx("should have one default queue on %s", pa->ifname); + error++; + } + return error; +} + +static int +check_commit_priq(struct npf_altq *pa) +{ + struct npf_altq *altq; + int default_class; + int error = 0; + /* + * check if priq has one default class for this interface + */ + default_class = 0; + TAILQ_FOREACH(altq, &altqs, entries) { + if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) + continue; + if (altq->qname[0] == 0) /* this is for interface */ + continue; + if (altq->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS) + default_class++; + } + if (default_class != 1) { + warnx("should have one default queue on %s", pa->ifname); + error++; + } + return error; +} + +static int +check_commit_hfsc(struct npf_altq *pa) +{ + struct npf_altq *altq, *def = NULL; + int default_class; + int error = 0; + /* check if hfsc has one default queue for this interface */ + default_class = 0; + TAILQ_FOREACH(altq, &altqs, entries) { + if 
(strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) + continue; + if (altq->qname[0] == 0) /* this is for interface */ + continue; + if (altq->parent[0] == 0) /* dummy root */ + continue; + if (altq->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) { + default_class++; + def = altq; + } + } + if (default_class != 1) { + warnx("should have one default queue on %s", pa->ifname); + return 1; + } + /* make sure the default queue is a leaf */ + TAILQ_FOREACH(altq, &altqs, entries) { + if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0) + continue; + if (altq->qname[0] == 0) /* this is for interface */ + continue; + if (strncmp(altq->parent, def->qname, NPF_QNAME_SIZE) == 0) { + warnx("default queue is not a leaf"); + error++; + } + } + return error; +} From bebe6213551dc54e0d89fb0a740d18f1abc3e110 Mon Sep 17 00:00:00 2001 From: Emmankoko Date: Tue, 4 Feb 2025 10:40:05 +0000 Subject: [PATCH 5/9] define the function that appends a queue to a rule checks: first checks if the queue obeys best practices(having a default queue etc) checks if the referenced queue is defined in the queues then sets the queue to queue nvpair in the rules nvlist. 
--- distrib/sets/lists/comp/mi | 1 + lib/libnpf/npf.c | 7 +++++++ lib/libnpf/npf.expsym | 1 + lib/libnpf/npf.h | 1 + usr.sbin/npf/npfctl/npf_build.c | 13 ++++++++++++- usr.sbin/npf/npfctl/npfctl.h | 3 ++- usr.sbin/npf/npfctl/npfctl_altq.c | 20 ++++++++++++++++++++ 7 files changed, 44 insertions(+), 2 deletions(-) diff --git a/distrib/sets/lists/comp/mi b/distrib/sets/lists/comp/mi index 55e2df75c1b95..d91e86484b4de 100644 --- a/distrib/sets/lists/comp/mi +++ b/distrib/sets/lists/comp/mi @@ -2762,6 +2762,7 @@ ./usr/include/net/net_stats.h comp-c-include ./usr/include/net/netisr.h comp-obsolete obsolete ./usr/include/net/npf.h comp-c-include +./usr/include/net/npf_altq.h comp-c-include ./usr/include/net/npf_ncode.h comp-obsolete obsolete ./usr/include/net/pfil.h comp-c-include ./usr/include/net/pfkeyv2.h comp-c-include diff --git a/lib/libnpf/npf.c b/lib/libnpf/npf.c index 80116a34799a3..ec77fd971ba84 100644 --- a/lib/libnpf/npf.c +++ b/lib/libnpf/npf.c @@ -735,6 +735,13 @@ npf_rule_setproc(nl_rule_t *rl, const char *name) return nvlist_error(rl->rule_dict); } +int +npf_rule_setqueue(nl_rule_t *rl, const char *qname) +{ + nvlist_add_string(rl->rule_dict, "queue", qname); + return nvlist_error(rl->rule_dict); +} + void * npf_rule_export(nl_rule_t *rl, size_t *length) { diff --git a/lib/libnpf/npf.expsym b/lib/libnpf/npf.expsym index 2fd9d4feff750..4c63ac53dcdaa 100644 --- a/lib/libnpf/npf.expsym +++ b/lib/libnpf/npf.expsym @@ -75,6 +75,7 @@ npf_rule_setinfo npf_rule_setkey npf_rule_setprio npf_rule_setproc +npf_rule_setqueue npf_ruleset_add npf_ruleset_flush npf_ruleset_remkey diff --git a/lib/libnpf/npf.h b/lib/libnpf/npf.h index aab769f09882a..00436593fcdbb 100644 --- a/lib/libnpf/npf.h +++ b/lib/libnpf/npf.h @@ -108,6 +108,7 @@ int npf_rule_setprio(nl_rule_t *, int); int npf_rule_setproc(nl_rule_t *, const char *); int npf_rule_setkey(nl_rule_t *, const void *, size_t); int npf_rule_setinfo(nl_rule_t *, const void *, size_t); +int npf_rule_setqueue(nl_rule_t *, 
const char *); const char * npf_rule_getname(nl_rule_t *); uint32_t npf_rule_getattr(nl_rule_t *); const char * npf_rule_getinterface(nl_rule_t *); diff --git a/usr.sbin/npf/npfctl/npf_build.c b/usr.sbin/npf/npfctl/npf_build.c index 2a171fed3dd75..f72a99ffe7841 100644 --- a/usr.sbin/npf/npfctl/npf_build.c +++ b/usr.sbin/npf/npfctl/npf_build.c @@ -702,7 +702,7 @@ npfctl_build_group_end(void) void npfctl_build_rule(uint32_t attr, const char *ifname, sa_family_t family, const npfvar_t *popts, const filt_opts_t *fopts, - const char *pcap_filter, const char *rproc) + const char *pcap_filter, const char *rproc, struct node_qassign queue) { nl_rule_t *rl; @@ -719,6 +719,17 @@ npfctl_build_rule(uint32_t attr, const char *ifname, sa_family_t family, npf_rule_setproc(rl, rproc); } + /* first ensure a queue is set on rule */ + if (queue.qname != NULL ) { + /* ensure altq config obeys best practices */ + if (check_commit_altq()) + errx(EXIT_FAILURE, "error in altq config"); + /* ensure the referenced queue is defined */ + if (npf_rule_qnames_exists(queue.qname)) + if (npf_rule_setqueue(rl, queue.qname)) + errx(EXIT_FAILURE, "rule queue %s cannot be set", queue.qname); + } + if (npf_conf) { nl_rule_t *cg = current_group[rule_nesting_level]; diff --git a/usr.sbin/npf/npfctl/npfctl.h b/usr.sbin/npf/npfctl/npfctl.h index dfc7b3fe3e1fb..d25556350c98b 100644 --- a/usr.sbin/npf/npfctl/npfctl.h +++ b/usr.sbin/npf/npfctl/npfctl.h @@ -294,7 +294,7 @@ void npfctl_build_group(const char *, int, const char *, bool); void npfctl_build_group_end(void); void npfctl_build_rule(uint32_t, const char *, sa_family_t, const npfvar_t *, const filt_opts_t *, - const char *, const char *); + const char *, const char *, struct node_qassign); void npfctl_build_natseg(int, int, unsigned, const char *, const addr_port_t *, const addr_port_t *, const npfvar_t *, const filt_opts_t *, unsigned); @@ -327,6 +327,7 @@ uint32_t qname_to_qid(const char *); struct npf_altq *npfaltq_lookup(const char *ifname); char 
*rate2str(double); int check_commit_altq(void); +int npf_rule_qnames_exists(const char *); /* * For the systems which do not define TH_ECE and TW_CRW. diff --git a/usr.sbin/npf/npfctl/npfctl_altq.c b/usr.sbin/npf/npfctl/npfctl_altq.c index f9ea1fc827120..6aa41e85b8867 100644 --- a/usr.sbin/npf/npfctl/npfctl_altq.c +++ b/usr.sbin/npf/npfctl/npfctl_altq.c @@ -1289,3 +1289,23 @@ check_commit_hfsc(struct npf_altq *pa) } return error; } + +/* this checks for undefined queues appended on a rule */ +int +npf_rule_qnames_exists(const char *qname) +{ + int found = 0; + struct npf_altq* a; + TAILQ_FOREACH(a, &altqs, entries) { + if (a->qname[0] != 0){ + if (strcmp(a->qname, qname) == 0){ + found = 1; + break; + } + } + } + if (!found) + yyerror("no qname named '%s' defined\n", qname); + + return found; +} From 9772953bbf7e521848ca8f8794f63f1adb337088 Mon Sep 17 00:00:00 2001 From: Emmankoko Date: Tue, 4 Feb 2025 14:22:39 +0000 Subject: [PATCH 6/9] functions to initialize queueing, and add queues in the kernel --- sys/altq/altq_cbq.c | 6 +- sys/altq/altq_hfsc.c | 6 +- sys/altq/altq_priq.c | 4 +- sys/altq/altq_subr.c | 14 +-- sys/altq/altq_var.h | 54 ++++----- sys/net/npf/files.npf | 3 + sys/net/npf/npf.h | 2 +- sys/net/npf/npf_altq.c | 182 +++++++++++++++++++++++++++++++ sys/net/npf/npf_altq.h | 23 +++- sys/net/npf/npf_os.c | 18 ++- sys/rump/net/lib/libnpf/Makefile | 2 +- usr.sbin/npf/npftest/Makefile | 2 +- 12 files changed, 266 insertions(+), 50 deletions(-) create mode 100644 sys/net/npf/npf_altq.c diff --git a/sys/altq/altq_cbq.c b/sys/altq/altq_cbq.c index 9b508a456e3db..550be43b2f9e8 100644 --- a/sys/altq/altq_cbq.c +++ b/sys/altq/altq_cbq.c @@ -60,7 +60,7 @@ __KERNEL_RCSID(0, "$NetBSD: altq_cbq.c,v 1.42 2025/01/08 13:00:04 joe Exp $"); #include #if NNPF > 0 -#include +#include #endif #include #include @@ -244,7 +244,7 @@ get_class_stats(class_stats_t *statsp, struct rm_class *cl) #if NNPF > 0 int -cbq_pfattach(struct npf_altq *a) +cbq_npfattach(struct npf_altq *a) { 
struct ifnet *ifp; int s, error; @@ -313,7 +313,7 @@ cbq_add_queue(struct npf_altq *a) struct rm_class *borrow, *parent; cbq_state_t *cbqp; struct rm_class *cl; - struct cbq_opts *opts; + struct npf_cbq_opts *opts; int i, error; if ((cbqp = a->altq_disc) == NULL) diff --git a/sys/altq/altq_hfsc.c b/sys/altq/altq_hfsc.c index dde154d422891..9f9dfe5146db6 100644 --- a/sys/altq/altq_hfsc.c +++ b/sys/altq/altq_hfsc.c @@ -71,7 +71,7 @@ __KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.31 2025/01/08 13:00:04 joe Exp $"); #include #if NNPF > 0 -#include +#include #endif #include #include @@ -175,7 +175,7 @@ static struct hfsc_if *hif_list = NULL; #if NNPF > 0 int -hfsc_pfattach(struct npf_altq *a) +hfsc_npfattach(struct npf_altq *a) { struct ifnet *ifp; int s, error; @@ -242,7 +242,7 @@ hfsc_add_queue(struct npf_altq *a) { struct hfsc_if *hif; struct hfsc_class *cl, *parent; - struct hfsc_opts *opts; + struct npf_hfsc_opts *opts; struct service_curve rtsc, lssc, ulsc; if ((hif = a->altq_disc) == NULL) diff --git a/sys/altq/altq_priq.c b/sys/altq/altq_priq.c index f3d2f80e5aa09..c25460fe9e772 100644 --- a/sys/altq/altq_priq.c +++ b/sys/altq/altq_priq.c @@ -57,7 +57,7 @@ __KERNEL_RCSID(0, "$NetBSD: altq_priq.c,v 1.29 2025/01/08 13:00:04 joe Exp $"); #include #if NNPF > 0 -#include +#include #endif #include #include @@ -107,7 +107,7 @@ static struct priq_if *pif_list = NULL; #if NNPF > 0 int -priq_pfattach(struct npf_altq *a) +priq_npfattach(struct npf_altq *a) { struct ifnet *ifp; int s, error; diff --git a/sys/altq/altq_subr.c b/sys/altq/altq_subr.c index cba90252455dc..971c3fc0074bd 100644 --- a/sys/altq/altq_subr.c +++ b/sys/altq/altq_subr.c @@ -63,7 +63,7 @@ __KERNEL_RCSID(0, "$NetBSD: altq_subr.c,v 1.34 2025/01/08 13:00:04 joe Exp $"); #include #if NNPF > 0 -#include +#include #endif #include #ifdef ALTQ3_COMPAT @@ -400,13 +400,12 @@ tbr_get(struct ifaltq *ifq, struct tb_profile *profile) return 0; } -#if NNPF > 0 /* * attach a discipline to the interface. 
if one already exists, it is * overridden. */ int -altq_pfattach(struct npf_altq *a) +altq_npfattach(struct npf_altq *a) { int error = 0; @@ -415,17 +414,17 @@ altq_pfattach(struct npf_altq *a) break; #ifdef ALTQ_CBQ case ALTQT_CBQ: - error = cbq_pfattach(a); + error = cbq_npfattach(a); break; #endif #ifdef ALTQ_PRIQ case ALTQT_PRIQ: - error = priq_pfattach(a); + error = priq_npfattach(a); break; #endif #ifdef ALTQ_HFSC case ALTQT_HFSC: - error = hfsc_pfattach(a); + error = hfsc_npfattach(a); break; #endif default: @@ -441,7 +440,7 @@ altq_pfattach(struct npf_altq *a) * discipline. */ int -altq_pfdetach(struct npf_altq *a) +altq_npfdetach(struct npf_altq *a) { struct ifnet *ifp; int s, error = 0; @@ -628,7 +627,6 @@ altq_getqstats(struct npf_altq *a, void *ubuf, int *nbytes) return error; } -#endif /* NNPF > 0 */ /* * read and write diffserv field in IPv4 or IPv6 header diff --git a/sys/altq/altq_var.h b/sys/altq/altq_var.h index fbe35ba0fb54e..05f3b0a9fa429 100644 --- a/sys/altq/altq_var.h +++ b/sys/altq/altq_var.h @@ -207,11 +207,13 @@ typedef void (timeout_t)(void *); #define m_pktlen(m) ((m)->m_pkthdr.len) struct ifnet; struct mbuf; -struct pf_altq; +struct npf_altq; #ifdef ALTQ3_CLFIER_COMPAT struct flowinfo; #endif +#include + void *altq_lookup(char *, int); #ifdef ALTQ3_CLFIER_COMPAT int altq_extractflow(struct mbuf *, int, struct flowinfo *, u_int32_t); @@ -227,34 +229,34 @@ void altq_assert(const char *, int, const char *); int tbr_set(struct ifaltq *, struct tb_profile *); int tbr_get(struct ifaltq *, struct tb_profile *); -int altq_pfattach(struct pf_altq *); -int altq_pfdetach(struct pf_altq *); -int altq_add(struct pf_altq *); -int altq_remove(struct pf_altq *); -int altq_add_queue(struct pf_altq *); -int altq_remove_queue(struct pf_altq *); -int altq_getqstats(struct pf_altq *, void *, int *); +int altq_npfattach(struct npf_altq *); +int altq_npfdetach(struct npf_altq *); +int altq_add(struct npf_altq *); +int altq_remove(struct npf_altq *); +int 
altq_add_queue(struct npf_altq *); +int altq_remove_queue(struct npf_altq *); +int altq_getqstats(struct npf_altq *, void *, int *); -int cbq_pfattach(struct pf_altq *); -int cbq_add_altq(struct pf_altq *); -int cbq_remove_altq(struct pf_altq *); -int cbq_add_queue(struct pf_altq *); -int cbq_remove_queue(struct pf_altq *); -int cbq_getqstats(struct pf_altq *, void *, int *); +int cbq_npfattach(struct npf_altq *); +int cbq_add_altq(struct npf_altq *); +int cbq_remove_altq(struct npf_altq *); +int cbq_add_queue(struct npf_altq *); +int cbq_remove_queue(struct npf_altq *); +int cbq_getqstats(struct npf_altq *, void *, int *); -int priq_pfattach(struct pf_altq *); -int priq_add_altq(struct pf_altq *); -int priq_remove_altq(struct pf_altq *); -int priq_add_queue(struct pf_altq *); -int priq_remove_queue(struct pf_altq *); -int priq_getqstats(struct pf_altq *, void *, int *); +int priq_npfattach(struct npf_altq *); +int priq_add_altq(struct npf_altq *); +int priq_remove_altq(struct npf_altq *); +int priq_add_queue(struct npf_altq *); +int priq_remove_queue(struct npf_altq *); +int priq_getqstats(struct npf_altq *, void *, int *); -int hfsc_pfattach(struct pf_altq *); -int hfsc_add_altq(struct pf_altq *); -int hfsc_remove_altq(struct pf_altq *); -int hfsc_add_queue(struct pf_altq *); -int hfsc_remove_queue(struct pf_altq *); -int hfsc_getqstats(struct pf_altq *, void *, int *); +int hfsc_npfattach(struct npf_altq *); +int hfsc_add_altq(struct npf_altq *); +int hfsc_remove_altq(struct npf_altq *); +int hfsc_add_queue(struct npf_altq *); +int hfsc_remove_queue(struct npf_altq *); +int hfsc_getqstats(struct npf_altq *, void *, int *); #endif /* _KERNEL */ #endif /* _ALTQ_ALTQ_VAR_H_ */ diff --git a/sys/net/npf/files.npf b/sys/net/npf/files.npf index c6346153abc69..26e027097b041 100644 --- a/sys/net/npf/files.npf +++ b/sys/net/npf/files.npf @@ -50,3 +50,6 @@ file net/npf/npf_alg_icmp.c npf # Interfaces file net/npf/if_npflog.c npf + +# queues +file net/npf/npf_altq.c npf 
diff --git a/sys/net/npf/npf.h b/sys/net/npf/npf.h index 737e11db437e3..ce1d492bc5c90 100644 --- a/sys/net/npf/npf.h +++ b/sys/net/npf/npf.h @@ -330,7 +330,7 @@ struct npfioc_altq { #define IOC_NPF_RULE _IOWR('N', 107, nvlist_ref_t) #define IOC_NPF_CONN_LOOKUP _IOWR('N', 108, nvlist_ref_t) #define IOC_NPF_TABLE_REPLACE _IOWR('N', 109, nvlist_ref_t) -#define IOC_NPF_BEGIN_ALTQ _IO('N', 112) +#define IOC_NPF_BEGIN_ALTQ _IO('N', 112) #define IOC_NPF_ADD_ALTQ _IOWR('N', 110, struct npfioc_altq) #define IOC_NPF_GET_ALTQS _IOWR('N', 111, struct npfioc_altq) diff --git a/sys/net/npf/npf_altq.c b/sys/net/npf/npf_altq.c new file mode 100644 index 0000000000000..3014281f2eb92 --- /dev/null +++ b/sys/net/npf/npf_altq.c @@ -0,0 +1,182 @@ +#ifdef _KERNEL_OPT +#include "opt_altq.h" +#include "opt_inet.h" +#endif + +#include +#include +#include +#include "npf.h" +#include + +#ifdef ALTQ + +TAILQ_HEAD(npf_tags, npf_tagname) npf_tags = TAILQ_HEAD_INITIALIZER(npf_tags), + npf_qids = TAILQ_HEAD_INITIALIZER(npf_qids); + +void tag_unref(struct npf_tags *, u_int16_t); +uint16_t npftagname2tag(struct npf_tags *, char *); + +struct npf_altqqueue *npf_altqs_active; +struct npf_altqqueue *npf_altqs_inactive; +struct npf_altqqueue npf_altqs[2]; + +struct pool npf_altq_pl; +int npf_altq_loaded = 0; + +/* npf interface to start altq */ +void +npf_altq_init(void) +{ + pool_init(&npf_altq_pl, sizeof(struct npf_altq), 0, 0, 0, "npfaltqpl", + &pool_allocator_nointr, IPL_NONE); + TAILQ_INIT(&npf_altqs[0]); + TAILQ_INIT(&npf_altqs[1]); + npf_altqs_active = &npf_altqs[0]; + npf_altqs_inactive = &npf_altqs[1]; +} + +int +npf_begin_altq(void) +{ + struct npf_altq *altq; + int error = 0; + /* Purge the old altq list */ + while ((altq = TAILQ_FIRST(npf_altqs_inactive)) != NULL) { + TAILQ_REMOVE(npf_altqs_inactive, altq, entries); + if (altq->qname[0] == 0) { + /* detach and destroy the discipline */ + if ((error = altq_remove(altq)) != 0) + return error; + } else + npf_qid_unref(altq->qid); + 
pool_put(&npf_altq_pl, altq); + } + + return 0; +} + +void +npf_qid_unref(u_int32_t qid) +{ + tag_unref(&npf_qids, (u_int16_t)qid); +} + +void +tag_unref(struct npf_tags *head, u_int16_t tag) +{ + struct npf_tagname *p, *next; + if (tag == 0) + return; + for (p = TAILQ_FIRST(head); p != NULL; p = next) { + next = TAILQ_NEXT(p, entries); + if (tag == p->tag) { + if (--p->ref == 0) { + TAILQ_REMOVE(head, p, entries); + free(p, M_TEMP); + } + break; + } + } +} + +int +npf_add_altq(void *data) +{ + struct npfioc_altq *paa = (struct npfioc_altq *)data; + struct npf_altq *altq, *a; + int error; + + altq = pool_get(&npf_altq_pl, PR_NOWAIT); + if (altq == NULL) { + error = ENOMEM; + return error; + } + memcpy(altq, &paa->altq, sizeof(*altq)); + /* + * if this is for a queue, find the discipline and + * copy the necessary fields + */ + if (altq->qname[0] != 0) { + if ((altq->qid = npf_qname2qid(altq->qname)) == 0) { + error = EBUSY; + pool_put(&npf_altq_pl, altq); + return error; + } + TAILQ_FOREACH(a, npf_altqs_inactive, entries) { + if (strncmp(a->ifname, altq->ifname, + IFNAMSIZ) == 0 && a->qname[0] == 0) { + altq->altq_disc = a->altq_disc; + break; + } + } + } + error = altq_add(altq); + if (error) { + pool_put(&npf_altq_pl, altq); + return error; + } + TAILQ_INSERT_TAIL(npf_altqs_inactive, altq, entries); + memcpy(&paa->altq, altq, sizeof(paa->altq)); + + if (!npf_altq_loaded) + npf_altq_loaded = 1; + return 0; +} + +u_int32_t +npf_qname2qid(char *qname) +{ + return ((u_int32_t)npftagname2tag(&npf_qids, qname)); +} + +u_int16_t +npftagname2tag(struct npf_tags *head, char *tagname) +{ + struct npf_tagname *tag, *p = NULL; + u_int16_t new_tagid = 1; + TAILQ_FOREACH(tag, head, entries) + if (strcmp(tagname, tag->name) == 0) { + tag->ref++; + return (tag->tag); + } + /* + * to avoid fragmentation, we do a linear search from the beginning + * and take the first free slot we find. if there is none or the list + * is empty, append a new entry at the end. 
+ */ + /* new entry */ + if (!TAILQ_EMPTY(head)) + for (p = TAILQ_FIRST(head); p != NULL && + p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) + new_tagid = p->tag + 1; + if (new_tagid > TAGID_MAX) + return 0; + /* allocate and fill new struct npf_tagname */ + tag = malloc(sizeof(*tag), + M_TEMP, M_NOWAIT); + if (tag == NULL) + return 0; + memset(tag, 0, sizeof(*tag)); + strlcpy(tag->name, tagname, sizeof(tag->name)); + tag->tag = new_tagid; + tag->ref++; + if (p != NULL) /* insert new entry before p */ + TAILQ_INSERT_BEFORE(p, tag, entries); + else /* either list empty or no free slot in between */ + TAILQ_INSERT_TAIL(head, tag, entries); + return (tag->tag); +} + +int +npf_get_altqs(void *data) +{ + struct npfioc_altq *paa = (struct npfioc_altq *)data; + struct npf_altq *altq; + paa->nq = 0; + TAILQ_FOREACH(altq, npf_altqs_active, entries) + paa->nq++; + return 0 ; +} + +#endif /* ALTQ */ \ No newline at end of file diff --git a/sys/net/npf/npf_altq.h b/sys/net/npf/npf_altq.h index bfdc0970c8456..36f7e2cf1d4f9 100644 --- a/sys/net/npf/npf_altq.h +++ b/sys/net/npf/npf_altq.h @@ -105,11 +105,26 @@ struct npf_altq { u_int32_t qid; /* return value */ }; +struct npf_tag { + uint16_t tag; /* tag id */ +}; + +struct npf_tagname { + TAILQ_ENTRY(npf_tagname) entries; + char name[NPF_TAG_NAME_SIZE]; + uint16_t tag; + int ref; +}; + TAILQ_HEAD(npf_altqqueue, npf_altq); -extern int npf_get_altqs(void *); -extern void npf_altq_init(void); -extern int npf_begin_altq(void); -extern int npf_add_altq(void *); +extern int npf_altq_loaded; + +extern int npf_get_altqs(void *); +extern void npf_altq_init(void); +extern int npf_begin_altq(void); +extern int npf_add_altq(void *); +void npf_qid_unref(uint32_t); +extern uint32_t npf_qname2qid(char *); #endif /* NPF_ALTQ_H_ */ diff --git a/sys/net/npf/npf_os.c b/sys/net/npf/npf_os.c index a74f256b983fd..34e8d2c1916e1 100644 --- a/sys/net/npf/npf_os.c +++ b/sys/net/npf/npf_os.c @@ -31,6 +31,11 @@ * NPF main: dynamic load/initialisation and 
unload routines. */ +#ifdef _KERNEL_OPT +#include "opt_altq.h" +#include "opt_inet.h" +#endif + #ifdef _KERNEL #include __KERNEL_RCSID(0, "$NetBSD: npf_os.c,v 1.21 2021/01/27 17:39:13 christos Exp $"); @@ -274,7 +279,18 @@ npf_dev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l) return npfctl_table(npf, data); case IOC_NPF_STATS: return npf_stats_export(npf, data); - +#ifdef ALTQ + case IOC_NPF_ADD_ALTQ: + return npf_add_altq(data); + case IOC_NPF_GET_ALTQS: + return npf_get_altqs(data); + case IOC_NPF_BEGIN_ALTQ: + /* initialize all queueing components on the first attempt */ + if (!npf_altq_loaded) { + npf_altq_init(); + } + return npf_begin_altq(); +#endif /* ALTQ */ case IOC_NPF_LOAD: case IOC_NPF_SAVE: case IOC_NPF_RULE: diff --git a/sys/rump/net/lib/libnpf/Makefile b/sys/rump/net/lib/libnpf/Makefile index dfea0c1f9390b..68282e8b5eb84 100644 --- a/sys/rump/net/lib/libnpf/Makefile +++ b/sys/rump/net/lib/libnpf/Makefile @@ -17,7 +17,7 @@ SRCS= npf.c npf_alg.c npf_conf.c npf_ctl.c npf_handler.c SRCS+= npf_bpf.c npf_if.c npf_inet.c npf_mbuf.c npf_nat.c SRCS+= npf_params.c npf_ruleset.c npf_rproc.c SRCS+= npf_conn.c npf_conndb.c npf_connkey.c npf_portmap.c -SRCS+= npf_state.c npf_state_tcp.c npf_tableset.c +SRCS+= npf_state.c npf_state_tcp.c npf_tableset.c npf_altq.c SRCS+= lpm.c npf_sendpkt.c npf_worker.c npf_os.c npf_ifaddr.c SRCS+= nvlist.c nvpair.c nv_kern_netbsd.c dnvlist.c diff --git a/usr.sbin/npf/npftest/Makefile b/usr.sbin/npf/npftest/Makefile index 83cfc6f7d8b9c..978c5e78aeaac 100644 --- a/usr.sbin/npf/npftest/Makefile +++ b/usr.sbin/npf/npftest/Makefile @@ -18,7 +18,7 @@ DPADD+= ${LIBNPFTEST}/libnpftest.a LDADD+= -L${LIBNPFTEST} -lnpftest LDADD+= -lrump -lrumpvfs_nofifofs -lrumpvfs -lrumpuser -LDADD+= -lrumpnet -lrumpnet_net -lrumpdev_bpf +LDADD+= -lrumpnet -lrumpnet_net -lrumpdev_bpf -lrumpnet_altq .if ${RUMP_SANITIZE:Uno} != "no" LDADD+= -fsanitize=${RUMP_SANITIZE} From 0f42f8b87205dee2d78be880273c8304bdf6cae7 Mon Sep 17 00:00:00 2001 From: 
Emmankoko Date: Thu, 6 Feb 2025 16:04:12 +0000 Subject: [PATCH 7/9] Starting ALTQ in NPF starting ALTQ is fully handed over to the kernel. when you start NPF, it checks if altq is loaded and not already running. if ALTQ is already running, it ignores it. --- sys/net/npf/npf_altq.c | 41 +++++++++++++++++++++++++++++++++++++++++ sys/net/npf/npf_altq.h | 3 +++ sys/net/npf/npf_os.c | 5 +++++ 3 files changed, 49 insertions(+) diff --git a/sys/net/npf/npf_altq.c b/sys/net/npf/npf_altq.c index 3014281f2eb92..ed6fbcf90aad8 100644 --- a/sys/net/npf/npf_altq.c +++ b/sys/net/npf/npf_altq.c @@ -23,6 +23,7 @@ struct npf_altqqueue npf_altqs[2]; struct pool npf_altq_pl; int npf_altq_loaded = 0; +bool npf_altq_running = false; /* npf interface to start altq */ void @@ -179,4 +180,44 @@ npf_get_altqs(void *data) return 0 ; } +int +npf_altq_start(void) +{ + int error; + struct npf_altq *altq; + /* enable all altq interfaces on active list */ + TAILQ_FOREACH(altq, npf_altqs_active, entries) { + if (altq->qname[0] == 0) { + error = npf_enable_altq(altq); + if (error != 0) + break; + } + } + + return error; +} + +int +npf_enable_altq(struct npf_altq *altq) +{ + struct ifnet *ifp; + struct tb_profile tb; + int s, error = 0; + if ((ifp = ifunit(altq->ifname)) == NULL) + return EINVAL; + if (ifp->if_snd.altq_type != ALTQT_NONE) + error = altq_enable(&ifp->if_snd); + /* set tokenbucket regulator */ + if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) { + tb.rate = altq->ifbandwidth; + tb.depth = altq->tbrsize; + s = splnet(); + error = tbr_set(&ifp->if_snd, &tb); + splx(s); + } + if (error == 0) + npf_altq_running = true; + return error; +} + #endif /* ALTQ */ \ No newline at end of file diff --git a/sys/net/npf/npf_altq.h b/sys/net/npf/npf_altq.h index 36f7e2cf1d4f9..0a9467bc07929 100644 --- a/sys/net/npf/npf_altq.h +++ b/sys/net/npf/npf_altq.h @@ -119,6 +119,7 @@ struct npf_tagname { TAILQ_HEAD(npf_altqqueue, npf_altq); extern int npf_altq_loaded; +extern bool 
npf_altq_running; extern int npf_get_altqs(void *); extern void npf_altq_init(void); @@ -126,5 +127,7 @@ extern int npf_begin_altq(void); extern int npf_add_altq(void *); void npf_qid_unref(uint32_t); extern uint32_t npf_qname2qid(char *); +int npf_altq_start(void); +int npf_enable_altq(struct npf_altq *); #endif /* NPF_ALTQ_H_ */ diff --git a/sys/net/npf/npf_os.c b/sys/net/npf/npf_os.c index 34e8d2c1916e1..68d734e9acd6b 100644 --- a/sys/net/npf/npf_os.c +++ b/sys/net/npf/npf_os.c @@ -248,7 +248,12 @@ npfctl_switch(void *data) if (onoff) { /* Enable: add pfil hooks. */ error = npf_pfil_register(false); +#ifdef ALTQ + if (!npf_altq_running && npf_altq_loaded) + error = npf_altq_start(); +#endif /* ALTQ */ } else { + /* Disable: remove pfil hooks. */ npf_pfil_unregister(false); error = 0; From 26db1845d14e3dcc25ca051db066eba4ca9970a5 Mon Sep 17 00:00:00 2001 From: Emmankoko Date: Thu, 6 Feb 2025 21:46:08 +0000 Subject: [PATCH 8/9] disable altq in npf altq is disabled when you stop npf. it is handled internally in the kernel and checks if altq is running. in npf, no filtering no queueing so why should queueing be active while filtering is disabled?
--- sys/net/npf/npf_altq.c | 43 ++++++++++++++++++++++++++++++++++++++++++ sys/net/npf/npf_altq.h | 3 +++ sys/net/npf/npf_os.c | 5 ++++- 3 files changed, 50 insertions(+), 1 deletion(-) diff --git a/sys/net/npf/npf_altq.c b/sys/net/npf/npf_altq.c index ed6fbcf90aad8..c4aa996bf84df 100644 --- a/sys/net/npf/npf_altq.c +++ b/sys/net/npf/npf_altq.c @@ -220,4 +220,47 @@ npf_enable_altq(struct npf_altq *altq) return error; } +int +npf_stop_altq(void) +{ + struct npf_altq *altq; + int error; + /* disable all altq interfaces on active list */ + TAILQ_FOREACH(altq, npf_altqs_active, entries) { + if (altq->qname[0] == 0) { + error = npf_disable_altq(altq); + if (error != 0) + break; + } + } + return error; +} + +int +npf_disable_altq(struct npf_altq *altq) +{ + struct ifnet *ifp; + struct tb_profile tb; + int s, error; + if ((ifp = ifunit(altq->ifname)) == NULL) + return EINVAL; + /* + * when the discipline is no longer referenced, it was overridden + * by a new one. if so, just return. + */ + if (altq->altq_disc != ifp->if_snd.altq_disc) + return 0; + error = altq_disable(&ifp->if_snd); + if (error == 0) { + /* clear tokenbucket regulator */ + tb.rate = 0; + s = splnet(); + error = tbr_set(&ifp->if_snd, &tb); + splx(s); + } + if (error == 0) + npf_altq_running = 0; + return error; +} + #endif /* ALTQ */ \ No newline at end of file diff --git a/sys/net/npf/npf_altq.h b/sys/net/npf/npf_altq.h index 0a9467bc07929..73512776d7280 100644 --- a/sys/net/npf/npf_altq.h +++ b/sys/net/npf/npf_altq.h @@ -129,5 +129,8 @@ void npf_qid_unref(uint32_t); extern uint32_t npf_qname2qid(char *); int npf_altq_start(void); int npf_enable_altq(struct npf_altq *); +int npf_disable_altq(struct npf_altq *); +int npf_stop_altq(void); + #endif /* NPF_ALTQ_H_ */ diff --git a/sys/net/npf/npf_os.c b/sys/net/npf/npf_os.c index 68d734e9acd6b..61c73db14f660 100644 --- a/sys/net/npf/npf_os.c +++ b/sys/net/npf/npf_os.c @@ -253,7 +253,10 @@ npfctl_switch(void *data) error = npf_altq_start(); #endif /* ALTQ */ 
} else { - +#ifdef ALTQ + if (npf_altq_running) + error = npf_stop_altq(); +#endif /* ALTQ */ /* Disable: remove pfil hooks. */ npf_pfil_unregister(false); error = 0; From 66d164a1fc4e06aee86b722425914f6db34a8708 Mon Sep 17 00:00:00 2001 From: Emmankoko Date: Fri, 7 Feb 2025 10:04:00 +0000 Subject: [PATCH 9/9] refactor queue management --- usr.sbin/npf/npfctl/npfctl_altq.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/usr.sbin/npf/npfctl/npfctl_altq.c b/usr.sbin/npf/npfctl/npfctl_altq.c index 6aa41e85b8867..da2ae0aec994d 100644 --- a/usr.sbin/npf/npfctl/npfctl_altq.c +++ b/usr.sbin/npf/npfctl/npfctl_altq.c @@ -166,7 +166,7 @@ npfctl_eval_bw(struct node_queue_bw *bw, char *bw_spec) /* create root queue for cbq or hfsc */ static int npf_add_root_queue(struct npf_altq pa, const char * ifname, - char qname[], struct node_queue_opt *opts) + char *qname, struct node_queue_opt *opts) { struct node_queue_bw bw; struct npf_altq pb; @@ -174,14 +174,12 @@ npf_add_root_queue(struct npf_altq pa, const char * ifname, /* * we cannot use sizeof(qname) directly here as it will give sizeof(char*) - * so the copyably bytes is manually hack the using sizeof(char) * qname max size + * so use the copyably bytes diectly */ memset(&pb, 0, sizeof(pb)); - if (strlcpy(qname, "root_", (sizeof(char) * NPF_QNAME_SIZE)) >= - (sizeof(char) * NPF_QNAME_SIZE)) + if (strlcpy(qname, "root_", NPF_QNAME_SIZE) >= NPF_QNAME_SIZE) errx(EXIT_FAILURE, "add_root: strlcpy"); - if (strlcat(qname, ifname, (sizeof(char) * NPF_QNAME_SIZE)) >= - (sizeof(char) * NPF_QNAME_SIZE)) + if (strlcat(qname, ifname, NPF_QNAME_SIZE) >= NPF_QNAME_SIZE) errx(EXIT_FAILURE, "add_root: strlcat"); if (strlcpy(pb.qname, qname, sizeof(pb.qname)) >= sizeof(pb.qname)) @@ -207,7 +205,7 @@ npf_add_root_queue(struct npf_altq pa, const char * ifname, */ static void altq_append_queues(struct npf_altq pa, const char *ifname, - char qname[], struct node_queue *queue) + char *qname, struct node_queue *queue) {
struct node_queue *n; n = calloc(1, sizeof(*n)); @@ -319,7 +317,8 @@ expand_altq(struct npf_altq *a, const char *ifname, sizeof(pa.ifname)) >= sizeof(pa.ifname)) errx(1, "expand_altq: strlcpy"); if (ifdisc_lookup(&pa)) { - yyerror("only one scheduler per interface.\n altq already defined on %s", pa.ifname); + yyerror("only one scheduler per interface." + "\naltq already defined on %s", pa.ifname); errs++; } else { if (eval_npfaltq(&pa, &bwspec, opts))