From 2f69dbcfdd56931d5205725c0ec7640fdabd913d Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 22 Sep 2025 10:48:07 +0000 Subject: [PATCH 01/65] Initial benchmark version --- .gitignore | 3 + spec/performance/bench.sh | 177 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 180 insertions(+) create mode 100755 spec/performance/bench.sh diff --git a/.gitignore b/.gitignore index 2df55b9e3f..8cd05104c3 100644 --- a/.gitignore +++ b/.gitignore @@ -76,6 +76,9 @@ react_on_rails/spec/dummy/**/*.res.js react_on_rails_pro/spec/dummy/.bsb.lock react_on_rails_pro/spec/dummy/**/*.res.js +# Performance test results +/bench_results + # Generated by ROR FS-based Registry generated diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh new file mode 100755 index 0000000000..a0f6f1de40 --- /dev/null +++ b/spec/performance/bench.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash +set -euo pipefail +#set -x # Uncomment for debugging commands + +# Benchmark parameters +TARGET="http://${BASE_URL:-localhost:3001}/${ROUTE:-server_side_hello_world_hooks}" +# requests per second; if "max" will get maximum number of queries instead of a fixed rate +RATE=${RATE:-50} +# virtual users for k6 +VUS=${VUS:-100} +DURATION_SEC=${DURATION_SEC:-10} +DURATION="${DURATION_SEC}s" +# Tools to run (comma-separated) +TOOLS=${TOOLS:-fortio,vegeta,k6} + +# Validate input parameters +if ! { [ "$RATE" = "max" ] || { [[ "$RATE" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$RATE > 0") )); }; }; then + echo "Error: RATE must be 'max' or a positive number (got: '$RATE')" >&2 + exit 1 +fi +if ! { [[ "$VUS" =~ ^[0-9]+$ ]] && [ "$VUS" -gt 0 ]; }; then + echo "Error: VUS must be a positive integer (got: '$VUS')" >&2 + exit 1 +fi +if ! { [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION_SEC > 0") )); }; then + echo "Error: DURATION_SEC must be a positive number (got: '$DURATION_SEC')" >&2 + exit 1 +fi + +OUTDIR="bench_results" + +# Precompute checks for each tool +RUN_FORTIO=0 +RUN_VEGETA=0 +RUN_K6=0 +[[ ",$TOOLS," == *",fortio,"* ]] && RUN_FORTIO=1 +[[ ",$TOOLS," == *",vegeta,"* ]] && RUN_VEGETA=1 +[[ ",$TOOLS," == *",k6,"* ]] && RUN_K6=1 + +for cmd in ${TOOLS//,/ } jq column awk tee bc; do + if ! 
command -v "$cmd" >/dev/null 2>&1; then + echo "Error: required tool '$cmd' is not installed" >&2 + exit 1 + fi +done + +TIMEOUT_SEC=60 +START=$(date +%s) +until curl -fsS "$TARGET" >/dev/null; do + if (( $(date +%s) - START > TIMEOUT_SEC )); then + echo "Error: Target $TARGET not responding within ${TIMEOUT_SEC}s" >&2 + exit 1 + fi + sleep 1 +done + +mkdir -p "$OUTDIR" + +if [ "$RATE" = "max" ]; then + FORTIO_ARGS=(-qps 0) + VEGETA_ARGS=(-rate=infinity) + K6_SCENARIOS="{ + max_rate: { + executor: 'shared-iterations', + vus: $VUS, + iterations: $((VUS * DURATION_SEC * 10)), + maxDuration: '$DURATION' + } + }" +else + FORTIO_ARGS=(-qps "$RATE" -uniform) + VEGETA_ARGS=(-rate="$RATE") + K6_SCENARIOS="{ + constant_rate: { + executor: 'constant-arrival-rate', + rate: $RATE, + timeUnit: '1s', + duration: '$DURATION', + preAllocatedVUs: $VUS, + maxVUs: $((VUS * 10)) + } + }" +fi + +if (( RUN_FORTIO )); then + echo "===> Fortio" + # TODO https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass + fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout 30s -json "$OUTDIR/fortio.json" "$TARGET" \ + | tee "$OUTDIR/fortio.txt" +fi + +if (( RUN_VEGETA )); then + echo + echo "===> Vegeta" + echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" \ + | tee "$OUTDIR/vegeta.bin" \ + | vegeta report | tee "$OUTDIR/vegeta.txt" + vegeta report -type=json "$OUTDIR/vegeta.bin" > "$OUTDIR/vegeta.json" +fi + +if (( RUN_K6 )); then + echo + echo "===> k6" + cat < "$OUTDIR/k6_test.js" +import http from 'k6/http'; +import { check } from 'k6'; + +export const options = { + scenarios: $K6_SCENARIOS, +}; + +export default function () { + const response = http.get('$TARGET'); + check(response, { + 'status=200': r => r.status === 200, + // you can add more if needed: + // 'status=500': r => r.status === 500, + }); +} +EOF + + k6 run --summary-export="$OUTDIR/k6_summary.json" --summary-trend-stats "min,avg,med,max,p(90),p(99)" "$OUTDIR/k6_test.js" | tee "$OUTDIR/k6.txt" +fi + +echo +echo "===> Parsing results and generating summary" + +echo -e "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus" > "$OUTDIR/summary.txt" + +if (( RUN_FORTIO )); then + FORTIO_RPS=$(jq '.ActualQPS' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_P50=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==50) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_P90=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==90) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_P99=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==99) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_STATUS=$(jq -r '.RetCodes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/fortio.json") + echo -e "Fortio\t$FORTIO_RPS\t$FORTIO_P50\t$FORTIO_P90\t$FORTIO_P99\t$FORTIO_STATUS" >> "$OUTDIR/summary.txt" +fi + +if (( RUN_VEGETA )); then + # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period + VEGETA_RPS=$(jq '.throughput' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_P50=$(jq '.latencies["50th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_P90=$(jq '.latencies["90th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_P99=$(jq '.latencies["99th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_STATUS=$(jq -r '.status_codes | to_entries | map("\(.key)=\(.value)") | join(",")' 
"$OUTDIR/vegeta.json") + echo -e "Vegeta\t$VEGETA_RPS\t$VEGETA_P50\t$VEGETA_P90\t$VEGETA_P99\t$VEGETA_STATUS" >> "$OUTDIR/summary.txt" +fi + +if (( RUN_K6 )); then + K6_RPS=$(jq '.metrics.iterations.rate' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P50=$(jq '.metrics.http_req_duration.med' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P90=$(jq '.metrics.http_req_duration["p(90)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P99=$(jq '.metrics.http_req_duration["p(99)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + # Status: compute successful vs failed requests + K6_REQS_TOTAL=$(jq '.metrics.http_reqs.count' "$OUTDIR/k6_summary.json") + K6_STATUS=$(jq -r ' + .root_group.checks + | to_entries + | map(.key[7:] + "=" + (.value.passes|tostring)) + | join(",") + ' "$OUTDIR/k6_summary.json") + K6_REQS_KNOWN_STATUS=$(jq -r ' + .root_group.checks + | to_entries + | map(.value.passes) + | add + ' "$OUTDIR/k6_summary.json") + K6_REQS_OTHER=$(( K6_REQS_TOTAL - K6_REQS_KNOWN_STATUS )) + if [ "$K6_REQS_OTHER" -gt 0 ]; then + K6_STATUS="$K6_STATUS,other=$K6_REQS_OTHER" + fi + echo -e "k6\t$K6_RPS\t$K6_P50\t$K6_P90\t$K6_P99\t$K6_STATUS" >> "$OUTDIR/summary.txt" +fi + +echo +echo "Summary saved to $OUTDIR/summary.txt" +column -t -s $'\t' "$OUTDIR/summary.txt" From 77c2e96356b6e89248b67698c60602d6c6185ba5 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Thu, 2 Oct 2025 14:27:53 +0000 Subject: [PATCH 02/65] Add production scripts --- react_on_rails/spec/dummy/bin/prod | 29 +++++++++++++++++++++++ react_on_rails/spec/dummy/bin/prod-assets | 9 +++++++ 2 files changed, 38 insertions(+) create mode 100755 react_on_rails/spec/dummy/bin/prod create mode 100755 react_on_rails/spec/dummy/bin/prod-assets diff --git a/react_on_rails/spec/dummy/bin/prod b/react_on_rails/spec/dummy/bin/prod new file mode 100755 index 0000000000..35d0d355ce --- /dev/null +++ b/react_on_rails/spec/dummy/bin/prod @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Run only after ./prod-assets + +# Check if assets are precompiled +MANIFEST="public/webpack/production/manifest.json" + +if [ ! -d "public/assets" ]; then + echo "ERROR: public/assets not found. Run ./bin/prod-assets first" + exit 1 +fi + +if [ ! -f "$MANIFEST" ]; then + echo "ERROR: $MANIFEST not found. 
Run ./bin/prod-assets first" + exit 1 +fi + +# Simple up-to-date check: warn if source files are newer than manifest.json +if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer "$MANIFEST" 2>/dev/null | grep -q .; then + echo "WARNING: client or config has changes newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +if [ -f "yarn.lock" ] && [ "yarn.lock" -nt "$MANIFEST" ]; then + echo "WARNING: yarn.lock is newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +NODE_ENV=production RAILS_ENV=production bundle exec rails server -p 3001 diff --git a/react_on_rails/spec/dummy/bin/prod-assets b/react_on_rails/spec/dummy/bin/prod-assets new file mode 100755 index 0000000000..cf493134fa --- /dev/null +++ b/react_on_rails/spec/dummy/bin/prod-assets @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +export NODE_ENV=production +export RAILS_ENV=production +if [ "$CI" = "true" ]; then + bundle exec bootsnap precompile --gemfile app/ lib/ config/ +fi +yarn run build:rescript +bundle exec rails assets:precompile From 0f8b7ab661ec829b12cece60daa9f362e7ff37b2 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 3 Oct 2025 16:17:25 +0000 Subject: [PATCH 03/65] Initial benchmark workflow --- .github/workflows/benchmark.yml | 359 ++++++++++++++++++++++++++++++++ 1 file changed, 359 insertions(+) create mode 100644 .github/workflows/benchmark.yml diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 0000000000..c4bb4740cb --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,359 @@ +name: Benchmark Workflow + +on: + # https://github.com/mxschmitt/action-tmate?tab=readme-ov-file#manually-triggered-debug + workflow_dispatch: + inputs: + debug_enabled: + description: 'Enable SSH access (⚠️ Security Risk - read workflow comments)' + required: false + default: false + type: boolean + rate: + description: 'Requests per second (use "max" for maximum throughput)' + required: false + default: '50' + type: string + duration_sec: + description: 'Duration in seconds' + required: false + default: 10 + type: number + vus: + description: 'Virtual users for k6' + required: false + default: 100 + type: number + tools: + description: 'Comma-separated list of tools to run' + required: false + default: 'fortio,vegeta,k6' + type: string + push: + branches: + - master + paths-ignore: + - '**.md' + - 'docs/**' + pull_request: + paths-ignore: + - '**.md' + - 'docs/**' +env: + RUBY_VERSION: "3.3.7" + BUNDLER_VERSION: "2.5.4" + FORTIO_VERSION: "1.73.0" + K6_VERSION: "1.4.2" + VEGETA_VERSION: "12.13.0" + # Benchmark parameters + RATE: ${{ github.event.inputs.rate || '50' }} + DURATION_SEC: ${{ github.event.inputs.duration_sec || '10' }} + VUS: ${{ github.event.inputs.vus || '100' }} + TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} + +jobs: + benchmark: + runs-on: ubuntu-latest + + steps: + # ============================================ + # STEP 1: CHECKOUT CODE + # ============================================ + - name: Checkout repository + uses: actions/checkout@v4 + + # ============================================ + # STEP 2: OPTIONAL SSH ACCESS + # ============================================ + # NOTE: Interactive confirmation is not possible in GitHub Actions. + # As a secure workaround, SSH access is gated by the workflow_dispatch + # input variable 'debug_enabled' which defaults to false. + # Users must explicitly set this to true to enable SSH. 
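+      #
+      # For example, a manual run with explicit inputs can be dispatched from the
+      # GitHub CLI (hypothetical values shown; assumes gh is installed and
+      # authenticated):
+      #   gh workflow run benchmark.yml -f debug_enabled=false -f rate=max -f duration_sec=30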
+ + - name: SSH Warning + if: ${{ github.event.inputs.debug_enabled == true || github.event.inputs.debug_enabled == 'true' }} + run: | + echo "⚠️ ⚠️ ⚠️ SSH ACCESS ENABLED ⚠️ ⚠️ ⚠️" + echo "" + echo "SECURITY NOTICE:" + echo " - SSH access exposes your GitHub Actions runner" + echo " - Only proceed if you understand and accept the risks" + echo " - Do NOT store secrets or sensitive data on the runner" + echo " - Access is limited to the workflow initiator only" + echo " - The session will remain open until manually terminated" + echo "" + echo "⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️" + + - name: Setup SSH access (if enabled) + if: ${{ github.event.inputs.debug_enabled == true || github.event.inputs.debug_enabled == 'true' }} + uses: mxschmitt/action-tmate@v3 + with: + detached: true + limit-access-to-actor: true # Only workflow trigger can access + + # ============================================ + # STEP 3: INSTALL BENCHMARKING TOOLS + # ============================================ + + - name: Add tools directory to PATH + run: | + mkdir -p ~/bin + echo "$HOME/bin" >> $GITHUB_PATH + + - name: Cache Fortio binary + id: cache-fortio + uses: actions/cache@v4 + with: + path: ~/bin/fortio + key: fortio-${{ runner.os }}-${{ runner.arch }}-${{ env.FORTIO_VERSION }} + + - name: Install Fortio + if: steps.cache-fortio.outputs.cache-hit != 'true' + run: | + echo "📦 Installing Fortio v${FORTIO_VERSION}" + + # Download and extract fortio binary + wget -q https://github.com/fortio/fortio/releases/download/v${FORTIO_VERSION}/fortio-linux_amd64-${FORTIO_VERSION}.tgz + tar -xzf fortio-linux_amd64-${FORTIO_VERSION}.tgz + + # Store in cache directory + mv usr/bin/fortio ~/bin/ + + - name: Cache Vegeta binary + id: cache-vegeta + uses: actions/cache@v4 + with: + path: ~/bin/vegeta + key: vegeta-${{ runner.os }}-${{ runner.arch }}-${{ env.VEGETA_VERSION }} + + - name: Install Vegeta + if: steps.cache-vegeta.outputs.cache-hit != 'true' + run: | + echo "📦 Installing Vegeta v${VEGETA_VERSION}" + + # Download and extract vegeta binary + wget -q https://github.com/tsenart/vegeta/releases/download/v${VEGETA_VERSION}/vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz + tar -xzf vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz + + # Store in cache directory + mv vegeta ~/bin/ + + - name: Setup k6 + uses: grafana/setup-k6-action@v1 + with: + k6-version: ${{ env.K6_VERSION }} + + # ============================================ + # STEP 4: START APPLICATION SERVER + # ============================================ + + - name: Setup Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '3.4' + bundler: 2.5.9 + + - name: Get gem home directory + run: echo "GEM_HOME_PATH=$(gem env home)" >> $GITHUB_ENV + + - name: Cache foreman gem + id: cache-foreman + uses: actions/cache@v4 + with: + path: ${{ env.GEM_HOME_PATH }} + key: foreman-gem-${{ runner.os }}-ruby-${{ env.RUBY_VERSION }} + + - name: Install foreman + if: steps.cache-foreman.outputs.cache-hit != 'true' + run: gem install foreman + + - name: Fix dependency for libyaml-dev + run: sudo apt install libyaml-dev -y + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: '22' + cache: yarn + cache-dependency-path: '**/yarn.lock' + + - name: Print system information + run: | + echo "Linux release: "; cat /etc/issue + echo "Current user: "; whoami + echo "Current directory: "; pwd + echo "Ruby version: "; ruby -v + echo "Node version: "; node -v + echo "Yarn version: "; yarn --version + echo "Bundler version: "; bundle --version + + - name: Install Node modules with Yarn 
for renderer package + run: | + yarn install --no-progress --no-emoji --frozen-lockfile + npm install --global yalc + + - name: yalc publish for react-on-rails + run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish + + - name: yalc add react-on-rails + run: cd spec/dummy && yalc add react-on-rails + + - name: Install Node modules with Yarn for dummy app + run: cd spec/dummy && yarn install --no-progress --no-emoji + + - name: Save dummy app ruby gems to cache + uses: actions/cache@v4 + with: + path: spec/dummy/vendor/bundle + key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }} + + - name: Install Ruby Gems for dummy app + run: | + cd spec/dummy + bundle lock --add-platform 'x86_64-linux' + if ! bundle check --path=vendor/bundle; then + bundle _2.5.9_ install --path=vendor/bundle --jobs=4 --retry=3 + fi + + - name: generate file system-based packs + run: cd spec/dummy && RAILS_ENV="production" bundle exec rake react_on_rails:generate_packs + + - name: Prepare production assets + run: | + set -e # Exit on any error + echo "🔨 Building production assets..." + cd spec/dummy + + if ! bin/prod-assets; then + echo "❌ ERROR: Failed to build production assets" + exit 1 + fi + + echo "✅ Production assets built successfully" + + - name: Start production server + run: | + set -e # Exit on any error + echo "🚀 Starting production server..." + cd spec/dummy + + # Start server in background + bin/prod & + echo "Server started in background" + + # Wait for server to be ready (max 30 seconds) + echo "⏳ Waiting for server to be ready..." + for i in {1..30}; do + if curl -fsS http://localhost:3001 > /dev/null; then + echo "✅ Server is ready and responding" + exit 0 + fi + echo " Attempt $i/30: Server not ready yet..." + sleep 1 + done + + echo "❌ ERROR: Server failed to start within 30 seconds" + exit 1 + + # ============================================ + # STEP 5: RUN BENCHMARKS + # ============================================ + + - name: Execute benchmark suite + timeout-minutes: 20 + run: | + set -e # Exit on any error + echo "🏃 Running benchmark suite..." + echo "Script: spec/performance/bench.sh" + echo "" + echo "Benchmark parameters:" + echo " - RATE: ${RATE}" + echo " - DURATION_SEC: ${DURATION_SEC}" + echo " - VUS: ${VUS}" + echo " - TOOLS: ${TOOLS}" + echo "" + + if ! spec/performance/bench.sh; then + echo "❌ ERROR: Benchmark execution failed" + exit 1 + fi + + echo "✅ Benchmark suite completed successfully" + + - name: Validate benchmark results + run: | + set -e # Exit on any error + echo "🔍 Validating benchmark output files..." + + RESULTS_DIR="bench_results" + REQUIRED_FILES=("summary.txt") + MISSING_FILES=() + + # Check if results directory exists + if [ ! -d "${RESULTS_DIR}" ]; then + echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + exit 1 + fi + + # List all generated files + echo "Generated files:" + ls -lh ${RESULTS_DIR}/ || true + echo "" + + # Check for required files + for file in "${REQUIRED_FILES[@]}"; do + if [ ! -f "${RESULTS_DIR}/${file}" ]; then + MISSING_FILES+=("${file}") + fi + done + + # Report validation results + if [ ${#MISSING_FILES[@]} -eq 0 ]; then + echo "✅ All required benchmark output files present" + echo "📊 Summary preview:" + head -20 ${RESULTS_DIR}/summary.txt || true + else + echo "⚠️ WARNING: Some required files are missing:" + printf ' - %s\n' "${MISSING_FILES[@]}" + echo "Continuing with available results..." 
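+            # Deliberately non-fatal: the upload step below runs with if: always()
+            # and if-no-files-found: warn, so partial results are still collected.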
+ fi + + # ============================================ + # STEP 6: COLLECT BENCHMARK RESULTS + # ============================================ + + - name: Upload benchmark results + uses: actions/upload-artifact@v4 + if: always() # Upload even if benchmark fails + with: + name: benchmark-results-${{ github.run_number }} + path: bench_results/ + retention-days: 30 + if-no-files-found: warn + + - name: Verify artifact upload + if: success() + run: | + echo "✅ Benchmark results uploaded as workflow artifacts" + echo "📦 Artifact name: benchmark-results-${{ github.run_number }}" + echo "🔗 Access artifacts from the Actions tab in GitHub" + + # ============================================ + # WORKFLOW COMPLETION + # ============================================ + + - name: Workflow summary + if: always() + run: | + echo "📋 Benchmark Workflow Summary" + echo "==============================" + echo "Status: ${{ job.status }}" + echo "Run number: ${{ github.run_number }}" + echo "Triggered by: ${{ github.actor }}" + echo "Branch: ${{ github.ref_name }}" + echo "" + if [ "${{ job.status }}" == "success" ]; then + echo "✅ All steps completed successfully" + else + echo "❌ Workflow encountered errors - check logs above" + fi From b0920c5428015ddac5ea472ac18176791de9ac25 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 12:49:50 +0000 Subject: [PATCH 04/65] Add server warm-up to benchmark --- spec/performance/bench.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index a0f6f1de40..ebafa55080 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -54,6 +54,13 @@ until curl -fsS "$TARGET" >/dev/null; do sleep 1 done +echo "Warming up server with 10 requests..." +for i in {1..10}; do + curl -fsS "$TARGET" >/dev/null || true + sleep 0.5 +done +echo "Warm-up complete" + mkdir -p "$OUTDIR" if [ "$RATE" = "max" ]; then From aadb8039a6e43e4e1246b57c746c24f3c803a336 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 13:13:44 +0000 Subject: [PATCH 05/65] Make request timeout a parameter --- .github/workflows/benchmark.yml | 6 ++++++ spec/performance/bench.sh | 13 +++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index c4bb4740cb..c4590ae4bd 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -19,6 +19,11 @@ on: required: false default: 10 type: number + request_timeout: + description: 'Request timeout (e.g., "60s", "1m", "90s")' + required: false + default: '60s' + type: string vus: description: 'Virtual users for k6' required: false @@ -48,6 +53,7 @@ env: # Benchmark parameters RATE: ${{ github.event.inputs.rate || '50' }} DURATION_SEC: ${{ github.event.inputs.duration_sec || '10' }} + REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} VUS: ${{ github.event.inputs.vus || '100' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index ebafa55080..5227e1080d 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -10,6 +10,8 @@ RATE=${RATE:-50} VUS=${VUS:-100} DURATION_SEC=${DURATION_SEC:-10} DURATION="${DURATION_SEC}s" +# request timeout (duration string like "60s", "1m", "90s") +REQUEST_TIMEOUT=${REQUEST_TIMEOUT:-60s} # Tools to run (comma-separated) TOOLS=${TOOLS:-fortio,vegeta,k6} @@ -26,6 +28,10 @@ if ! 
{ [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION echo "Error: DURATION_SEC must be a positive number (got: '$DURATION_SEC')" >&2 exit 1 fi +if ! [[ "$REQUEST_TIMEOUT" =~ ^([0-9]+(\.[0-9]+)?[smh])+$ ]]; then + echo "Error: REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '$REQUEST_TIMEOUT')" >&2 + exit 1 +fi OUTDIR="bench_results" @@ -92,14 +98,14 @@ fi if (( RUN_FORTIO )); then echo "===> Fortio" # TODO https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass - fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout 30s -json "$OUTDIR/fortio.json" "$TARGET" \ + fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout "$REQUEST_TIMEOUT" -json "$OUTDIR/fortio.json" "$TARGET" \ | tee "$OUTDIR/fortio.txt" fi if (( RUN_VEGETA )); then echo echo "===> Vegeta" - echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" \ + echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" -timeout="$REQUEST_TIMEOUT" \ | tee "$OUTDIR/vegeta.bin" \ | vegeta report | tee "$OUTDIR/vegeta.txt" vegeta report -type=json "$OUTDIR/vegeta.bin" > "$OUTDIR/vegeta.json" @@ -114,6 +120,9 @@ import { check } from 'k6'; export const options = { scenarios: $K6_SCENARIOS, + httpReq: { + timeout: '$REQUEST_TIMEOUT', + }, }; export default function () { From ee54b61cf010b40a19f641ced6bf0147e4dbe2ae Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 13:27:16 +0000 Subject: [PATCH 06/65] Update defaults for now --- .github/workflows/benchmark.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index c4590ae4bd..b3be831eef 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -12,12 +12,12 @@ on: rate: description: 'Requests per second (use "max" for maximum throughput)' required: false - default: '50' + default: 'max' type: string duration_sec: description: 'Duration in seconds' required: false - default: 10 + default: 30 type: number request_timeout: description: 'Request timeout (e.g., "60s", "1m", "90s")' @@ -51,8 +51,8 @@ env: K6_VERSION: "1.4.2" VEGETA_VERSION: "12.13.0" # Benchmark parameters - RATE: ${{ github.event.inputs.rate || '50' }} - DURATION_SEC: ${{ github.event.inputs.duration_sec || '10' }} + RATE: ${{ github.event.inputs.rate || 'max' }} + DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} VUS: ${{ github.event.inputs.vus || '100' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} From a414491cdcee0b0110d2ca377d2bdb1d288b643d Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 13:39:08 +0000 Subject: [PATCH 07/65] Fix knip error --- knip.ts | 3 +++ 1 file changed, 3 insertions(+) diff --git a/knip.ts b/knip.ts index 6cc0239ee3..29d76fd106 100644 --- a/knip.ts +++ b/knip.ts @@ -13,6 +13,9 @@ const config: KnipConfig = { // Pro package binaries used in Pro workflows 'playwright', 'e2e-test', + // Local binaries + 'bin/.*', + 'spec/performance/bench.sh', ], ignore: ['react_on_rails_pro/**', 'react_on_rails/vendor/**'], ignoreDependencies: [ From 3f17910bfce9536b2931d0ec56cd2ba31924271c Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:04:18 +0000 Subject: [PATCH 08/65] Enable clustered mode in production --- .github/workflows/benchmark.yml | 8 +++++ react_on_rails/spec/dummy/config/puma.rb | 41 +++++++++++++++--------- 2 files 
changed, 33 insertions(+), 16 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index b3be831eef..57bbb76dbb 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -34,6 +34,11 @@ on: required: false default: 'fortio,vegeta,k6' type: string + web_concurrency: + description: 'Number of Puma worker processes' + required: false + default: 2 + type: number push: branches: - master @@ -56,6 +61,7 @@ env: REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} VUS: ${{ github.event.inputs.vus || '100' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} + WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} jobs: benchmark: @@ -275,8 +281,10 @@ jobs: echo "Benchmark parameters:" echo " - RATE: ${RATE}" echo " - DURATION_SEC: ${DURATION_SEC}" + echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" echo " - VUS: ${VUS}" echo " - TOOLS: ${TOOLS}" + echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" echo "" if ! spec/performance/bench.sh; then diff --git a/react_on_rails/spec/dummy/config/puma.rb b/react_on_rails/spec/dummy/config/puma.rb index de5feec982..01b93c7d91 100644 --- a/react_on_rails/spec/dummy/config/puma.rb +++ b/react_on_rails/spec/dummy/config/puma.rb @@ -10,10 +10,12 @@ min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count } threads min_threads_count, max_threads_count +rails_env = ENV.fetch("RAILS_ENV", "development") + # Specifies the `worker_timeout` threshold that Puma will use to wait before # terminating a worker in development environments. # -worker_timeout 3600 if ENV.fetch("RAILS_ENV", "development") == "development" +worker_timeout 3600 if rails_env == "development" # Specifies the `port` that Puma will listen on to receive requests; default is 3000. # @@ -21,25 +23,32 @@ # Specifies the `environment` that Puma will run in. # -environment ENV.fetch("RAILS_ENV", "development") +environment rails_env # Specifies the `pidfile` that Puma will use. pidfile ENV.fetch("PIDFILE", "tmp/pids/server.pid") -# Specifies the number of `workers` to boot in clustered mode. -# Workers are forked web server processes. If using threads and workers together -# the concurrency of the application would be max `threads` * `workers`. -# Workers do not work on JRuby or Windows (both of which do not support -# processes). -# -# workers ENV.fetch("WEB_CONCURRENCY") { 2 } - -# Use the `preload_app!` method when specifying a `workers` number. -# This directive tells Puma to first boot the application and load code -# before forking the application. This takes advantage of Copy On Write -# process behavior so workers use less memory. -# -# preload_app! +if rails_env == "production" + # Specifies the number of `workers` to boot in clustered mode. + # Workers are forked web server processes. If using threads and workers together + # the concurrency of the application would be max `threads` * `workers`. + # Workers do not work on JRuby or Windows (both of which do not support + # processes). + # + workers ENV.fetch("WEB_CONCURRENCY", 2) + + # Use the `preload_app!` method when specifying a `workers` number. + # This directive tells Puma to first boot the application and load code + # before forking the application. This takes advantage of Copy On Write + # process behavior so workers use less memory. + # + preload_app! + + # Specifies the `worker_shutdown_timeout` threshold that Puma will use to wait before + # terminating a worker. 
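+  # The 60s below is an assumption sized for slow SSR responses under benchmark
+  # load; tune it together with the workflow's REQUEST_TIMEOUT so Puma does not
+  # kill workers mid-request.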
+ # + worker_shutdown_timeout 60 +end # Allow puma to be restarted by `bin/rails restart` command. plugin :tmp_restart From e72c1a27a540cb0adf484f93f79de66db56a75dd Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:44:21 +0000 Subject: [PATCH 09/65] Add MAX_CONNECTIONS --- .github/workflows/benchmark.yml | 16 +++++++++++----- spec/performance/bench.sh | 34 +++++++++++++++++++++------------ 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 57bbb76dbb..879851b6bf 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -24,10 +24,14 @@ on: required: false default: '60s' type: string - vus: - description: 'Virtual users for k6' + connections: + description: 'Concurrent connections/virtual users' + required: false + default: 10 + type: number + max_connections: + description: 'Maximum connections/virtual users' required: false - default: 100 type: number tools: description: 'Comma-separated list of tools to run' @@ -59,7 +63,8 @@ env: RATE: ${{ github.event.inputs.rate || 'max' }} DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} - VUS: ${{ github.event.inputs.vus || '100' }} + CONNECTIONS: ${{ github.event.inputs.connections || '10' }} + MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || '10' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} @@ -282,7 +287,8 @@ jobs: echo " - RATE: ${RATE}" echo " - DURATION_SEC: ${DURATION_SEC}" echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" - echo " - VUS: ${VUS}" + echo " - CONNECTIONS: ${CONNECTIONS}" + echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" echo " - TOOLS: ${TOOLS}" echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" echo "" diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index 5227e1080d..dca256fbf4 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -6,8 +6,10 @@ set -euo pipefail TARGET="http://${BASE_URL:-localhost:3001}/${ROUTE:-server_side_hello_world_hooks}" # requests per second; if "max" will get maximum number of queries instead of a fixed rate RATE=${RATE:-50} -# virtual users for k6 -VUS=${VUS:-100} +# concurrent connections/virtual users +CONNECTIONS=${CONNECTIONS:-10} +# maximum connections/virtual users +MAX_CONNECTIONS=${MAX_CONNECTIONS:-$CONNECTIONS} DURATION_SEC=${DURATION_SEC:-10} DURATION="${DURATION_SEC}s" # request timeout (duration string like "60s", "1m", "90s") @@ -20,8 +22,12 @@ if ! { [ "$RATE" = "max" ] || { [[ "$RATE" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc echo "Error: RATE must be 'max' or a positive number (got: '$RATE')" >&2 exit 1 fi -if ! { [[ "$VUS" =~ ^[0-9]+$ ]] && [ "$VUS" -gt 0 ]; }; then - echo "Error: VUS must be a positive integer (got: '$VUS')" >&2 +if ! { [[ "$CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$CONNECTIONS" -gt 0 ]; }; then + echo "Error: CONNECTIONS must be a positive integer (got: '$CONNECTIONS')" >&2 + exit 1 +fi +if ! { [[ "$MAX_CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$MAX_CONNECTIONS" -gt 0 ]; }; then + echo "Error: MAX_CONNECTIONS must be a positive integer (got: '$MAX_CONNECTIONS')" >&2 exit 1 fi if ! 
{ [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION_SEC > 0") )); }; then @@ -70,27 +76,31 @@ echo "Warm-up complete" mkdir -p "$OUTDIR" if [ "$RATE" = "max" ]; then - FORTIO_ARGS=(-qps 0) - VEGETA_ARGS=(-rate=infinity) + if [ "$CONNECTIONS" != "$MAX_CONNECTIONS" ]; then + echo "For RATE=max, CONNECTIONS (got $CONNECTIONS) and MAX_CONNECTIONS (got $MAX_CONNECTIONS) should be the same" + exit 1 + fi + FORTIO_ARGS=(-qps 0 -c "$CONNECTIONS") + VEGETA_ARGS=(-rate=infinity --workers="$CONNECTIONS" --max-workers="$CONNECTIONS") K6_SCENARIOS="{ max_rate: { executor: 'shared-iterations', - vus: $VUS, - iterations: $((VUS * DURATION_SEC * 10)), + vus: $CONNECTIONS, + iterations: $((CONNECTIONS * DURATION_SEC * 10)), maxDuration: '$DURATION' } }" else - FORTIO_ARGS=(-qps "$RATE" -uniform) - VEGETA_ARGS=(-rate="$RATE") + FORTIO_ARGS=(-qps "$RATE" -uniform -c "$CONNECTIONS") + VEGETA_ARGS=(-rate="$RATE" --workers="$CONNECTIONS" --max-workers="$MAX_CONNECTIONS") K6_SCENARIOS="{ constant_rate: { executor: 'constant-arrival-rate', rate: $RATE, timeUnit: '1s', duration: '$DURATION', - preAllocatedVUs: $VUS, - maxVUs: $((VUS * 10)) + preAllocatedVUs: $CONNECTIONS, + maxVUs: $MAX_CONNECTIONS } }" fi From a0a3cff1200719e37a36f9db37d648541363a01a Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:45:22 +0000 Subject: [PATCH 10/65] Fix max rate K6 scenario --- spec/performance/bench.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index dca256fbf4..d147c3e071 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -84,10 +84,9 @@ if [ "$RATE" = "max" ]; then VEGETA_ARGS=(-rate=infinity --workers="$CONNECTIONS" --max-workers="$CONNECTIONS") K6_SCENARIOS="{ max_rate: { - executor: 'shared-iterations', + executor: 'constant-vus', vus: $CONNECTIONS, - iterations: $((CONNECTIONS * DURATION_SEC * 10)), - maxDuration: '$DURATION' + duration: '$DURATION' } }" else From 8858ebaeec040118ec67d0881c285365bea5f701 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:52:55 +0000 Subject: [PATCH 11/65] Reorder workflow parameters more logically --- .github/workflows/benchmark.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 879851b6bf..557c0c3322 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -33,16 +33,16 @@ on: description: 'Maximum connections/virtual users' required: false type: number - tools: - description: 'Comma-separated list of tools to run' - required: false - default: 'fortio,vegeta,k6' - type: string web_concurrency: description: 'Number of Puma worker processes' required: false default: 2 type: number + tools: + description: 'Comma-separated list of tools to run' + required: false + default: 'fortio,vegeta,k6' + type: string push: branches: - master @@ -65,8 +65,8 @@ env: REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} CONNECTIONS: ${{ github.event.inputs.connections || '10' }} MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || '10' }} - TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} + TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: benchmark: @@ -289,8 +289,8 @@ jobs: echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" echo " - CONNECTIONS: 
${CONNECTIONS}" echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" - echo " - TOOLS: ${TOOLS}" echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" + echo " - TOOLS: ${TOOLS}" echo "" if ! spec/performance/bench.sh; then From feafe6516e07f63b7d424b6c53cc86f27bcab7bb Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 15:15:39 +0000 Subject: [PATCH 12/65] Closer to recommended Fortio options --- spec/performance/bench.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index d147c3e071..b8a5be9256 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -90,7 +90,7 @@ if [ "$RATE" = "max" ]; then } }" else - FORTIO_ARGS=(-qps "$RATE" -uniform -c "$CONNECTIONS") + FORTIO_ARGS=(-qps "$RATE" -uniform -nocatchup -c "$CONNECTIONS") VEGETA_ARGS=(-rate="$RATE" --workers="$CONNECTIONS" --max-workers="$MAX_CONNECTIONS") K6_SCENARIOS="{ constant_rate: { From b22a99158be43fb83319e706c7789fca402c4503 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 15:23:37 +0000 Subject: [PATCH 13/65] Allow configuring RAILS_MAX/MIN_THREADS in the workflow --- .github/workflows/benchmark.yml | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 557c0c3322..54284c7779 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -36,7 +36,16 @@ on: web_concurrency: description: 'Number of Puma worker processes' required: false - default: 2 + default: 4 + type: number + rails_max_threads: + description: 'Maximum number of Puma threads' + required: false + default: 3 + type: number + rails_min_threads: + description: 'Minimum number of Puma threads (same as maximum if not set)' + required: false type: number tools: description: 'Comma-separated list of tools to run' @@ -63,9 +72,11 @@ env: RATE: ${{ github.event.inputs.rate || 'max' }} DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} - CONNECTIONS: ${{ github.event.inputs.connections || '10' }} - MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || '10' }} - WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} + CONNECTIONS: ${{ github.event.inputs.connections || 10 }} + MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || 10 }} + WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} + RAILS_MAX_THREADS: ${{ github.event.inputs.rails_max_threads || 3 }} + RAILS_MIN_THREADS: ${{ github.event.inputs.rails_min_threads || github.event.inputs.rails_max_threads || 3 }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: @@ -290,6 +301,8 @@ jobs: echo " - CONNECTIONS: ${CONNECTIONS}" echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" + echo " - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS}" + echo " - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS}" echo " - TOOLS: ${TOOLS}" echo "" From e667e99d4af2db46b276dd79f42599b836d1e3a3 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 15:57:09 +0000 Subject: [PATCH 14/65] Move showing benchmark params to bench.sh for simplicity --- .github/workflows/benchmark.yml | 13 ------------- spec/performance/bench.sh | 12 ++++++++++++ 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml 
index 54284c7779..377502d292 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -292,19 +292,6 @@ jobs: run: | set -e # Exit on any error echo "🏃 Running benchmark suite..." - echo "Script: spec/performance/bench.sh" - echo "" - echo "Benchmark parameters:" - echo " - RATE: ${RATE}" - echo " - DURATION_SEC: ${DURATION_SEC}" - echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" - echo " - CONNECTIONS: ${CONNECTIONS}" - echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" - echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" - echo " - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS}" - echo " - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS}" - echo " - TOOLS: ${TOOLS}" - echo "" if ! spec/performance/bench.sh; then echo "❌ ERROR: Benchmark execution failed" diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index b8a5be9256..b83d429497 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -56,6 +56,18 @@ for cmd in ${TOOLS//,/ } jq column awk tee bc; do fi done +echo "Benchmark parameters: + - RATE: ${RATE:-unset} + - DURATION_SEC: ${DURATION_SEC:-unset} + - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT:-unset} + - CONNECTIONS: ${CONNECTIONS:-unset} + - MAX_CONNECTIONS: ${MAX_CONNECTIONS:-unset} + - WEB_CONCURRENCY: ${WEB_CONCURRENCY:-unset} + - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS:-unset} + - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS:-unset} + - TOOLS: ${TOOLS:-unset} +" + TIMEOUT_SEC=60 START=$(date +%s) until curl -fsS "$TARGET" >/dev/null; do From ea47ebcd2140ba7a486e1f14bca5e7fbeb212e87 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 18:36:03 +0000 Subject: [PATCH 15/65] Convert the benchmark script to Ruby --- .github/workflows/benchmark.yml | 2 +- knip.ts | 1 - spec/performance/bench.rb | 318 ++++++++++++++++++++++++++++++++ spec/performance/bench.sh | 214 --------------------- 4 files changed, 319 insertions(+), 216 deletions(-) create mode 100755 spec/performance/bench.rb delete mode 100755 spec/performance/bench.sh diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 377502d292..80bde5ecd1 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -293,7 +293,7 @@ jobs: set -e # Exit on any error echo "🏃 Running benchmark suite..." - if ! spec/performance/bench.sh; then + if ! 
ruby spec/performance/bench.rb; then echo "❌ ERROR: Benchmark execution failed" exit 1 fi diff --git a/knip.ts b/knip.ts index 29d76fd106..36bfc74ff6 100644 --- a/knip.ts +++ b/knip.ts @@ -15,7 +15,6 @@ const config: KnipConfig = { 'e2e-test', // Local binaries 'bin/.*', - 'spec/performance/bench.sh', ], ignore: ['react_on_rails_pro/**', 'react_on_rails/vendor/**'], ignoreDependencies: [ diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb new file mode 100755 index 0000000000..6f9a9536f4 --- /dev/null +++ b/spec/performance/bench.rb @@ -0,0 +1,318 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require "json" +require "fileutils" +require "net/http" +require "uri" + +# Benchmark parameters +BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") +ROUTE = ENV.fetch("ROUTE", "server_side_hello_world_hooks") +TARGET = URI.parse("http://#{BASE_URL}/#{ROUTE}") +# requests per second; if "max" will get maximum number of queries instead of a fixed rate +RATE = ENV.fetch("RATE", "50") +# concurrent connections/virtual users +CONNECTIONS = ENV.fetch("CONNECTIONS", "10").to_i +# maximum connections/virtual users +MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS.to_s).to_i +DURATION_SEC = ENV.fetch("DURATION_SEC", "10").to_f +DURATION = "#{DURATION_SEC}s".freeze +# request timeout (duration string like "60s", "1m", "90s") +REQUEST_TIMEOUT = ENV.fetch("REQUEST_TIMEOUT", "60s") +# Tools to run (comma-separated) +TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") + +OUTDIR = "bench_results" +FORTIO_JSON = "#{OUTDIR}/fortio.json".freeze +FORTIO_TXT = "#{OUTDIR}/fortio.txt".freeze +VEGETA_BIN = "#{OUTDIR}/vegeta.bin".freeze +VEGETA_JSON = "#{OUTDIR}/vegeta.json".freeze +VEGETA_TXT = "#{OUTDIR}/vegeta.txt".freeze +K6_TEST_JS = "#{OUTDIR}/k6_test.js".freeze +K6_SUMMARY_JSON = "#{OUTDIR}/k6_summary.json".freeze +K6_TXT = "#{OUTDIR}/k6.txt".freeze +SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze + +# Validate input parameters +def validate_rate(rate) + return if rate == "max" + + return if rate.match?(/^\d+(\.\d+)?$/) && rate.to_f.positive? + + raise "RATE must be 'max' or a positive number (got: '#{rate}')" +end + +def validate_positive_integer(value, name) + return if value.is_a?(Integer) && value.positive? + + raise "#{name} must be a positive integer (got: '#{value}')" +end + +def validate_duration(value, name) + return if value.is_a?(Numeric) && value.positive? 
+ + raise "#{name} must be a positive number (got: '#{value}')" +end + +def validate_timeout(value) + return if value.match?(/^(\d+(\.\d+)?[smh])+$/) + + raise "REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '#{value}')" +end + +def parse_json_file(file_path, tool_name) + JSON.parse(File.read(file_path)) +rescue Errno::ENOENT + raise "#{tool_name} results file not found: #{file_path}" +rescue JSON::ParserError => e + raise "Failed to parse #{tool_name} JSON: #{e.message}" +rescue StandardError => e + raise "Failed to read #{tool_name} results: #{e.message}" +end + +validate_rate(RATE) +validate_positive_integer(CONNECTIONS, "CONNECTIONS") +validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") +validate_duration(DURATION_SEC, "DURATION_SEC") +validate_timeout(REQUEST_TIMEOUT) + +raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS + +# Precompute checks for each tool +run_fortio = TOOLS.include?("fortio") +run_vegeta = TOOLS.include?("vegeta") +run_k6 = TOOLS.include?("k6") + +# Check required tools are installed +required_tools = TOOLS + %w[column tee] +required_tools.each do |cmd| + raise "required tool '#{cmd}' is not installed" unless system("command -v #{cmd} >/dev/null 2>&1") +end + +puts <<~PARAMS + Benchmark parameters: + - RATE: #{RATE} + - DURATION_SEC: #{DURATION_SEC} + - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT} + - CONNECTIONS: #{CONNECTIONS} + - MAX_CONNECTIONS: #{MAX_CONNECTIONS} + - WEB_CONCURRENCY: #{ENV['WEB_CONCURRENCY'] || 'unset'} + - RAILS_MAX_THREADS: #{ENV['RAILS_MAX_THREADS'] || 'unset'} + - RAILS_MIN_THREADS: #{ENV['RAILS_MIN_THREADS'] || 'unset'} + - TOOLS: #{TOOLS.join(', ')} +PARAMS + +# Helper method to check if server is responding +def server_responding?(uri) + response = Net::HTTP.get_response(uri) + response.is_a?(Net::HTTPSuccess) +rescue StandardError + false +end + +# Wait for the server to be ready +TIMEOUT_SEC = 60 +start_time = Time.now +loop do + break if server_responding?(TARGET) + + raise "Target #{TARGET} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC + + sleep 1 +end + +# Warm up server +puts "Warming up server with 10 requests..." 
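+# Warm-up is best-effort: server_responding? rescues StandardError and returns
+# false, so a failed warm-up request never aborts the benchmark.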
+10.times do + server_responding?(TARGET) + sleep 0.5 +end +puts "Warm-up complete" + +FileUtils.mkdir_p(OUTDIR) + +# Configure tool-specific arguments +if RATE == "max" + if CONNECTIONS != MAX_CONNECTIONS + raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" + end + + fortio_args = ["-qps", 0, "-c", CONNECTIONS] + vegeta_args = ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] + k6_scenarios = <<~JS.strip + { + max_rate: { + executor: 'constant-vus', + vus: #{CONNECTIONS}, + duration: '#{DURATION}' + } + } + JS +else + fortio_args = ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] + vegeta_args = ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] + k6_scenarios = <<~JS.strip + { + constant_rate: { + executor: 'constant-arrival-rate', + rate: #{RATE}, + timeUnit: '1s', + duration: '#{DURATION}', + preAllocatedVUs: #{CONNECTIONS}, + maxVUs: #{MAX_CONNECTIONS} + } + } + JS +end + +# Run Fortio +if run_fortio + puts "===> Fortio" + # TODO: https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass + fortio_cmd = [ + "fortio", "load", + *fortio_args, + "-t", DURATION, + "-timeout", REQUEST_TIMEOUT, + "-json", FORTIO_JSON, + TARGET + ].join(" ") + raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{FORTIO_TXT}") +end + +# Run Vegeta +if run_vegeta + puts "\n===> Vegeta" + vegeta_cmd = [ + "echo", "'GET #{TARGET}'", "|", + "vegeta", "attack", + *vegeta_args, + "-duration=#{DURATION}", + "-timeout=#{REQUEST_TIMEOUT}" + ].join(" ") + raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{VEGETA_BIN} | vegeta report | tee #{VEGETA_TXT}") + raise "Vegeta report generation failed" unless system("vegeta report -type=json #{VEGETA_BIN} > #{VEGETA_JSON}") +end + +# Run k6 +if run_k6 + puts "\n===> k6" + k6_script = <<~JS + import http from 'k6/http'; + import { check } from 'k6'; + + export const options = { + scenarios: #{k6_scenarios}, + httpReq: { + timeout: '#{REQUEST_TIMEOUT}', + }, + }; + + export default function () { + const response = http.get('#{TARGET}'); + check(response, { + 'status=200': r => r.status === 200, + // you can add more if needed: + // 'status=500': r => r.status === 500, + }); + } + JS + File.write(K6_TEST_JS, k6_script) + k6_command = "k6 run --summary-export=#{K6_SUMMARY_JSON} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'" + raise "k6 benchmark failed" unless system("#{k6_command} #{K6_TEST_JS} | tee #{K6_TXT}") +end + +puts "\n===> Parsing results and generating summary" + +# Initialize summary file +File.write(SUMMARY_TXT, "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus\n") + +# Parse Fortio results +if run_fortio + begin + fortio_data = parse_json_file(FORTIO_JSON, "Fortio") + fortio_rps = fortio_data["ActualQPS"]&.round(2) || "missing" + + percentiles = fortio_data.dig("DurationHistogram", "Percentiles") || [] + p50_data = percentiles.find { |p| p["Percentile"] == 50 } + p90_data = percentiles.find { |p| p["Percentile"] == 90 } + p99_data = percentiles.find { |p| p["Percentile"] == 99 } + + raise "Fortio results missing percentile data" unless p50_data && p90_data && p99_data + + fortio_p50 = (p50_data["Value"] * 1000).round(2) + fortio_p90 = (p90_data["Value"] * 1000).round(2) + fortio_p99 = (p99_data["Value"] * 1000).round(2) + fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" + File.open(SUMMARY_TXT, "a") do |f| + f.puts 
"Fortio\t#{fortio_rps}\t#{fortio_p50}\t#{fortio_p90}\t#{fortio_p99}\t#{fortio_status}" + end + rescue StandardError => e + puts "Error: #{e.message}" + File.open(SUMMARY_TXT, "a") do |f| + f.puts "Fortio\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" + end + end +end + +# Parse Vegeta results +if run_vegeta + begin + vegeta_data = parse_json_file(VEGETA_JSON, "Vegeta") + # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period + vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" + vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" + vegeta_line = [ + "Vegeta", vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status + ].join("\t") + File.open(SUMMARY_TXT, "a") do |f| + f.puts vegeta_line + end + rescue StandardError => e + puts "Error: #{e.message}" + File.open(SUMMARY_TXT, "a") do |f| + f.puts "Vegeta\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" + end + end +end + +# Parse k6 results +if run_k6 + begin + k6_data = parse_json_file(K6_SUMMARY_JSON, "k6") + k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing" + k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing" + k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing" + k6_p99 = k6_data.dig("metrics", "http_req_duration", "p(99)")&.round(2) || "missing" + + # Status: compute successful vs failed requests + k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0 + k6_checks = k6_data.dig("root_group", "checks") || {} + # Extract status code from check name (e.g., "status=200" -> "200") + # Handle both "status=XXX" format and other potential formats + k6_status_parts = k6_checks.map do |name, check| + status_label = name.start_with?("status=") ? name.delete_prefix("status=") : name + "#{status_label}=#{check['passes']}" + end + k6_reqs_known_status = k6_checks.values.sum { |check| check["passes"] || 0 } + k6_reqs_other = k6_reqs_total - k6_reqs_known_status + k6_status_parts << "other=#{k6_reqs_other}" if k6_reqs_other.positive? + k6_status = k6_status_parts.empty? ? 
"missing" : k6_status_parts.join(",") + + File.open(SUMMARY_TXT, "a") do |f| + f.puts "k6\t#{k6_rps}\t#{k6_p50}\t#{k6_p90}\t#{k6_p99}\t#{k6_status}" + end + rescue StandardError => e + puts "Error: #{e.message}" + File.open(SUMMARY_TXT, "a") do |f| + f.puts "k6\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" + end + end +end + +puts "\nSummary saved to #{SUMMARY_TXT}" +system("column", "-t", "-s", "\t", SUMMARY_TXT) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh deleted file mode 100755 index b83d429497..0000000000 --- a/spec/performance/bench.sh +++ /dev/null @@ -1,214 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail -#set -x # Uncomment for debugging commands - -# Benchmark parameters -TARGET="http://${BASE_URL:-localhost:3001}/${ROUTE:-server_side_hello_world_hooks}" -# requests per second; if "max" will get maximum number of queries instead of a fixed rate -RATE=${RATE:-50} -# concurrent connections/virtual users -CONNECTIONS=${CONNECTIONS:-10} -# maximum connections/virtual users -MAX_CONNECTIONS=${MAX_CONNECTIONS:-$CONNECTIONS} -DURATION_SEC=${DURATION_SEC:-10} -DURATION="${DURATION_SEC}s" -# request timeout (duration string like "60s", "1m", "90s") -REQUEST_TIMEOUT=${REQUEST_TIMEOUT:-60s} -# Tools to run (comma-separated) -TOOLS=${TOOLS:-fortio,vegeta,k6} - -# Validate input parameters -if ! { [ "$RATE" = "max" ] || { [[ "$RATE" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$RATE > 0") )); }; }; then - echo "Error: RATE must be 'max' or a positive number (got: '$RATE')" >&2 - exit 1 -fi -if ! { [[ "$CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$CONNECTIONS" -gt 0 ]; }; then - echo "Error: CONNECTIONS must be a positive integer (got: '$CONNECTIONS')" >&2 - exit 1 -fi -if ! { [[ "$MAX_CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$MAX_CONNECTIONS" -gt 0 ]; }; then - echo "Error: MAX_CONNECTIONS must be a positive integer (got: '$MAX_CONNECTIONS')" >&2 - exit 1 -fi -if ! { [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION_SEC > 0") )); }; then - echo "Error: DURATION_SEC must be a positive number (got: '$DURATION_SEC')" >&2 - exit 1 -fi -if ! [[ "$REQUEST_TIMEOUT" =~ ^([0-9]+(\.[0-9]+)?[smh])+$ ]]; then - echo "Error: REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '$REQUEST_TIMEOUT')" >&2 - exit 1 -fi - -OUTDIR="bench_results" - -# Precompute checks for each tool -RUN_FORTIO=0 -RUN_VEGETA=0 -RUN_K6=0 -[[ ",$TOOLS," == *",fortio,"* ]] && RUN_FORTIO=1 -[[ ",$TOOLS," == *",vegeta,"* ]] && RUN_VEGETA=1 -[[ ",$TOOLS," == *",k6,"* ]] && RUN_K6=1 - -for cmd in ${TOOLS//,/ } jq column awk tee bc; do - if ! command -v "$cmd" >/dev/null 2>&1; then - echo "Error: required tool '$cmd' is not installed" >&2 - exit 1 - fi -done - -echo "Benchmark parameters: - - RATE: ${RATE:-unset} - - DURATION_SEC: ${DURATION_SEC:-unset} - - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT:-unset} - - CONNECTIONS: ${CONNECTIONS:-unset} - - MAX_CONNECTIONS: ${MAX_CONNECTIONS:-unset} - - WEB_CONCURRENCY: ${WEB_CONCURRENCY:-unset} - - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS:-unset} - - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS:-unset} - - TOOLS: ${TOOLS:-unset} -" - -TIMEOUT_SEC=60 -START=$(date +%s) -until curl -fsS "$TARGET" >/dev/null; do - if (( $(date +%s) - START > TIMEOUT_SEC )); then - echo "Error: Target $TARGET not responding within ${TIMEOUT_SEC}s" >&2 - exit 1 - fi - sleep 1 -done - -echo "Warming up server with 10 requests..." 
-for i in {1..10}; do - curl -fsS "$TARGET" >/dev/null || true - sleep 0.5 -done -echo "Warm-up complete" - -mkdir -p "$OUTDIR" - -if [ "$RATE" = "max" ]; then - if [ "$CONNECTIONS" != "$MAX_CONNECTIONS" ]; then - echo "For RATE=max, CONNECTIONS (got $CONNECTIONS) and MAX_CONNECTIONS (got $MAX_CONNECTIONS) should be the same" - exit 1 - fi - FORTIO_ARGS=(-qps 0 -c "$CONNECTIONS") - VEGETA_ARGS=(-rate=infinity --workers="$CONNECTIONS" --max-workers="$CONNECTIONS") - K6_SCENARIOS="{ - max_rate: { - executor: 'constant-vus', - vus: $CONNECTIONS, - duration: '$DURATION' - } - }" -else - FORTIO_ARGS=(-qps "$RATE" -uniform -nocatchup -c "$CONNECTIONS") - VEGETA_ARGS=(-rate="$RATE" --workers="$CONNECTIONS" --max-workers="$MAX_CONNECTIONS") - K6_SCENARIOS="{ - constant_rate: { - executor: 'constant-arrival-rate', - rate: $RATE, - timeUnit: '1s', - duration: '$DURATION', - preAllocatedVUs: $CONNECTIONS, - maxVUs: $MAX_CONNECTIONS - } - }" -fi - -if (( RUN_FORTIO )); then - echo "===> Fortio" - # TODO https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass - fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout "$REQUEST_TIMEOUT" -json "$OUTDIR/fortio.json" "$TARGET" \ - | tee "$OUTDIR/fortio.txt" -fi - -if (( RUN_VEGETA )); then - echo - echo "===> Vegeta" - echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" -timeout="$REQUEST_TIMEOUT" \ - | tee "$OUTDIR/vegeta.bin" \ - | vegeta report | tee "$OUTDIR/vegeta.txt" - vegeta report -type=json "$OUTDIR/vegeta.bin" > "$OUTDIR/vegeta.json" -fi - -if (( RUN_K6 )); then - echo - echo "===> k6" - cat < "$OUTDIR/k6_test.js" -import http from 'k6/http'; -import { check } from 'k6'; - -export const options = { - scenarios: $K6_SCENARIOS, - httpReq: { - timeout: '$REQUEST_TIMEOUT', - }, -}; - -export default function () { - const response = http.get('$TARGET'); - check(response, { - 'status=200': r => r.status === 200, - // you can add more if needed: - // 'status=500': r => r.status === 500, - }); -} -EOF - - k6 run --summary-export="$OUTDIR/k6_summary.json" --summary-trend-stats "min,avg,med,max,p(90),p(99)" "$OUTDIR/k6_test.js" | tee "$OUTDIR/k6.txt" -fi - -echo -echo "===> Parsing results and generating summary" - -echo -e "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus" > "$OUTDIR/summary.txt" - -if (( RUN_FORTIO )); then - FORTIO_RPS=$(jq '.ActualQPS' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_P50=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==50) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_P90=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==90) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_P99=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==99) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_STATUS=$(jq -r '.RetCodes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/fortio.json") - echo -e "Fortio\t$FORTIO_RPS\t$FORTIO_P50\t$FORTIO_P90\t$FORTIO_P99\t$FORTIO_STATUS" >> "$OUTDIR/summary.txt" -fi - -if (( RUN_VEGETA )); then - # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period - VEGETA_RPS=$(jq '.throughput' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_P50=$(jq '.latencies["50th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_P90=$(jq '.latencies["90th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_P99=$(jq '.latencies["99th"] / 
1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_STATUS=$(jq -r '.status_codes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/vegeta.json") - echo -e "Vegeta\t$VEGETA_RPS\t$VEGETA_P50\t$VEGETA_P90\t$VEGETA_P99\t$VEGETA_STATUS" >> "$OUTDIR/summary.txt" -fi - -if (( RUN_K6 )); then - K6_RPS=$(jq '.metrics.iterations.rate' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - K6_P50=$(jq '.metrics.http_req_duration.med' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - K6_P90=$(jq '.metrics.http_req_duration["p(90)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - K6_P99=$(jq '.metrics.http_req_duration["p(99)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - # Status: compute successful vs failed requests - K6_REQS_TOTAL=$(jq '.metrics.http_reqs.count' "$OUTDIR/k6_summary.json") - K6_STATUS=$(jq -r ' - .root_group.checks - | to_entries - | map(.key[7:] + "=" + (.value.passes|tostring)) - | join(",") - ' "$OUTDIR/k6_summary.json") - K6_REQS_KNOWN_STATUS=$(jq -r ' - .root_group.checks - | to_entries - | map(.value.passes) - | add - ' "$OUTDIR/k6_summary.json") - K6_REQS_OTHER=$(( K6_REQS_TOTAL - K6_REQS_KNOWN_STATUS )) - if [ "$K6_REQS_OTHER" -gt 0 ]; then - K6_STATUS="$K6_STATUS,other=$K6_REQS_OTHER" - fi - echo -e "k6\t$K6_RPS\t$K6_P50\t$K6_P90\t$K6_P99\t$K6_STATUS" >> "$OUTDIR/summary.txt" -fi - -echo -echo "Summary saved to $OUTDIR/summary.txt" -column -t -s $'\t' "$OUTDIR/summary.txt" From 5dccbe7ef1295fd415f501551f1ac57d38c966c9 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 18:39:59 +0000 Subject: [PATCH 16/65] Fix k6 timeout --- spec/performance/bench.rb | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 6f9a9536f4..9021b41235 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -204,13 +204,10 @@ def server_responding?(uri) export const options = { scenarios: #{k6_scenarios}, - httpReq: { - timeout: '#{REQUEST_TIMEOUT}', - }, }; export default function () { - const response = http.get('#{TARGET}'); + const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); check(response, { 'status=200': r => r.status === 200, // you can add more if needed: From 77e01ae92a622fef18fa134da9a7637b5473e6e3 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 18:49:21 +0000 Subject: [PATCH 17/65] Replace DURATION_SEC with DURATION --- .github/workflows/benchmark.yml | 10 +++++----- spec/performance/bench.rb | 22 ++++++++-------------- 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 80bde5ecd1..a16b74cccc 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -14,11 +14,11 @@ on: required: false default: 'max' type: string - duration_sec: - description: 'Duration in seconds' + duration: + description: 'Duration (e.g., "30s", "1m", "90s")' required: false - default: 30 - type: number + default: '30s' + type: string request_timeout: description: 'Request timeout (e.g., "60s", "1m", "90s")' required: false @@ -70,7 +70,7 @@ env: VEGETA_VERSION: "12.13.0" # Benchmark parameters RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} + DURATION: ${{ github.event.inputs.duration || '30s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} CONNECTIONS: ${{ github.event.inputs.connections || 10 }} 
MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || 10 }} diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 9021b41235..ac12eba048 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -15,10 +15,10 @@ # concurrent connections/virtual users CONNECTIONS = ENV.fetch("CONNECTIONS", "10").to_i # maximum connections/virtual users -MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS.to_s).to_i -DURATION_SEC = ENV.fetch("DURATION_SEC", "10").to_f -DURATION = "#{DURATION_SEC}s".freeze -# request timeout (duration string like "60s", "1m", "90s") +MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS).to_i +# benchmark duration (duration string like "30s", "1m", "90s") +DURATION = ENV.fetch("DURATION", "30s") +# request timeout (duration string as above) REQUEST_TIMEOUT = ENV.fetch("REQUEST_TIMEOUT", "60s") # Tools to run (comma-separated) TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") @@ -50,15 +50,9 @@ def validate_positive_integer(value, name) end def validate_duration(value, name) - return if value.is_a?(Numeric) && value.positive? - - raise "#{name} must be a positive number (got: '#{value}')" -end - -def validate_timeout(value) return if value.match?(/^(\d+(\.\d+)?[smh])+$/) - raise "REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '#{value}')" + raise "#{name} must be a duration like '10s', '1m', '1.5m' (got: '#{value}')" end def parse_json_file(file_path, tool_name) @@ -74,8 +68,8 @@ def parse_json_file(file_path, tool_name) validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") -validate_duration(DURATION_SEC, "DURATION_SEC") -validate_timeout(REQUEST_TIMEOUT) +validate_duration(DURATION, "DURATION") +validate_duration(REQUEST_TIMEOUT, "REQUEST_TIMEOUT") raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS @@ -93,7 +87,7 @@ def parse_json_file(file_path, tool_name) puts <<~PARAMS Benchmark parameters: - RATE: #{RATE} - - DURATION_SEC: #{DURATION_SEC} + - DURATION: #{DURATION} - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT} - CONNECTIONS: #{CONNECTIONS} - MAX_CONNECTIONS: #{MAX_CONNECTIONS} From 72c16cf761c2fc7b09ea33a4fdf9ca31ed3a4e3e Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 19:16:49 +0000 Subject: [PATCH 18/65] Group all code for a tool into a single block --- spec/performance/bench.rb | 225 +++++++++++++++++++------------------- 1 file changed, 115 insertions(+), 110 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index ac12eba048..a6f31292aa 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -24,14 +24,6 @@ TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") OUTDIR = "bench_results" -FORTIO_JSON = "#{OUTDIR}/fortio.json".freeze -FORTIO_TXT = "#{OUTDIR}/fortio.txt".freeze -VEGETA_BIN = "#{OUTDIR}/vegeta.bin".freeze -VEGETA_JSON = "#{OUTDIR}/vegeta.json".freeze -VEGETA_TXT = "#{OUTDIR}/vegeta.txt".freeze -K6_TEST_JS = "#{OUTDIR}/k6_test.js".freeze -K6_SUMMARY_JSON = "#{OUTDIR}/k6_summary.json".freeze -K6_TXT = "#{OUTDIR}/k6.txt".freeze SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze # Validate input parameters @@ -73,11 +65,6 @@ def parse_json_file(file_path, tool_name) raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS -# Precompute checks for each tool -run_fortio = 
TOOLS.include?("fortio") -run_vegeta = TOOLS.include?("vegeta") -run_k6 = TOOLS.include?("k6") - # Check required tools are installed required_tools = TOOLS + %w[column tee] required_tools.each do |cmd| @@ -126,103 +113,43 @@ def server_responding?(uri) FileUtils.mkdir_p(OUTDIR) -# Configure tool-specific arguments -if RATE == "max" - if CONNECTIONS != MAX_CONNECTIONS - raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" - end - - fortio_args = ["-qps", 0, "-c", CONNECTIONS] - vegeta_args = ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] - k6_scenarios = <<~JS.strip - { - max_rate: { - executor: 'constant-vus', - vus: #{CONNECTIONS}, - duration: '#{DURATION}' - } - } - JS -else - fortio_args = ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] - vegeta_args = ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] - k6_scenarios = <<~JS.strip - { - constant_rate: { - executor: 'constant-arrival-rate', - rate: #{RATE}, - timeUnit: '1s', - duration: '#{DURATION}', - preAllocatedVUs: #{CONNECTIONS}, - maxVUs: #{MAX_CONNECTIONS} - } - } - JS -end - -# Run Fortio -if run_fortio - puts "===> Fortio" - # TODO: https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass - fortio_cmd = [ - "fortio", "load", - *fortio_args, - "-t", DURATION, - "-timeout", REQUEST_TIMEOUT, - "-json", FORTIO_JSON, - TARGET - ].join(" ") - raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{FORTIO_TXT}") +# Validate RATE=max constraint +is_max_rate = RATE == "max" +if is_max_rate && CONNECTIONS != MAX_CONNECTIONS + raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" end -# Run Vegeta -if run_vegeta - puts "\n===> Vegeta" - vegeta_cmd = [ - "echo", "'GET #{TARGET}'", "|", - "vegeta", "attack", - *vegeta_args, - "-duration=#{DURATION}", - "-timeout=#{REQUEST_TIMEOUT}" - ].join(" ") - raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{VEGETA_BIN} | vegeta report | tee #{VEGETA_TXT}") - raise "Vegeta report generation failed" unless system("vegeta report -type=json #{VEGETA_BIN} > #{VEGETA_JSON}") -end - -# Run k6 -if run_k6 - puts "\n===> k6" - k6_script = <<~JS - import http from 'k6/http'; - import { check } from 'k6'; - - export const options = { - scenarios: #{k6_scenarios}, - }; - - export default function () { - const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); - check(response, { - 'status=200': r => r.status === 200, - // you can add more if needed: - // 'status=500': r => r.status === 500, - }); - } - JS - File.write(K6_TEST_JS, k6_script) - k6_command = "k6 run --summary-export=#{K6_SUMMARY_JSON} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'" - raise "k6 benchmark failed" unless system("#{k6_command} #{K6_TEST_JS} | tee #{K6_TXT}") -end - -puts "\n===> Parsing results and generating summary" - # Initialize summary file File.write(SUMMARY_TXT, "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus\n") -# Parse Fortio results -if run_fortio +# Fortio +if TOOLS.include?("fortio") begin - fortio_data = parse_json_file(FORTIO_JSON, "Fortio") + puts "===> Fortio" + + fortio_json = "#{OUTDIR}/fortio.json" + fortio_txt = "#{OUTDIR}/fortio.txt" + + # Configure Fortio arguments + # See https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass + fortio_args = + if is_max_rate + ["-qps", 0, "-c", CONNECTIONS] + 
else + ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] + end + + fortio_cmd = [ + "fortio", "load", + *fortio_args, + "-t", DURATION, + "-timeout", REQUEST_TIMEOUT, + "-json", fortio_json, + TARGET + ].join(" ") + raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{fortio_txt}") + + fortio_data = parse_json_file(fortio_json, "Fortio") fortio_rps = fortio_data["ActualQPS"]&.round(2) || "missing" percentiles = fortio_data.dig("DurationHistogram", "Percentiles") || [] @@ -247,10 +174,34 @@ def server_responding?(uri) end end -# Parse Vegeta results -if run_vegeta +# Vegeta +if TOOLS.include?("vegeta") begin - vegeta_data = parse_json_file(VEGETA_JSON, "Vegeta") + puts "\n===> Vegeta" + + vegeta_bin = "#{OUTDIR}/vegeta.bin" + vegeta_json = "#{OUTDIR}/vegeta.json" + vegeta_txt = "#{OUTDIR}/vegeta.txt" + + # Configure Vegeta arguments + vegeta_args = + if is_max_rate + ["-rate=0", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] + else + ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] + end + + vegeta_cmd = [ + "echo 'GET #{TARGET}' |", + "vegeta", "attack", + *vegeta_args, + "-duration=#{DURATION}", + "-timeout=#{REQUEST_TIMEOUT}" + ].join(" ") + raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{vegeta_bin} | vegeta report | tee #{vegeta_txt}") + raise "Vegeta report generation failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") + + vegeta_data = parse_json_file(vegeta_json, "Vegeta") # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" @@ -271,10 +222,64 @@ def server_responding?(uri) end end -# Parse k6 results -if run_k6 +# k6 +if TOOLS.include?("k6") begin - k6_data = parse_json_file(K6_SUMMARY_JSON, "k6") + puts "\n===> k6" + + k6_script_file = "#{OUTDIR}/k6_test.js" + k6_summary_json = "#{OUTDIR}/k6_summary.json" + k6_txt = "#{OUTDIR}/k6.txt" + + # Configure k6 scenarios + k6_scenarios = + if is_max_rate + <<~JS.strip + { + max_rate: { + executor: 'constant-vus', + vus: #{CONNECTIONS}, + duration: '#{DURATION}' + } + } + JS + else + <<~JS.strip + { + constant_rate: { + executor: 'constant-arrival-rate', + rate: #{RATE}, + timeUnit: '1s', + duration: '#{DURATION}', + preAllocatedVUs: #{CONNECTIONS}, + maxVUs: #{MAX_CONNECTIONS} + } + } + JS + end + + k6_script = <<~JS + import http from 'k6/http'; + import { check } from 'k6'; + + export const options = { + scenarios: #{k6_scenarios}, + }; + + export default function () { + const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); + check(response, { + 'status=200': r => r.status === 200, + // you can add more if needed: + // 'status=500': r => r.status === 500, + }); + } + JS + File.write(k6_script_file, k6_script) + k6_command = "k6 run --summary-export=#{k6_summary_json} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'" + raise "k6 benchmark failed" unless system("#{k6_command} #{k6_script_file} | tee #{k6_txt}") + + k6_data = parse_json_file(k6_summary_json, "k6") k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing" k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing" k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing" From 6446084c6b82cac89fc748e33b086d36152b99a2 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 
19:54:44 +0000 Subject: [PATCH 19/65] Remove duplication in adding summaries --- spec/performance/bench.rb | 58 +++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index a6f31292aa..0bfb160966 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -57,6 +57,16 @@ def parse_json_file(file_path, tool_name) raise "Failed to read #{tool_name} results: #{e.message}" end +def failure_metrics(error) + ["FAILED", "FAILED", "FAILED", "FAILED", error.message] +end + +def add_summary_line(*parts) + File.open(SUMMARY_TXT, "a") do |f| + f.puts parts.join("\t") + end +end + validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") @@ -120,11 +130,12 @@ def server_responding?(uri) end # Initialize summary file -File.write(SUMMARY_TXT, "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus\n") +File.write(SUMMARY_TXT, "") +add_summary_line("Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") # Fortio if TOOLS.include?("fortio") - begin + fortio_metrics = begin puts "===> Fortio" fortio_json = "#{OUTDIR}/fortio.json" @@ -162,21 +173,20 @@ def server_responding?(uri) fortio_p50 = (p50_data["Value"] * 1000).round(2) fortio_p90 = (p90_data["Value"] * 1000).round(2) fortio_p99 = (p99_data["Value"] * 1000).round(2) - fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "Fortio\t#{fortio_rps}\t#{fortio_p50}\t#{fortio_p90}\t#{fortio_p99}\t#{fortio_status}" - end + fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" + + [fortio_rps, fortio_p50, fortio_p90, fortio_p99, fortio_status] rescue StandardError => e puts "Error: #{e.message}" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "Fortio\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" - end + failure_metrics(e) end + + add_summary_line("Fortio", *fortio_metrics) end # Vegeta if TOOLS.include?("vegeta") - begin + vegeta_metrics = begin puts "\n===> Vegeta" vegeta_bin = "#{OUTDIR}/vegeta.bin" @@ -207,24 +217,20 @@ def server_responding?(uri) vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing" - vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" - vegeta_line = [ - "Vegeta", vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status - ].join("\t") - File.open(SUMMARY_TXT, "a") do |f| - f.puts vegeta_line - end + vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" + + [vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status] rescue StandardError => e puts "Error: #{e.message}" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "Vegeta\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" - end + failure_metrics(e) end + + add_summary_line("Vegeta", *vegeta_metrics) end # k6 if TOOLS.include?("k6") - begin + k6_metrics = begin puts "\n===> k6" k6_script_file = "#{OUTDIR}/k6_test.js" @@ -299,15 +305,13 @@ def server_responding?(uri) k6_status_parts << "other=#{k6_reqs_other}" if k6_reqs_other.positive? k6_status = k6_status_parts.empty? ? 
"missing" : k6_status_parts.join(",") - File.open(SUMMARY_TXT, "a") do |f| - f.puts "k6\t#{k6_rps}\t#{k6_p50}\t#{k6_p90}\t#{k6_p99}\t#{k6_status}" - end + [k6_rps, k6_p50, k6_p90, k6_p99, k6_status] rescue StandardError => e puts "Error: #{e.message}" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "k6\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" - end + failure_metrics(e) end + + add_summary_line("k6", *k6_metrics) end puts "\nSummary saved to #{SUMMARY_TXT}" From 4f305453fa7326b3769861a57bd92dedee811480 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 12:19:03 +0000 Subject: [PATCH 20/65] Benchmark all routes --- spec/performance/bench.rb | 159 +++++++++++++++++++++++++------------- 1 file changed, 106 insertions(+), 53 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 0bfb160966..78e9e951ef 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -1,15 +1,16 @@ #!/usr/bin/env ruby # frozen_string_literal: true +require "English" require "json" require "fileutils" require "net/http" require "uri" # Benchmark parameters +PRO = ENV.fetch("PRO", "false") == "true" +APP_DIR = PRO ? "react_on_rails_pro/spec/dummy" : "spec/dummy" BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") -ROUTE = ENV.fetch("ROUTE", "server_side_hello_world_hooks") -TARGET = URI.parse("http://#{BASE_URL}/#{ROUTE}") # requests per second; if "max" will get maximum number of queries instead of a fixed rate RATE = ENV.fetch("RATE", "50") # concurrent connections/virtual users @@ -67,6 +68,30 @@ def add_summary_line(*parts) end end +# Get routes from the Rails app filtered by pages# and react_router# controllers +def get_benchmark_routes(app_dir) + routes_output = `cd #{app_dir} && bundle exec rails routes 2>&1` + raise "Failed to get routes from #{app_dir}" unless $CHILD_STATUS.success? + + routes = [] + routes_output.each_line do |line| + # Parse lines like: "server_side_hello_world GET /server_side_hello_world(.:format) pages#server_side_hello_world" + # We want GET routes only (not POST, etc.) served by pages# or react_router# controllers + # Capture path up to (.:format) part using [^(\s]+ (everything except '(' and whitespace) + next unless (match = line.match(/GET\s+([^(\s]+).*(pages|react_router)#/)) + + path = match[1] + path = "/" if path.empty? # Handle root route + routes << path + end + raise "No pages# or react_router# routes found in #{app_dir}" if routes.empty? + + routes +end + +# Get all routes to benchmark +routes = get_benchmark_routes(APP_DIR) + validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") @@ -83,6 +108,8 @@ def add_summary_line(*parts) puts <<~PARAMS Benchmark parameters: + - APP_DIR: #{APP_DIR} + - BASE_URL: #{BASE_URL} - RATE: #{RATE} - DURATION: #{DURATION} - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT} @@ -104,47 +131,42 @@ def server_responding?(uri) # Wait for the server to be ready TIMEOUT_SEC = 60 +puts "Checking server availability at #{BASE_URL}..." +test_uri = URI.parse("http://#{BASE_URL}#{routes.first}") start_time = Time.now loop do - break if server_responding?(TARGET) + break if server_responding?(test_uri) - raise "Target #{TARGET} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC + raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC sleep 1 end - -# Warm up server -puts "Warming up server with 10 requests..." 
-10.times do - server_responding?(TARGET) - sleep 0.5 -end -puts "Warm-up complete" +puts "Server is ready!" FileUtils.mkdir_p(OUTDIR) # Validate RATE=max constraint -is_max_rate = RATE == "max" -if is_max_rate && CONNECTIONS != MAX_CONNECTIONS +IS_MAX_RATE = RATE == "max" +if IS_MAX_RATE && CONNECTIONS != MAX_CONNECTIONS raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" end -# Initialize summary file -File.write(SUMMARY_TXT, "") -add_summary_line("Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") +# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength -# Fortio -if TOOLS.include?("fortio") - fortio_metrics = begin - puts "===> Fortio" +# Benchmark a single route with Fortio +def run_fortio_benchmark(target, route_name) + return nil unless TOOLS.include?("fortio") - fortio_json = "#{OUTDIR}/fortio.json" - fortio_txt = "#{OUTDIR}/fortio.txt" + begin + puts "===> Fortio: #{route_name}" + + fortio_json = "#{OUTDIR}/#{route_name}_fortio.json" + fortio_txt = "#{OUTDIR}/#{route_name}_fortio.txt" # Configure Fortio arguments # See https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass fortio_args = - if is_max_rate + if IS_MAX_RATE ["-qps", 0, "-c", CONNECTIONS] else ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] @@ -156,7 +178,7 @@ def server_responding?(uri) "-t", DURATION, "-timeout", REQUEST_TIMEOUT, "-json", fortio_json, - TARGET + target ].join(" ") raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{fortio_txt}") @@ -180,29 +202,29 @@ def server_responding?(uri) puts "Error: #{e.message}" failure_metrics(e) end - - add_summary_line("Fortio", *fortio_metrics) end -# Vegeta -if TOOLS.include?("vegeta") - vegeta_metrics = begin - puts "\n===> Vegeta" +# Benchmark a single route with Vegeta +def run_vegeta_benchmark(target, route_name) + return nil unless TOOLS.include?("vegeta") + + begin + puts "\n===> Vegeta: #{route_name}" - vegeta_bin = "#{OUTDIR}/vegeta.bin" - vegeta_json = "#{OUTDIR}/vegeta.json" - vegeta_txt = "#{OUTDIR}/vegeta.txt" + vegeta_bin = "#{OUTDIR}/#{route_name}_vegeta.bin" + vegeta_json = "#{OUTDIR}/#{route_name}_vegeta.json" + vegeta_txt = "#{OUTDIR}/#{route_name}_vegeta.txt" # Configure Vegeta arguments vegeta_args = - if is_max_rate + if IS_MAX_RATE ["-rate=0", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] else ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] end vegeta_cmd = [ - "echo 'GET #{TARGET}' |", + "echo 'GET #{target}' |", "vegeta", "attack", *vegeta_args, "-duration=#{DURATION}", @@ -212,7 +234,6 @@ def server_responding?(uri) raise "Vegeta report generation failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") vegeta_data = parse_json_file(vegeta_json, "Vegeta") - # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" @@ -224,22 +245,22 @@ def server_responding?(uri) puts "Error: #{e.message}" failure_metrics(e) end - - add_summary_line("Vegeta", *vegeta_metrics) end -# k6 -if TOOLS.include?("k6") - k6_metrics = begin - puts "\n===> k6" +# Benchmark a single route with k6 +def run_k6_benchmark(target, route_name) + return nil 
unless TOOLS.include?("k6") + + begin + puts "\n===> k6: #{route_name}" - k6_script_file = "#{OUTDIR}/k6_test.js" - k6_summary_json = "#{OUTDIR}/k6_summary.json" - k6_txt = "#{OUTDIR}/k6.txt" + k6_script_file = "#{OUTDIR}/#{route_name}_k6_test.js" + k6_summary_json = "#{OUTDIR}/#{route_name}_k6_summary.json" + k6_txt = "#{OUTDIR}/#{route_name}_k6.txt" # Configure k6 scenarios k6_scenarios = - if is_max_rate + if IS_MAX_RATE <<~JS.strip { max_rate: { @@ -273,11 +294,9 @@ def server_responding?(uri) }; export default function () { - const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); + const response = http.get('#{target}', { timeout: '#{REQUEST_TIMEOUT}' }); check(response, { 'status=200': r => r.status === 200, - // you can add more if needed: - // 'status=500': r => r.status === 500, }); } JS @@ -294,8 +313,6 @@ def server_responding?(uri) # Status: compute successful vs failed requests k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0 k6_checks = k6_data.dig("root_group", "checks") || {} - # Extract status code from check name (e.g., "status=200" -> "200") - # Handle both "status=XXX" format and other potential formats k6_status_parts = k6_checks.map do |name, check| status_label = name.start_with?("status=") ? name.delete_prefix("status=") : name "#{status_label}=#{check['passes']}" @@ -310,8 +327,44 @@ def server_responding?(uri) puts "Error: #{e.message}" failure_metrics(e) end +end + +# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength + +# Initialize summary file +File.write(SUMMARY_TXT, "") +add_summary_line("Route", "Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") + +# Run benchmarks for each route +routes.each do |route| + separator = "=" * 80 + puts "\n#{separator}" + puts "Benchmarking route: #{route}" + puts separator + + target = URI.parse("http://#{BASE_URL}#{route}") + + # Warm up server for this route + puts "Warming up server for #{route} with 10 requests..." + 10.times do + server_responding?(target) + sleep 0.5 + end + puts "Warm-up complete for #{route}" + + # Sanitize route name for filenames + route_name = route.gsub(%r{^/}, "").tr("/", "_") + route_name = "root" if route_name.empty? + + # Run each benchmark tool + fortio_metrics = run_fortio_benchmark(target, route_name) + add_summary_line(route, "Fortio", *fortio_metrics) if fortio_metrics + + vegeta_metrics = run_vegeta_benchmark(target, route_name) + add_summary_line(route, "Vegeta", *vegeta_metrics) if vegeta_metrics - add_summary_line("k6", *k6_metrics) + k6_metrics = run_k6_benchmark(target, route_name) + add_summary_line(route, "k6", *k6_metrics) if k6_metrics end puts "\nSummary saved to #{SUMMARY_TXT}" From dc2c0cda124bf429fc3126640f2406b235f20df9 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 12:26:49 +0000 Subject: [PATCH 21/65] Fix Fortio failure on server_side_log_throw_raise --- spec/performance/bench.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 78e9e951ef..87bf68960b 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -177,6 +177,8 @@ def run_fortio_benchmark(target, route_name) *fortio_args, "-t", DURATION, "-timeout", REQUEST_TIMEOUT, + # Allow redirects. Could use -L instead, but it uses the slower HTTP client. 
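+      # (Per the fortio docs, the default fast client does not follow
+      # redirects and counts them, like other non-2xx replies, as errors;
+      # without -allow-initial-errors a run whose first requests error out
+      # is aborted. With RATE=50 and the defaults above, the assembled
+      # command looks roughly like:
+      #   fortio load -qps 50 -uniform -nocatchup -c 10 -t 30s -timeout 60s \
+      #     -allow-initial-errors -json bench_results/<route>_fortio.json <target>
+      # where <route> and <target> stand in for the per-route values.)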
+ "-allow-initial-errors", "-json", fortio_json, target ].join(" ") From 0fbb2804b5df1435cbfd1eef73ea0a704f7a17f8 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 12:48:49 +0000 Subject: [PATCH 22/65] Allow specifying routes --- .github/workflows/benchmark.yml | 5 +++++ spec/performance/bench.rb | 9 ++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index a16b74cccc..695ef7ba3b 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -9,6 +9,10 @@ on: required: false default: false type: boolean + routes: + description: 'Comma-separated routes to benchmark (e.g., "/,/hello"). Leave empty to auto-detect from Rails.' + required: false + type: string rate: description: 'Requests per second (use "max" for maximum throughput)' required: false @@ -69,6 +73,7 @@ env: K6_VERSION: "1.4.2" VEGETA_VERSION: "12.13.0" # Benchmark parameters + ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} DURATION: ${{ github.event.inputs.duration || '30s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 87bf68960b..6b57d7cb46 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -10,6 +10,7 @@ # Benchmark parameters PRO = ENV.fetch("PRO", "false") == "true" APP_DIR = PRO ? "react_on_rails_pro/spec/dummy" : "spec/dummy" +ROUTES = ENV.fetch("ROUTES", nil) BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") # requests per second; if "max" will get maximum number of queries instead of a fixed rate RATE = ENV.fetch("RATE", "50") @@ -90,7 +91,12 @@ def get_benchmark_routes(app_dir) end # Get all routes to benchmark -routes = get_benchmark_routes(APP_DIR) +routes = + if ROUTES + ROUTES.split(",").map(&:strip) + else + get_benchmark_routes(APP_DIR) + end validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") @@ -109,6 +115,7 @@ def get_benchmark_routes(app_dir) puts <<~PARAMS Benchmark parameters: - APP_DIR: #{APP_DIR} + - ROUTES: #{ROUTES || 'auto-detect from Rails'} - BASE_URL: #{BASE_URL} - RATE: #{RATE} - DURATION: #{DURATION} From c2085722339bda15b6d613dbb9e4b1cedfa75119 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 14:10:20 +0000 Subject: [PATCH 23/65] Add pro benchmarks --- .github/workflows/benchmark.yml | 227 +++++++++++++++--- react_on_rails_pro/spec/dummy/Procfile.prod | 6 + react_on_rails_pro/spec/dummy/bin/prod | 40 +++ react_on_rails_pro/spec/dummy/bin/prod-assets | 8 + .../dummy/config/environments/production.rb | 3 + 5 files changed, 248 insertions(+), 36 deletions(-) create mode 100644 react_on_rails_pro/spec/dummy/Procfile.prod create mode 100755 react_on_rails_pro/spec/dummy/bin/prod create mode 100755 react_on_rails_pro/spec/dummy/bin/prod-assets diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 695ef7ba3b..11cf5381db 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -29,33 +29,34 @@ on: default: '60s' type: string connections: - description: 'Concurrent connections/virtual users' + description: 'Concurrent connections/virtual users (also used as max)' required: false default: 10 type: number - max_connections: - description: 'Maximum connections/virtual users' - required: false - type: number web_concurrency: description: 'Number of Puma worker processes' required: false default: 4 type: number - 
rails_max_threads: - description: 'Maximum number of Puma threads' + rails_threads: + description: 'Number of Puma threads (min and max will be same)' required: false default: 3 type: number - rails_min_threads: - description: 'Minimum number of Puma threads (same as maximum if not set)' - required: false - type: number tools: description: 'Comma-separated list of tools to run' required: false default: 'fortio,vegeta,k6' type: string + app_version: + description: 'Which app version to benchmark' + required: false + default: 'both' + type: choice + options: + - 'both' + - 'core_only' + - 'pro_only' push: branches: - master @@ -78,15 +79,17 @@ env: DURATION: ${{ github.event.inputs.duration || '30s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} CONNECTIONS: ${{ github.event.inputs.connections || 10 }} - MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || 10 }} + MAX_CONNECTIONS: ${{ github.event.inputs.connections || 10 }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} - RAILS_MAX_THREADS: ${{ github.event.inputs.rails_max_threads || 3 }} - RAILS_MIN_THREADS: ${{ github.event.inputs.rails_min_threads || github.event.inputs.rails_max_threads || 3 }} + RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} + RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: benchmark: runs-on: ubuntu-latest + env: + REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE }} steps: # ============================================ @@ -183,8 +186,8 @@ jobs: - name: Setup Ruby uses: ruby/setup-ruby@v1 with: - ruby-version: '3.4' - bundler: 2.5.9 + ruby-version: ${{ env.RUBY_VERSION }} + bundler: ${{ env.BUNDLER_VERSION }} - name: Get gem home directory run: echo "GEM_HOME_PATH=$(gem env home)" >> $GITHUB_ENV @@ -229,29 +232,34 @@ jobs: run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish - name: yalc add react-on-rails + if: github.event.inputs.app_version != 'pro_only' run: cd spec/dummy && yalc add react-on-rails - name: Install Node modules with Yarn for dummy app + if: github.event.inputs.app_version != 'pro_only' run: cd spec/dummy && yarn install --no-progress --no-emoji - name: Save dummy app ruby gems to cache + if: github.event.inputs.app_version != 'pro_only' uses: actions/cache@v4 with: path: spec/dummy/vendor/bundle key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }} - name: Install Ruby Gems for dummy app + if: github.event.inputs.app_version != 'pro_only' run: | cd spec/dummy bundle lock --add-platform 'x86_64-linux' if ! bundle check --path=vendor/bundle; then - bundle _2.5.9_ install --path=vendor/bundle --jobs=4 --retry=3 + bundle _2.5.4_ install --path=vendor/bundle --jobs=4 --retry=3 fi - name: generate file system-based packs run: cd spec/dummy && RAILS_ENV="production" bundle exec rake react_on_rails:generate_packs - name: Prepare production assets + if: github.event.inputs.app_version != 'pro_only' run: | set -e # Exit on any error echo "🔨 Building production assets..." @@ -265,6 +273,7 @@ jobs: echo "✅ Production assets built successfully" - name: Start production server + if: github.event.inputs.app_version != 'pro_only' run: | set -e # Exit on any error echo "🚀 Starting production server..." 
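          # (the Core and Pro dummy servers both bind port 3001, so the two
          # benchmark suites below have to run sequentially, never in parallel)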
@@ -289,14 +298,15 @@ jobs: exit 1 # ============================================ - # STEP 5: RUN BENCHMARKS + # STEP 5: RUN CORE BENCHMARKS # ============================================ - - name: Execute benchmark suite - timeout-minutes: 20 + - name: Execute Core benchmark suite + if: github.event.inputs.app_version != 'pro_only' + timeout-minutes: 120 run: | set -e # Exit on any error - echo "🏃 Running benchmark suite..." + echo "🏃 Running Core benchmark suite..." if ! ruby spec/performance/bench.rb; then echo "❌ ERROR: Benchmark execution failed" @@ -305,7 +315,8 @@ jobs: echo "✅ Benchmark suite completed successfully" - - name: Validate benchmark results + - name: Validate Core benchmark results + if: github.event.inputs.app_version != 'pro_only' run: | set -e # Exit on any error echo "🔍 Validating benchmark output files..." @@ -343,39 +354,183 @@ jobs: echo "Continuing with available results..." fi - # ============================================ - # STEP 6: COLLECT BENCHMARK RESULTS - # ============================================ - - - name: Upload benchmark results + - name: Upload Core benchmark results uses: actions/upload-artifact@v4 - if: always() # Upload even if benchmark fails + if: github.event.inputs.app_version != 'pro_only' && always() with: - name: benchmark-results-${{ github.run_number }} + name: benchmark-core-results-${{ github.run_number }} path: bench_results/ retention-days: 30 if-no-files-found: warn - - name: Verify artifact upload - if: success() + # ============================================ + # STEP 6: SETUP PRO APPLICATION SERVER + # ============================================ + - name: Cache Pro package node modules + if: github.event.inputs.app_version != 'core_only' + uses: actions/cache@v4 + with: + path: react_on_rails_pro/node_modules + key: v4-pro-package-node-modules-cache-${{ hashFiles('react_on_rails_pro/yarn.lock') }} + + - name: Cache Pro dummy app node modules + if: github.event.inputs.app_version != 'core_only' + uses: actions/cache@v4 + with: + path: react_on_rails_pro/spec/dummy/node_modules + key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/yarn.lock') }} + + - name: Cache Pro dummy app Ruby gems + if: github.event.inputs.app_version != 'core_only' + uses: actions/cache@v4 + with: + path: react_on_rails_pro/spec/dummy/vendor/bundle + key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }} + + - name: Install Node modules with Yarn for Pro package + if: github.event.inputs.app_version != 'core_only' + run: | + cd react_on_rails_pro + sudo yarn global add yalc + yarn install --frozen-lockfile --no-progress --no-emoji + + - name: Install Node modules with Yarn for Pro dummy app + if: github.event.inputs.app_version != 'core_only' + run: cd react_on_rails_pro/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji + + - name: Cache Pro dummy app Ruby gems + if: env.RUN_PRO + uses: actions/cache@v4 + with: + path: react_on_rails_pro/spec/dummy/vendor/bundle + key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }} + + - name: Install Ruby Gems for Pro dummy app + if: github.event.inputs.app_version != 'core_only' + run: | + cd react_on_rails_pro/spec/dummy + bundle lock --add-platform 'x86_64-linux' + bundle config set path vendor/bundle + bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 + + - name: Generate file-system based entrypoints for Pro + if: github.event.inputs.app_version != 'core_only' 
+ run: cd react_on_rails_pro/spec/dummy && bundle exec rake react_on_rails:generate_packs + + - name: Prepare Pro production assets + if: github.event.inputs.app_version != 'core_only' run: | - echo "✅ Benchmark results uploaded as workflow artifacts" - echo "📦 Artifact name: benchmark-results-${{ github.run_number }}" - echo "🔗 Access artifacts from the Actions tab in GitHub" + set -e + echo "🔨 Building Pro production assets..." + cd react_on_rails_pro/spec/dummy + + if ! bin/prod-assets; then + echo "❌ ERROR: Failed to build production assets" + exit 1 + fi + + echo "✅ Production assets built successfully" + + - name: Start Pro production server + if: github.event.inputs.app_version != 'core_only' + run: | + set -e + echo "🚀 Starting Pro production server..." + cd react_on_rails_pro/spec/dummy + + # Start server in background + bin/prod & + echo "Server started in background" + + # Wait for server to be ready (max 30 seconds) + echo "⏳ Waiting for server to be ready..." + for i in {1..30}; do + if curl -fsS http://localhost:3001 > /dev/null; then + echo "✅ Server is ready and responding" + exit 0 + fi + echo " Attempt $i/30: Server not ready yet..." + sleep 1 + done + + echo "❌ ERROR: Server failed to start within 30 seconds" + exit 1 # ============================================ - # WORKFLOW COMPLETION + # STEP 7: RUN PRO BENCHMARKS # ============================================ + - name: Execute Pro benchmark suite + if: github.event.inputs.app_version != 'core_only' + timeout-minutes: 120 + run: | + set -e + echo "🏃 Running Pro benchmark suite..." + + if ! PRO=true ruby spec/performance/bench.rb; then + echo "❌ ERROR: Benchmark execution failed" + exit 1 + fi + + echo "✅ Benchmark suite completed successfully" + + - name: Validate Pro benchmark results + if: github.event.inputs.app_version != 'core_only' + run: | + set -e + echo "🔍 Validating Pro benchmark output files..." + + RESULTS_DIR="bench_results" + REQUIRED_FILES=("summary.txt") + MISSING_FILES=() + + if [ ! -d "${RESULTS_DIR}" ]; then + echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + exit 1 + fi + + echo "Generated files:" + ls -lh ${RESULTS_DIR}/ || true + echo "" + + for file in "${REQUIRED_FILES[@]}"; do + if [ ! -f "${RESULTS_DIR}/${file}" ]; then + MISSING_FILES+=("${file}") + fi + done + + if [ ${#MISSING_FILES[@]} -eq 0 ]; then + echo "✅ All required benchmark output files present" + echo "📊 Summary preview:" + head -20 ${RESULTS_DIR}/summary.txt || true + else + echo "⚠️ WARNING: Some required files are missing:" + printf ' - %s\n' "${MISSING_FILES[@]}" + echo "Continuing with available results..." 
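+          # (missing files are reported as a warning only; the step exits 0
+          # rather than failing the job, and the upload step keeps whatever
+          # results were generated)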
+ fi + + - name: Upload Pro benchmark results + uses: actions/upload-artifact@v4 + if: github.event.inputs.app_version != 'core_only' && always() + with: + name: benchmark-pro-results-${{ github.run_number }} + path: bench_results/ + retention-days: 30 + if-no-files-found: warn + + # ============================================ + # STEP 8: WORKFLOW COMPLETION + # ============================================ - name: Workflow summary if: always() run: | echo "📋 Benchmark Workflow Summary" - echo "==============================" + echo "====================================" echo "Status: ${{ job.status }}" echo "Run number: ${{ github.run_number }}" echo "Triggered by: ${{ github.actor }}" echo "Branch: ${{ github.ref_name }}" + echo "App version: ${{ github.event.inputs.app_version || 'both' }}" echo "" if [ "${{ job.status }}" == "success" ]; then echo "✅ All steps completed successfully" diff --git a/react_on_rails_pro/spec/dummy/Procfile.prod b/react_on_rails_pro/spec/dummy/Procfile.prod new file mode 100644 index 0000000000..d47e98ef15 --- /dev/null +++ b/react_on_rails_pro/spec/dummy/Procfile.prod @@ -0,0 +1,6 @@ +# Procfile for production mode (precompiled assets) + +rails: RAILS_ENV=production NODE_ENV=production bin/rails s -p 3001 + +# Start Node server for server rendering. +node-renderer: NODE_ENV=production RENDERER_LOG_LEVEL=error RENDERER_PORT=3800 node client/node-renderer.js diff --git a/react_on_rails_pro/spec/dummy/bin/prod b/react_on_rails_pro/spec/dummy/bin/prod new file mode 100755 index 0000000000..647a9c2d96 --- /dev/null +++ b/react_on_rails_pro/spec/dummy/bin/prod @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# Run only after ./prod-assets + +# Check if assets are precompiled +MANIFEST="public/webpack/production/manifest.json" + +if [ ! -d "public/assets" ]; then + echo "ERROR: public/assets not found. Run ./bin/prod-assets first" + exit 1 +fi + +if [ ! -f "$MANIFEST" ]; then + echo "ERROR: $MANIFEST not found. Run ./bin/prod-assets first" + exit 1 +fi + +# Simple up-to-date check: warn if source files are newer than manifest.json +if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer "$MANIFEST" 2>/dev/null | grep -q .; then + echo "WARNING: client or config has changes newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +if [ -f "yarn.lock" ] && [ "yarn.lock" -nt "$MANIFEST" ]; then + echo "WARNING: yarn.lock is newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +export NODE_ENV=production +export RAILS_ENV=production + +if command -v overmind &> /dev/null; then + overmind start -f Procfile.prod +elif command -v foreman &> /dev/null; then + foreman start -f Procfile.prod +else + echo "Installing foreman..." 
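+  # (one-off fallback install; foreman reads the same Procfile.prod that
+  # overmind would)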
+ gem install foreman + foreman start -f Procfile.prod +fi diff --git a/react_on_rails_pro/spec/dummy/bin/prod-assets b/react_on_rails_pro/spec/dummy/bin/prod-assets new file mode 100755 index 0000000000..96be6c50e8 --- /dev/null +++ b/react_on_rails_pro/spec/dummy/bin/prod-assets @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +export NODE_ENV=production +export RAILS_ENV=production +if [ "$CI" = "true" ]; then + bundle exec bootsnap precompile --gemfile app/ lib/ config/ +fi +bundle exec rails assets:precompile diff --git a/react_on_rails_pro/spec/dummy/config/environments/production.rb b/react_on_rails_pro/spec/dummy/config/environments/production.rb index 519aa382d6..1c8daba3b2 100644 --- a/react_on_rails_pro/spec/dummy/config/environments/production.rb +++ b/react_on_rails_pro/spec/dummy/config/environments/production.rb @@ -3,6 +3,9 @@ Rails.application.configure do # Settings specified here will take precedence over those in config/application.rb. + # Use a hardcoded secret for this test/dummy app (not for real production use) + config.secret_key_base = ENV.fetch("SECRET_KEY_BASE", "dummy-secret-key-base-for-testing-only") + # Code is not reloaded between requests. config.cache_classes = true From 561a4b56f20e157e3bc597b429c54e13405ad07a Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 14:11:11 +0000 Subject: [PATCH 24/65] Update Claude instructions --- CLAUDE.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CLAUDE.md b/CLAUDE.md index 1ebbea84ee..0f2460f780 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -157,6 +157,8 @@ Pre-commit hooks automatically run: - All linters: `rake lint` (runs ESLint and RuboCop) - ESLint only: `pnpm run lint` or `rake lint:eslint` - RuboCop only: `rake lint:rubocop` + - GitHub Action files (workflows, reusable actions, etc.): `actionlint` + - YAML files: `yamllint` (or validate the syntax with Ruby if it isn't installed). Do _not_ try to run RuboCop on `.yml` files. - **Code Formatting**: - Format code with Prettier: `rake autofix` - Check formatting without fixing: `pnpm run format.listDifferent` From 2035deea1f29912ecf0c8f8c745dbb592b241c22 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 20:08:30 +0000 Subject: [PATCH 25/65] Update prod-assets to include generate_packs --- .github/workflows/benchmark.yml | 11 ++++------- react_on_rails_pro/spec/dummy/bin/prod-assets | 1 + 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 11cf5381db..ddef809d4f 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -89,7 +89,8 @@ jobs: benchmark: runs-on: ubuntu-latest env: - REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE }} + SECRET_KEY_BASE: 'dummy-secret-key-for-ci-testing-not-used-in-production' + REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE_V2 }} steps: # ============================================ @@ -251,12 +252,8 @@ jobs: run: | cd spec/dummy bundle lock --add-platform 'x86_64-linux' - if ! 
bundle check --path=vendor/bundle; then - bundle _2.5.4_ install --path=vendor/bundle --jobs=4 --retry=3 - fi - - - name: generate file system-based packs - run: cd spec/dummy && RAILS_ENV="production" bundle exec rake react_on_rails:generate_packs + bundle config set path vendor/bundle + bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 - name: Prepare production assets if: github.event.inputs.app_version != 'pro_only' diff --git a/react_on_rails_pro/spec/dummy/bin/prod-assets b/react_on_rails_pro/spec/dummy/bin/prod-assets index 96be6c50e8..828b1e6ae8 100755 --- a/react_on_rails_pro/spec/dummy/bin/prod-assets +++ b/react_on_rails_pro/spec/dummy/bin/prod-assets @@ -5,4 +5,5 @@ export RAILS_ENV=production if [ "$CI" = "true" ]; then bundle exec bootsnap precompile --gemfile app/ lib/ config/ fi +bundle exec rails react_on_rails:generate_packs bundle exec rails assets:precompile From 6c3c55b95c15ede3c0bab00ea37f97710a53c984 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 20:34:07 +0000 Subject: [PATCH 26/65] Disable js_compressor and css_compressor --- react_on_rails_pro/Gemfile.development_dependencies | 1 - react_on_rails_pro/Gemfile.lock | 3 --- react_on_rails_pro/spec/dummy/Gemfile.lock | 3 --- .../spec/dummy/config/environments/production.rb | 5 +++-- 4 files changed, 3 insertions(+), 9 deletions(-) diff --git a/react_on_rails_pro/Gemfile.development_dependencies b/react_on_rails_pro/Gemfile.development_dependencies index 9c63e492d8..034cf5ea09 100644 --- a/react_on_rails_pro/Gemfile.development_dependencies +++ b/react_on_rails_pro/Gemfile.development_dependencies @@ -23,7 +23,6 @@ gem "pg" # Turbolinks makes following links in your web application faster. Read more: https://github.com/rails/turbolinks gem "turbolinks" gem "sqlite3", "~> 1.4" -gem "uglifier" gem "jquery-rails" gem "sprockets" gem "sass-rails" diff --git a/react_on_rails_pro/Gemfile.lock b/react_on_rails_pro/Gemfile.lock index 0872d940a7..4bcb787a23 100644 --- a/react_on_rails_pro/Gemfile.lock +++ b/react_on_rails_pro/Gemfile.lock @@ -458,8 +458,6 @@ GEM turbolinks-source (5.2.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - uglifier (4.2.0) - execjs (>= 0.3.0, < 3) unicode-display_width (2.6.0) uri (1.0.3) useragent (0.16.11) @@ -538,7 +536,6 @@ DEPENDENCIES sprockets sqlite3 (~> 1.4) turbolinks - uglifier web-console webdrivers (= 5.3.0) webmock diff --git a/react_on_rails_pro/spec/dummy/Gemfile.lock b/react_on_rails_pro/spec/dummy/Gemfile.lock index f440bc32d4..a47b8f6147 100644 --- a/react_on_rails_pro/spec/dummy/Gemfile.lock +++ b/react_on_rails_pro/spec/dummy/Gemfile.lock @@ -487,8 +487,6 @@ GEM turbolinks-source (5.2.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - uglifier (4.2.0) - execjs (>= 0.3.0, < 3) unicode-display_width (2.6.0) uri (1.0.3) useragent (0.16.11) @@ -580,7 +578,6 @@ DEPENDENCIES sprockets sqlite3 (~> 1.4) turbolinks - uglifier web-console webdrivers (= 5.3.0) webmock diff --git a/react_on_rails_pro/spec/dummy/config/environments/production.rb b/react_on_rails_pro/spec/dummy/config/environments/production.rb index 1c8daba3b2..330291b114 100644 --- a/react_on_rails_pro/spec/dummy/config/environments/production.rb +++ b/react_on_rails_pro/spec/dummy/config/environments/production.rb @@ -22,8 +22,9 @@ config.public_file_server.enabled = true # Compress JavaScripts and CSS. 
- config.assets.js_compressor = Uglifier.new(harmony: true) - config.assets.css_compressor = :csso + # JS/CSS compression handled by Webpack/Shakapacker, not needed for Sprockets + # config.assets.js_compressor = Uglifier.new(harmony: true) + # config.assets.css_compressor = :csso # Do not fallback to assets pipeline if a precompiled asset is missed. config.assets.compile = false From f93b2f0cef6c30535c74853342aac64f4e1e5b8d Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 21:22:11 +0000 Subject: [PATCH 27/65] Handle empty inputs correctly --- .github/workflows/benchmark.yml | 12 ++++++------ spec/performance/bench.rb | 27 ++++++++++++++++++--------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index ddef809d4f..a4ee561756 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -73,17 +73,17 @@ env: FORTIO_VERSION: "1.73.0" K6_VERSION: "1.4.2" VEGETA_VERSION: "12.13.0" - # Benchmark parameters + # Benchmark parameters (defaults in bench.rb unless overridden here for CI) ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION: ${{ github.event.inputs.duration || '30s' }} - REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} - CONNECTIONS: ${{ github.event.inputs.connections || 10 }} - MAX_CONNECTIONS: ${{ github.event.inputs.connections || 10 }} + DURATION: ${{ github.event.inputs.duration }} + REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }} + CONNECTIONS: ${{ github.event.inputs.connections }} + MAX_CONNECTIONS: ${{ github.event.inputs.connections }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} - TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} + TOOLS: ${{ github.event.inputs.tools }} jobs: benchmark: diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 6b57d7cb46..ffb3f714e8 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -7,23 +7,30 @@ require "net/http" require "uri" +# Helper to get env var with default, +# treating empty string and "0" as unset since they can come from the benchmark workflow. +def env_or_default(key, default) + value = ENV[key].to_s + value.empty? || value == "0" ? default : value +end + # Benchmark parameters PRO = ENV.fetch("PRO", "false") == "true" APP_DIR = PRO ? 
"react_on_rails_pro/spec/dummy" : "spec/dummy" -ROUTES = ENV.fetch("ROUTES", nil) -BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") +ROUTES = env_or_default("ROUTES", nil) +BASE_URL = env_or_default("BASE_URL", "localhost:3001") # requests per second; if "max" will get maximum number of queries instead of a fixed rate -RATE = ENV.fetch("RATE", "50") +RATE = env_or_default("RATE", "50") # concurrent connections/virtual users -CONNECTIONS = ENV.fetch("CONNECTIONS", "10").to_i +CONNECTIONS = env_or_default("CONNECTIONS", 10).to_i # maximum connections/virtual users -MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS).to_i +MAX_CONNECTIONS = env_or_default("MAX_CONNECTIONS", CONNECTIONS).to_i # benchmark duration (duration string like "30s", "1m", "90s") -DURATION = ENV.fetch("DURATION", "30s") +DURATION = env_or_default("DURATION", "30s") # request timeout (duration string as above) -REQUEST_TIMEOUT = ENV.fetch("REQUEST_TIMEOUT", "60s") +REQUEST_TIMEOUT = env_or_default("REQUEST_TIMEOUT", "60s") # Tools to run (comma-separated) -TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") +TOOLS = env_or_default("TOOLS", "fortio,vegeta,k6").split(",") OUTDIR = "bench_results" SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze @@ -93,11 +100,13 @@ def get_benchmark_routes(app_dir) # Get all routes to benchmark routes = if ROUTES - ROUTES.split(",").map(&:strip) + ROUTES.split(",").map(&:strip).reject(&:empty?) else get_benchmark_routes(APP_DIR) end +raise "No routes to benchmark" if routes.empty? + validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") From da8d33a8ac5ce1d110c9935556d0dbf82a016712 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 21:32:51 +0000 Subject: [PATCH 28/65] Fix app version handling in the benchmark workflow --- .github/workflows/benchmark.yml | 58 ++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index a4ee561756..416efbf9b1 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -73,6 +73,9 @@ env: FORTIO_VERSION: "1.73.0" K6_VERSION: "1.4.2" VEGETA_VERSION: "12.13.0" + # Determine which apps to run (default is 'pro_only' for all triggers) + RUN_CORE: ${{ (github.event.inputs.app_version || 'pro_only') != 'pro_only' && 'true' || '' }} + RUN_PRO: ${{ (github.event.inputs.app_version || 'pro_only') != 'core_only' && 'true' || '' }} # Benchmark parameters (defaults in bench.rb unless overridden here for CI) ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} @@ -233,30 +236,30 @@ jobs: run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish - name: yalc add react-on-rails - if: github.event.inputs.app_version != 'pro_only' + if: env.RUN_CORE run: cd spec/dummy && yalc add react-on-rails - - name: Install Node modules with Yarn for dummy app - if: github.event.inputs.app_version != 'pro_only' + - name: Install Node modules with Yarn for Core dummy app + if: env.RUN_CORE run: cd spec/dummy && yarn install --no-progress --no-emoji - - name: Save dummy app ruby gems to cache - if: github.event.inputs.app_version != 'pro_only' + - name: Save Core dummy app ruby gems to cache + if: env.RUN_CORE uses: actions/cache@v4 with: path: spec/dummy/vendor/bundle key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }} - - name: Install Ruby Gems 
for dummy app - if: github.event.inputs.app_version != 'pro_only' + - name: Install Ruby Gems for Core dummy app + if: env.RUN_CORE run: | cd spec/dummy bundle lock --add-platform 'x86_64-linux' bundle config set path vendor/bundle bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 - - name: Prepare production assets - if: github.event.inputs.app_version != 'pro_only' + - name: Prepare Core production assets + if: env.RUN_CORE run: | set -e # Exit on any error echo "🔨 Building production assets..." @@ -269,8 +272,8 @@ jobs: echo "✅ Production assets built successfully" - - name: Start production server - if: github.event.inputs.app_version != 'pro_only' + - name: Start Core production server + if: env.RUN_CORE run: | set -e # Exit on any error echo "🚀 Starting production server..." @@ -299,7 +302,7 @@ jobs: # ============================================ - name: Execute Core benchmark suite - if: github.event.inputs.app_version != 'pro_only' + if: env.RUN_CORE timeout-minutes: 120 run: | set -e # Exit on any error @@ -313,7 +316,7 @@ jobs: echo "✅ Benchmark suite completed successfully" - name: Validate Core benchmark results - if: github.event.inputs.app_version != 'pro_only' + if: env.RUN_CORE run: | set -e # Exit on any error echo "🔍 Validating benchmark output files..." @@ -353,7 +356,7 @@ jobs: - name: Upload Core benchmark results uses: actions/upload-artifact@v4 - if: github.event.inputs.app_version != 'pro_only' && always() + if: env.RUN_CORE && always() with: name: benchmark-core-results-${{ github.run_number }} path: bench_results/ @@ -364,35 +367,35 @@ jobs: # STEP 6: SETUP PRO APPLICATION SERVER # ============================================ - name: Cache Pro package node modules - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO uses: actions/cache@v4 with: path: react_on_rails_pro/node_modules key: v4-pro-package-node-modules-cache-${{ hashFiles('react_on_rails_pro/yarn.lock') }} - name: Cache Pro dummy app node modules - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO uses: actions/cache@v4 with: path: react_on_rails_pro/spec/dummy/node_modules key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/yarn.lock') }} - name: Cache Pro dummy app Ruby gems - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO uses: actions/cache@v4 with: path: react_on_rails_pro/spec/dummy/vendor/bundle key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }} - name: Install Node modules with Yarn for Pro package - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | cd react_on_rails_pro sudo yarn global add yalc yarn install --frozen-lockfile --no-progress --no-emoji - name: Install Node modules with Yarn for Pro dummy app - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: cd react_on_rails_pro/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji - name: Cache Pro dummy app Ruby gems @@ -403,7 +406,7 @@ jobs: key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }} - name: Install Ruby Gems for Pro dummy app - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | cd react_on_rails_pro/spec/dummy bundle lock --add-platform 'x86_64-linux' @@ -411,11 +414,11 @@ jobs: bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 - name: Generate file-system based entrypoints for Pro - if: github.event.inputs.app_version != 'core_only' 
+ if: env.RUN_PRO run: cd react_on_rails_pro/spec/dummy && bundle exec rake react_on_rails:generate_packs - name: Prepare Pro production assets - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | set -e echo "🔨 Building Pro production assets..." @@ -429,7 +432,7 @@ jobs: echo "✅ Production assets built successfully" - name: Start Pro production server - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | set -e echo "🚀 Starting Pro production server..." @@ -458,7 +461,7 @@ jobs: # ============================================ - name: Execute Pro benchmark suite - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO timeout-minutes: 120 run: | set -e @@ -472,7 +475,7 @@ jobs: echo "✅ Benchmark suite completed successfully" - name: Validate Pro benchmark results - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | set -e echo "🔍 Validating Pro benchmark output files..." @@ -508,7 +511,7 @@ jobs: - name: Upload Pro benchmark results uses: actions/upload-artifact@v4 - if: github.event.inputs.app_version != 'core_only' && always() + if: env.RUN_PRO && always() with: name: benchmark-pro-results-${{ github.run_number }} path: bench_results/ @@ -527,7 +530,8 @@ jobs: echo "Run number: ${{ github.run_number }}" echo "Triggered by: ${{ github.actor }}" echo "Branch: ${{ github.ref_name }}" - echo "App version: ${{ github.event.inputs.app_version || 'both' }}" + echo "Run Core: ${{ env.RUN_CORE }}" + echo "Run Pro: ${{ env.RUN_PRO }}" echo "" if [ "${{ job.status }}" == "success" ]; then echo "✅ All steps completed successfully" From 19bfb9ce4d8203166a554d7289b3613333c8db0b Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 22:11:00 +0000 Subject: [PATCH 29/65] Fix starting/stopping servers --- .github/workflows/benchmark.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 416efbf9b1..c0d5bbe426 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -315,6 +315,18 @@ jobs: echo "✅ Benchmark suite completed successfully" + - name: Stop Core production server + if: env.RUN_CORE && always() + run: | + echo "🛑 Stopping Core production server..." + # Find and kill the Puma process on port 3001 + if lsof -ti:3001 > /dev/null 2>&1; then + kill "$(lsof -ti:3001)" || true + echo "✅ Server stopped" + else + echo "ℹ️ No server running on port 3001" + fi + - name: Validate Core benchmark results if: env.RUN_CORE run: | @@ -474,6 +486,18 @@ jobs: echo "✅ Benchmark suite completed successfully" + - name: Stop Pro production server + if: env.RUN_PRO && always() + run: | + echo "🛑 Stopping Pro production server..." 
+ # Find and kill the Puma process on port 3001 + if lsof -ti:3001 > /dev/null 2>&1; then + kill "$(lsof -ti:3001)" || true + echo "✅ Server stopped" + else + echo "ℹ️ No server running on port 3001" + fi + - name: Validate Pro benchmark results if: env.RUN_PRO run: | From 696526c38349026131b90d49fe123b5dc1d2d077 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 09:55:31 +0000 Subject: [PATCH 30/65] Simplify validate steps --- .github/workflows/benchmark.yml | 76 +++++++++------------------------ 1 file changed, 20 insertions(+), 56 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index c0d5bbe426..ff1ea15358 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -330,41 +330,21 @@ jobs: - name: Validate Core benchmark results if: env.RUN_CORE run: | - set -e # Exit on any error - echo "🔍 Validating benchmark output files..." - - RESULTS_DIR="bench_results" - REQUIRED_FILES=("summary.txt") - MISSING_FILES=() + set -e + echo "🔍 Validating benchmark results..." - # Check if results directory exists - if [ ! -d "${RESULTS_DIR}" ]; then - echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + if [ ! -f "bench_results/summary.txt" ]; then + echo "❌ ERROR: benchmark summary file not found" exit 1 fi - - # List all generated files - echo "Generated files:" - ls -lh ${RESULTS_DIR}/ || true - echo "" - # Check for required files - for file in "${REQUIRED_FILES[@]}"; do - if [ ! -f "${RESULTS_DIR}/${file}" ]; then - MISSING_FILES+=("${file}") - fi - done - - # Report validation results - if [ ${#MISSING_FILES[@]} -eq 0 ]; then - echo "✅ All required benchmark output files present" - echo "📊 Summary preview:" - head -20 ${RESULTS_DIR}/summary.txt || true - else - echo "⚠️ WARNING: Some required files are missing:" - printf ' - %s\n' "${MISSING_FILES[@]}" - echo "Continuing with available results..." - fi + echo "✅ Benchmark results found" + echo "" + echo "📊 Summary:" + column -t -s $'\t' bench_results/summary.txt + echo "" + echo "Generated files:" + ls -lh bench_results/ - name: Upload Core benchmark results uses: actions/upload-artifact@v4 @@ -502,36 +482,20 @@ jobs: if: env.RUN_PRO run: | set -e - echo "🔍 Validating Pro benchmark output files..." - - RESULTS_DIR="bench_results" - REQUIRED_FILES=("summary.txt") - MISSING_FILES=() + echo "🔍 Validating benchmark results..." - if [ ! -d "${RESULTS_DIR}" ]; then - echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + if [ ! -f "bench_results/summary.txt" ]; then + echo "❌ ERROR: benchmark summary file not found" exit 1 fi - echo "Generated files:" - ls -lh ${RESULTS_DIR}/ || true + echo "✅ Benchmark results found" echo "" - - for file in "${REQUIRED_FILES[@]}"; do - if [ ! -f "${RESULTS_DIR}/${file}" ]; then - MISSING_FILES+=("${file}") - fi - done - - if [ ${#MISSING_FILES[@]} -eq 0 ]; then - echo "✅ All required benchmark output files present" - echo "📊 Summary preview:" - head -20 ${RESULTS_DIR}/summary.txt || true - else - echo "⚠️ WARNING: Some required files are missing:" - printf ' - %s\n' "${MISSING_FILES[@]}" - echo "Continuing with available results..." 
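One caveat worth noting about the kill used in the stop-server steps above: `kill "$(lsof -ti:3001)"` expands all PIDs into a single newline-joined argument, so it fails whenever more than one process is bound to port 3001 (a later patch in this series switches to pkill for exactly this reason). A multi-PID-safe Ruby sketch, assuming lsof is available on the runner:

    # Terminate every process bound to port 3001, one PID at a time
    pids = `lsof -ti:3001`.split.map(&:to_i)
    pids.each { |pid| Process.kill("TERM", pid) }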
- fi + echo "📊 Summary:" + column -t -s $'\t' bench_results/summary.txt + echo "" + echo "Generated files:" + ls -lh bench_results/ - name: Upload Pro benchmark results uses: actions/upload-artifact@v4 From c57688d2aecd77bb53231e8b6b9712113261df48 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 10:04:27 +0000 Subject: [PATCH 31/65] Temp config to speed up --- .github/workflows/benchmark.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index ff1ea15358..e4b8eb592e 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -74,19 +74,20 @@ env: K6_VERSION: "1.4.2" VEGETA_VERSION: "12.13.0" # Determine which apps to run (default is 'pro_only' for all triggers) - RUN_CORE: ${{ (github.event.inputs.app_version || 'pro_only') != 'pro_only' && 'true' || '' }} - RUN_PRO: ${{ (github.event.inputs.app_version || 'pro_only') != 'core_only' && 'true' || '' }} + RUN_CORE: ${{ (github.event.inputs.app_version || 'both') != 'pro_only' && 'true' || '' }} + RUN_PRO: ${{ (github.event.inputs.app_version || 'both') != 'core_only' && 'true' || '' }} # Benchmark parameters (defaults in bench.rb unless overridden here for CI) - ROUTES: ${{ github.event.inputs.routes }} + # FIXME: default ROUTES, TOOLS and DURATION are set to speed up tests, remove before merging + ROUTES: ${{ github.event.inputs.routes || '/' }} RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION: ${{ github.event.inputs.duration }} + DURATION: ${{ github.event.inputs.duration || '5s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }} CONNECTIONS: ${{ github.event.inputs.connections }} MAX_CONNECTIONS: ${{ github.event.inputs.connections }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} - TOOLS: ${{ github.event.inputs.tools }} + TOOLS: ${{ github.event.inputs.tools || 'fortio' }} jobs: benchmark: From d1b4eb3fe6601102a1d874003143acc64d0eb283 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 10:10:56 +0000 Subject: [PATCH 32/65] Optimize tools installation --- .github/workflows/benchmark.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index e4b8eb592e..b6d0d09811 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -143,13 +143,14 @@ jobs: - name: Cache Fortio binary id: cache-fortio + if: contains(env.TOOLS, 'fortio') uses: actions/cache@v4 with: path: ~/bin/fortio key: fortio-${{ runner.os }}-${{ runner.arch }}-${{ env.FORTIO_VERSION }} - name: Install Fortio - if: steps.cache-fortio.outputs.cache-hit != 'true' + if: contains(env.TOOLS, 'fortio') && steps.cache-fortio.outputs.cache-hit != 'true' run: | echo "📦 Installing Fortio v${FORTIO_VERSION}" @@ -162,13 +163,14 @@ jobs: - name: Cache Vegeta binary id: cache-vegeta + if: contains(env.TOOLS, 'vegeta') uses: actions/cache@v4 with: path: ~/bin/vegeta key: vegeta-${{ runner.os }}-${{ runner.arch }}-${{ env.VEGETA_VERSION }} - name: Install Vegeta - if: steps.cache-vegeta.outputs.cache-hit != 'true' + if: contains(env.TOOLS, 'vegeta') && steps.cache-vegeta.outputs.cache-hit != 'true' run: | echo "📦 Installing Vegeta v${VEGETA_VERSION}" @@ -180,6 +182,7 @@ jobs: mv vegeta ~/bin/ - name: Setup k6 + if: contains(env.TOOLS, 'k6') uses: 
grafana/setup-k6-action@v1 with: k6-version: ${{ env.K6_VERSION }} From 0748d81a586219034c52376693902c569489aa03 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 11:48:59 +0000 Subject: [PATCH 33/65] Add logging to server check --- spec/performance/bench.rb | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index ffb3f714e8..d861cb2061 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -140,9 +140,9 @@ def get_benchmark_routes(app_dir) # Helper method to check if server is responding def server_responding?(uri) response = Net::HTTP.get_response(uri) - response.is_a?(Net::HTTPSuccess) -rescue StandardError - false + { success: response.is_a?(Net::HTTPSuccess), info: "HTTP #{response.code} #{response.message}" } +rescue StandardError => e + { success: false, info: "#{e.class.name}: #{e.message}" } end # Wait for the server to be ready @@ -150,10 +150,24 @@ def server_responding?(uri) puts "Checking server availability at #{BASE_URL}..." test_uri = URI.parse("http://#{BASE_URL}#{routes.first}") start_time = Time.now +attempt_count = 0 loop do - break if server_responding?(test_uri) + attempt_count += 1 + attempt_start = Time.now + result = server_responding?(test_uri) + attempt_duration = Time.now - attempt_start + elapsed = Time.now - start_time + + # rubocop:disable Layout/LineLength + if result[:success] + puts " ✅ Attempt #{attempt_count} at #{elapsed.round(2)}s: SUCCESS - #{result[:info]} (took #{attempt_duration.round(3)}s)" + break + else + puts " ❌ Attempt #{attempt_count} at #{elapsed.round(2)}s: FAILED - #{result[:info]} (took #{attempt_duration.round(3)}s)" + end + # rubocop:enable Layout/LineLength - raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC + raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if elapsed > TIMEOUT_SEC sleep 1 end From fe3de7c71bc06250332355e5ab5f9b10970abc9e Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 12:02:21 +0000 Subject: [PATCH 34/65] Make installs frozen --- .github/workflows/benchmark.yml | 10 +++++----- react_on_rails/spec/dummy/Gemfile.lock | 3 +++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index b6d0d09811..e42552f9ff 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -245,7 +245,7 @@ jobs: - name: Install Node modules with Yarn for Core dummy app if: env.RUN_CORE - run: cd spec/dummy && yarn install --no-progress --no-emoji + run: cd spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji - name: Save Core dummy app ruby gems to cache if: env.RUN_CORE @@ -258,9 +258,9 @@ jobs: if: env.RUN_CORE run: | cd spec/dummy - bundle lock --add-platform 'x86_64-linux' bundle config set path vendor/bundle - bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 + bundle config set frozen true + bundle _${BUNDLER_VERSION}_ install --jobs=4 --retry=3 - name: Prepare Core production assets if: env.RUN_CORE @@ -405,9 +405,9 @@ jobs: if: env.RUN_PRO run: | cd react_on_rails_pro/spec/dummy - bundle lock --add-platform 'x86_64-linux' bundle config set path vendor/bundle - bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 + bundle config set frozen true + bundle _${BUNDLER_VERSION}_ install --jobs=4 --retry=3 - name: Generate file-system based entrypoints for Pro if: 
env.RUN_PRO diff --git a/react_on_rails/spec/dummy/Gemfile.lock b/react_on_rails/spec/dummy/Gemfile.lock index 27fe392450..19085e901f 100644 --- a/react_on_rails/spec/dummy/Gemfile.lock +++ b/react_on_rails/spec/dummy/Gemfile.lock @@ -197,6 +197,8 @@ GEM nokogiri (1.18.10) mini_portile2 (~> 2.8.2) racc (~> 1.4) + nokogiri (1.18.10-x86_64-linux-gnu) + racc (~> 1.4) ostruct (0.6.3) package_json (0.1.0) parallel (1.24.0) @@ -423,6 +425,7 @@ GEM PLATFORMS ruby + x86_64-linux DEPENDENCIES amazing_print From 504f35cab29dc1712e8a2cd46b740fe1fa3986f0 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 12:05:34 +0000 Subject: [PATCH 35/65] Allow redirects in server_responding --- spec/performance/bench.rb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index d861cb2061..dab5bce43a 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -140,7 +140,11 @@ def get_benchmark_routes(app_dir) # Helper method to check if server is responding def server_responding?(uri) response = Net::HTTP.get_response(uri) - { success: response.is_a?(Net::HTTPSuccess), info: "HTTP #{response.code} #{response.message}" } + # Accept both success (2xx) and redirect (3xx) responses as "server is responding" + success = response.is_a?(Net::HTTPSuccess) || response.is_a?(Net::HTTPRedirection) + info = "HTTP #{response.code} #{response.message}" + info += " -> #{response['location']}" if response.is_a?(Net::HTTPRedirection) && response["location"] + { success: success, info: info } rescue StandardError => e { success: false, info: "#{e.class.name}: #{e.message}" } end From fb6d679ba6cc6f38dc9e99784d0af539c848499b Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 12:24:32 +0000 Subject: [PATCH 36/65] Try full Pro benchmark --- .github/workflows/benchmark.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index e42552f9ff..57c7907bcd 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -77,17 +77,16 @@ env: RUN_CORE: ${{ (github.event.inputs.app_version || 'both') != 'pro_only' && 'true' || '' }} RUN_PRO: ${{ (github.event.inputs.app_version || 'both') != 'core_only' && 'true' || '' }} # Benchmark parameters (defaults in bench.rb unless overridden here for CI) - # FIXME: default ROUTES, TOOLS and DURATION are set to speed up tests, remove before merging - ROUTES: ${{ github.event.inputs.routes || '/' }} + ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION: ${{ github.event.inputs.duration || '5s' }} + DURATION: ${{ github.event.inputs.duration }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }} CONNECTIONS: ${{ github.event.inputs.connections }} MAX_CONNECTIONS: ${{ github.event.inputs.connections }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} - TOOLS: ${{ github.event.inputs.tools || 'fortio' }} + TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: benchmark: From 2c615bc4cc5075ba08ee9b012c3e88f88d534c8e Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 1 Dec 2025 12:13:17 +0000 Subject: [PATCH 37/65] Update Core dummy path --- .github/workflows/benchmark.yml | 14 +++++++------- spec/performance/bench.rb | 2 +- 2 files changed, 8 insertions(+), 8 
deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 57c7907bcd..de930b738c 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -240,23 +240,23 @@ jobs: - name: yalc add react-on-rails if: env.RUN_CORE - run: cd spec/dummy && yalc add react-on-rails + run: cd react_on_rails/spec/dummy && yalc add react-on-rails - name: Install Node modules with Yarn for Core dummy app if: env.RUN_CORE - run: cd spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji + run: cd react_on_rails/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji - name: Save Core dummy app ruby gems to cache if: env.RUN_CORE uses: actions/cache@v4 with: - path: spec/dummy/vendor/bundle - key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }} + path: react_on_rails/spec/dummy/vendor/bundle + key: v4-core-dummy-app-gem-cache-${{ hashFiles('react_on_rails/spec/dummy/Gemfile.lock') }} - name: Install Ruby Gems for Core dummy app if: env.RUN_CORE run: | - cd spec/dummy + cd react_on_rails/spec/dummy bundle config set path vendor/bundle bundle config set frozen true bundle _${BUNDLER_VERSION}_ install --jobs=4 --retry=3 @@ -266,7 +266,7 @@ jobs: run: | set -e # Exit on any error echo "🔨 Building production assets..." - cd spec/dummy + cd react_on_rails/spec/dummy if ! bin/prod-assets; then echo "❌ ERROR: Failed to build production assets" @@ -280,7 +280,7 @@ jobs: run: | set -e # Exit on any error echo "🚀 Starting production server..." - cd spec/dummy + cd react_on_rails/spec/dummy # Start server in background bin/prod & diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index dab5bce43a..87a27eccd3 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -16,7 +16,7 @@ def env_or_default(key, default) # Benchmark parameters PRO = ENV.fetch("PRO", "false") == "true" -APP_DIR = PRO ? "react_on_rails_pro/spec/dummy" : "spec/dummy" +APP_DIR = PRO ? 
"react_on_rails_pro/spec/dummy" : "react_on_rails/spec/dummy" ROUTES = env_or_default("ROUTES", nil) BASE_URL = env_or_default("BASE_URL", "localhost:3001") # requests per second; if "max" will get maximum number of queries instead of a fixed rate From b7a43b45e7875377e4e0954dea90188602fd2b25 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 2 Dec 2025 08:15:50 +0000 Subject: [PATCH 38/65] Switch benchmark.yml to pnpm and new directory structure --- .github/workflows/benchmark.yml | 68 ++++++++++++----------- react_on_rails/spec/dummy/bin/prod | 4 +- react_on_rails/spec/dummy/bin/prod-assets | 2 +- react_on_rails_pro/spec/dummy/bin/prod | 4 +- 4 files changed, 41 insertions(+), 37 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index de930b738c..628f76c3e6 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -213,12 +213,18 @@ jobs: - name: Fix dependency for libyaml-dev run: sudo apt install libyaml-dev -y + # Follow https://github.com/pnpm/action-setup?tab=readme-ov-file#use-cache-to-reduce-installation-time + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + cache: true + cache_dependency_path: '**/pnpm-lock.yaml' + run_install: false + - name: Setup Node uses: actions/setup-node@v4 with: node-version: '22' - cache: yarn - cache-dependency-path: '**/yarn.lock' - name: Print system information run: | @@ -227,24 +233,34 @@ jobs: echo "Current directory: "; pwd echo "Ruby version: "; ruby -v echo "Node version: "; node -v - echo "Yarn version: "; yarn --version + echo "Pnpm version: "; pnpm --version echo "Bundler version: "; bundle --version - - name: Install Node modules with Yarn for renderer package + - name: Install Node modules with Pnpm for all packages run: | - yarn install --no-progress --no-emoji --frozen-lockfile - npm install --global yalc + pnpm install --recursive --frozen-lockfile + pnpm add --global yalc - name: yalc publish for react-on-rails - run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish + run: cd packages/react-on-rails && yalc publish - - name: yalc add react-on-rails + # TODO only needed while we use --ignore-workspace below + - name: Cache core dummy app node modules if: env.RUN_CORE - run: cd react_on_rails/spec/dummy && yalc add react-on-rails + uses: actions/cache@v4 + with: + path: react_on_rails/spec/dummy/node_modules + key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('pnpm-lock.yaml', 'react_on_rails/spec/dummy/package.json') }} - - name: Install Node modules with Yarn for Core dummy app + - name: Install Node modules for the dummy app if: env.RUN_CORE - run: cd react_on_rails/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji + # TODO simplify this both here and in integration tests + # --ignore-workspace prevents pnpm from treating this as part of the parent workspace + # The dummy app doesn't have a pnpm-lock.yaml and shouldn't use frozen lockfile + run: | + cd react_on_rails/spec/dummy + yalc add react-on-rails + pnpm install --ignore-workspace - name: Save Core dummy app ruby gems to cache if: env.RUN_CORE @@ -361,37 +377,25 @@ jobs: # ============================================ # STEP 6: SETUP PRO APPLICATION SERVER # ============================================ - - name: Cache Pro package node modules - if: env.RUN_PRO - uses: actions/cache@v4 - with: - path: react_on_rails_pro/node_modules - key: v4-pro-package-node-modules-cache-${{ hashFiles('react_on_rails_pro/yarn.lock') 
}} - - name: Cache Pro dummy app node modules if: env.RUN_PRO uses: actions/cache@v4 with: path: react_on_rails_pro/spec/dummy/node_modules - key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/yarn.lock') }} + key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('pnpm-lock.yaml', 'react_on_rails_pro/spec/dummy/package.json') }} - - name: Cache Pro dummy app Ruby gems + - name: yalc publish for react-on-rails-pro if: env.RUN_PRO - uses: actions/cache@v4 - with: - path: react_on_rails_pro/spec/dummy/vendor/bundle - key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }} + run: cd packages/react-on-rails-pro && yalc publish - - name: Install Node modules with Yarn for Pro package + # Same reason for --ignore-workspace as for core + # TODO Remove it here as well + - name: Install Node modules with Pnpm for Pro dummy app if: env.RUN_PRO run: | - cd react_on_rails_pro - sudo yarn global add yalc - yarn install --frozen-lockfile --no-progress --no-emoji - - - name: Install Node modules with Yarn for Pro dummy app - if: env.RUN_PRO - run: cd react_on_rails_pro/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji + cd react_on_rails_pro/spec/dummy + yalc add react-on-rails-pro + pnpm install --ignore-workspace - name: Cache Pro dummy app Ruby gems if: env.RUN_PRO diff --git a/react_on_rails/spec/dummy/bin/prod b/react_on_rails/spec/dummy/bin/prod index 35d0d355ce..81658fa284 100755 --- a/react_on_rails/spec/dummy/bin/prod +++ b/react_on_rails/spec/dummy/bin/prod @@ -21,8 +21,8 @@ if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer " echo "Consider running ./bin/prod-assets to rebuild" fi -if [ -f "yarn.lock" ] && [ "yarn.lock" -nt "$MANIFEST" ]; then - echo "WARNING: yarn.lock is newer than compiled assets" +if [ -f "pnpm-lock.yaml" ] && [ "pnpm-lock.yaml" -nt "$MANIFEST" ]; then + echo "WARNING: pnpm-lock.yaml is newer than compiled assets" echo "Consider running ./bin/prod-assets to rebuild" fi diff --git a/react_on_rails/spec/dummy/bin/prod-assets b/react_on_rails/spec/dummy/bin/prod-assets index cf493134fa..f5f08249b1 100755 --- a/react_on_rails/spec/dummy/bin/prod-assets +++ b/react_on_rails/spec/dummy/bin/prod-assets @@ -5,5 +5,5 @@ export RAILS_ENV=production if [ "$CI" = "true" ]; then bundle exec bootsnap precompile --gemfile app/ lib/ config/ fi -yarn run build:rescript +pnpm run build:rescript bundle exec rails assets:precompile diff --git a/react_on_rails_pro/spec/dummy/bin/prod b/react_on_rails_pro/spec/dummy/bin/prod index 647a9c2d96..2e38289c15 100755 --- a/react_on_rails_pro/spec/dummy/bin/prod +++ b/react_on_rails_pro/spec/dummy/bin/prod @@ -21,8 +21,8 @@ if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer " echo "Consider running ./bin/prod-assets to rebuild" fi -if [ -f "yarn.lock" ] && [ "yarn.lock" -nt "$MANIFEST" ]; then - echo "WARNING: yarn.lock is newer than compiled assets" +if [ -f "pnpm-lock.yaml" ] && [ "pnpm-lock.yaml" -nt "$MANIFEST" ]; then + echo "WARNING: pnpm-lock.yaml is newer than compiled assets" echo "Consider running ./bin/prod-assets to rebuild" fi From 1f1dba53b01399c5deb7629e01ed87743a12af4f Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Thu, 4 Dec 2025 16:48:39 +0000 Subject: [PATCH 39/65] generate_packs is now needed in core as well --- react_on_rails/spec/dummy/bin/prod-assets | 1 + 1 file changed, 1 insertion(+) diff --git a/react_on_rails/spec/dummy/bin/prod-assets 
b/react_on_rails/spec/dummy/bin/prod-assets index f5f08249b1..d4651b9961 100755 --- a/react_on_rails/spec/dummy/bin/prod-assets +++ b/react_on_rails/spec/dummy/bin/prod-assets @@ -6,4 +6,5 @@ if [ "$CI" = "true" ]; then bundle exec bootsnap precompile --gemfile app/ lib/ config/ fi pnpm run build:rescript +bundle exec rake react_on_rails:generate_packs bundle exec rails assets:precompile From d0068c063bce63a8029d383b19988e18b01cbcc0 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 10 Dec 2025 01:05:42 +0000 Subject: [PATCH 40/65] Improve route handling --- spec/performance/bench.rb | 39 +++++++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 87a27eccd3..cb86e7dfcc 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -76,6 +76,31 @@ def add_summary_line(*parts) end end +# Check if a route has required parameters (e.g., /rsc_payload/:component_name) +# Required parameters are :param NOT inside parentheses +# Optional parameters are inside parentheses like (/:optional_param) +def route_has_required_params?(path) + # Remove optional parameter sections (anything in parentheses) + path_without_optional = path.gsub(/\([^)]*\)/, "") + # Check if remaining path contains :param + path_without_optional.include?(":") +end + +# Strip optional parameters from route path for use in URLs +# e.g., "/route(/:optional)(.:format)" -> "/route" +def strip_optional_params(route) + route.gsub(/\([^)]*\)/, "") +end + +# Sanitize route name for use in filenames +# Removes characters that GitHub Actions disallows in artifacts +def sanitize_route_name(route) + name = strip_optional_params(route).gsub(%r{^/}, "").tr("/", "_") + name = "root" if name.empty? + # Replace invalid characters: " : < > | * ? \r \n + name.gsub(/[":.<>|*?\r\n]+/, "_").squeeze("_").gsub(/^_|_$/, "") +end + # Get routes from the Rails app filtered by pages# and react_router# controllers def get_benchmark_routes(app_dir) routes_output = `cd #{app_dir} && bundle exec rails routes 2>&1` @@ -90,6 +115,13 @@ def get_benchmark_routes(app_dir) path = match[1] path = "/" if path.empty? # Handle root route + + # Skip routes with required parameters (e.g., /rsc_payload/:component_name) + if route_has_required_params?(path) + puts "Skipping route with required parameters: #{path}" + next + end + routes << path end raise "No pages# or react_router# routes found in #{app_dir}" if routes.empty? @@ -378,7 +410,8 @@ def run_k6_benchmark(target, route_name) puts "Benchmarking route: #{route}" puts separator - target = URI.parse("http://#{BASE_URL}#{route}") + # Strip optional parameters from route for URL (e.g., "(/:locale)" -> "") + target = URI.parse("http://#{BASE_URL}#{strip_optional_params(route)}") # Warm up server for this route puts "Warming up server for #{route} with 10 requests..." @@ -388,9 +421,7 @@ def run_k6_benchmark(target, route_name) end puts "Warm-up complete for #{route}" - # Sanitize route name for filenames - route_name = route.gsub(%r{^/}, "").tr("/", "_") - route_name = "root" if route_name.empty? 
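To make the behavior of the new route helpers concrete, here are illustrative calls (the example paths come from the comments above; the return values follow from the definitions in this patch):

    route_has_required_params?("/rsc_payload/:component_name(.:format)") # => true
    route_has_required_params?("/server_side_hello_world(.:format)")     # => false
    strip_optional_params("/server_side_hello_world(.:format)")          # => "/server_side_hello_world"
    sanitize_route_name("/server_side_hello_world(.:format)")            # => "server_side_hello_world"
    sanitize_route_name("/")                                             # => "root"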
+ route_name = sanitize_route_name(route) # Run each benchmark tool fortio_metrics = run_fortio_benchmark(target, route_name) From b7fd5cc30d128ca8f152c69e0d470c3711160a6c Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 10 Dec 2025 01:42:39 +0000 Subject: [PATCH 41/65] Fix killing the server --- .github/workflows/benchmark.yml | 36 +++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 628f76c3e6..8971bcd8f4 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -298,7 +298,7 @@ jobs: echo "🚀 Starting production server..." cd react_on_rails/spec/dummy - # Start server in background + # Start server in background (Core uses rails directly, not foreman) bin/prod & echo "Server started in background" @@ -338,13 +338,23 @@ jobs: if: env.RUN_CORE && always() run: | echo "🛑 Stopping Core production server..." - # Find and kill the Puma process on port 3001 - if lsof -ti:3001 > /dev/null 2>&1; then - kill "$(lsof -ti:3001)" || true - echo "✅ Server stopped" - else - echo "ℹ️ No server running on port 3001" - fi + # Kill all server-related processes (safe in isolated CI environment) + pkill -9 -f "ruby|node|foreman|overmind|puma" || true + + # Wait for port 3001 to be free + echo "⏳ Waiting for port 3001 to be free..." + for _ in {1..10}; do + if ! lsof -ti:3001 > /dev/null 2>&1; then + echo "✅ Port 3001 is now free" + exit 0 + fi + sleep 1 + done + + echo "❌ ERROR: Port 3001 is still in use after 10 seconds" + echo "Processes using port 3001:" + lsof -i:3001 || true + exit 1 - name: Validate Core benchmark results if: env.RUN_CORE @@ -477,13 +487,9 @@ jobs: if: env.RUN_PRO && always() run: | echo "🛑 Stopping Pro production server..." - # Find and kill the Puma process on port 3001 - if lsof -ti:3001 > /dev/null 2>&1; then - kill "$(lsof -ti:3001)" || true - echo "✅ Server stopped" - else - echo "ℹ️ No server running on port 3001" - fi + # Kill all server-related processes (safe in isolated CI environment) + pkill -9 -f "ruby|node|foreman|overmind|puma" || true + echo "✅ Server stopped" - name: Validate Pro benchmark results if: env.RUN_PRO From 1f7f1e9318b19623b2a115115b89d279c54eb19d Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 10 Dec 2025 09:36:35 +0000 Subject: [PATCH 42/65] Move stopping the servers after validating/uploading benchmark results --- .github/workflows/benchmark.yml | 60 ++++++++++++++++----------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 8971bcd8f4..d9e9fdfd32 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -334,28 +334,6 @@ jobs: echo "✅ Benchmark suite completed successfully" - - name: Stop Core production server - if: env.RUN_CORE && always() - run: | - echo "🛑 Stopping Core production server..." - # Kill all server-related processes (safe in isolated CI environment) - pkill -9 -f "ruby|node|foreman|overmind|puma" || true - - # Wait for port 3001 to be free - echo "⏳ Waiting for port 3001 to be free..." - for _ in {1..10}; do - if ! 
lsof -ti:3001 > /dev/null 2>&1; then - echo "✅ Port 3001 is now free" - exit 0 - fi - sleep 1 - done - - echo "❌ ERROR: Port 3001 is still in use after 10 seconds" - echo "Processes using port 3001:" - lsof -i:3001 || true - exit 1 - - name: Validate Core benchmark results if: env.RUN_CORE run: | @@ -384,6 +362,28 @@ jobs: retention-days: 30 if-no-files-found: warn + - name: Stop Core production server + if: env.RUN_CORE && always() + run: | + echo "🛑 Stopping Core production server..." + # Kill all server-related processes (safe in isolated CI environment) + pkill -9 -f "ruby|node|foreman|overmind|puma" || true + + # Wait for port 3001 to be free + echo "⏳ Waiting for port 3001 to be free..." + for _ in {1..10}; do + if ! lsof -ti:3001 > /dev/null 2>&1; then + echo "✅ Port 3001 is now free" + exit 0 + fi + sleep 1 + done + + echo "❌ ERROR: Port 3001 is still in use after 10 seconds" + echo "Processes using port 3001:" + lsof -i:3001 || true + exit 1 + # ============================================ # STEP 6: SETUP PRO APPLICATION SERVER # ============================================ @@ -483,14 +483,6 @@ jobs: echo "✅ Benchmark suite completed successfully" - - name: Stop Pro production server - if: env.RUN_PRO && always() - run: | - echo "🛑 Stopping Pro production server..." - # Kill all server-related processes (safe in isolated CI environment) - pkill -9 -f "ruby|node|foreman|overmind|puma" || true - echo "✅ Server stopped" - - name: Validate Pro benchmark results if: env.RUN_PRO run: | @@ -519,6 +511,14 @@ jobs: retention-days: 30 if-no-files-found: warn + - name: Stop Pro production server + if: env.RUN_PRO && always() + run: | + echo "🛑 Stopping Pro production server..." + # Kill all server-related processes (safe in isolated CI environment) + pkill -9 -f "ruby|node|foreman|overmind|puma" || true + echo "✅ Server stopped" + # ============================================ # STEP 8: WORKFLOW COMPLETION # ============================================ From 3e0100dc8b2d54ad39417bc7313eb55a8d6a3137 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 10 Dec 2025 09:51:31 +0000 Subject: [PATCH 43/65] Run benchmark workflow only on PRs with full-ci or benchmark labels --- .github/workflows/benchmark.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index d9e9fdfd32..baa7514d43 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -64,6 +64,7 @@ on: - '**.md' - 'docs/**' pull_request: + types: [opened, synchronize, reopened, labeled] paths-ignore: - '**.md' - 'docs/**' @@ -90,6 +91,12 @@ env: jobs: benchmark: + # Run on: push to master, workflow_dispatch, or PRs with 'full-ci' or 'benchmark' labels + if: | + github.event_name == 'push' || + github.event_name == 'workflow_dispatch' || + contains(github.event.pull_request.labels.*.name, 'full-ci') || + contains(github.event.pull_request.labels.*.name, 'benchmark') runs-on: ubuntu-latest env: SECRET_KEY_BASE: 'dummy-secret-key-for-ci-testing-not-used-in-production' From 3f4e85980c1b241f964ec7ac6a04288dbfc15a16 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 12 Dec 2025 12:40:13 +0000 Subject: [PATCH 44/65] Skip testing routes --- spec/performance/bench.rb | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index cb86e7dfcc..60e91be49f 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -122,6 +122,12 @@ def get_benchmark_routes(app_dir) next end + # 
Skip "_for_testing" routes (test-only endpoints not meant for benchmarking) + if path.include?("_for_testing") + puts "Skipping test-only route: #{path}" + next + end + routes << path end raise "No pages# or react_router# routes found in #{app_dir}" if routes.empty? From ba5e7aba67fea469164e1b01ed202f2f8f3d5f00 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 12 Dec 2025 13:23:41 +0000 Subject: [PATCH 45/65] Add doc --- docs/planning/library-benchmarking.md | 61 +++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 docs/planning/library-benchmarking.md diff --git a/docs/planning/library-benchmarking.md b/docs/planning/library-benchmarking.md new file mode 100644 index 0000000000..f97a6cf288 --- /dev/null +++ b/docs/planning/library-benchmarking.md @@ -0,0 +1,61 @@ +# Library Benchmarking Strategy + +## Current Approach + +We use **max rate benchmarking** - each route is tested at maximum throughput to measure its capacity. + +### Configuration + +- `RATE=max` - Tests maximum throughput +- `CONNECTIONS=10` - Concurrent connections +- `DURATION=30s` - Test duration per route + +## Trade-offs: Max Rate vs Fixed Rate + +### Max Rate (Current) + +**Pros:** + +- Measures actual throughput capacity +- Self-adjusting - no need to maintain per-route rate configs +- Identifies bottlenecks and ceilings + +**Cons:** + +- Results vary with CI runner performance +- Harder to compare across commits when capacity changes significantly +- Noise from shared CI infrastructure + +### Fixed Rate + +**Pros:** + +- Consistent baseline across runs +- Latency comparisons are meaningful +- Detects regressions at a specific load level + +**Cons:** + +- Must be set below the slowest route's capacity +- If route capacity changes, historical data becomes incomparable +- Requires maintaining rate configuration per route + +## Why We Chose Max Rate + +Different routes have vastly different capacities: + +- `/empty` - ~1500 RPS +- SSR routes - ~50-200 RPS depending on component complexity + +A fixed rate low enough for all routes would under-utilize fast routes. A per-route fixed rate config would be painful to maintain and would break comparisons when capacity changes. + +For library benchmarking in CI, we accept some noise and focus on detecting significant regressions (>15-20%). + +## Future Considerations + +Options to improve accuracy if needed: + +1. **Multiple samples** - Run each benchmark 2-3 times, average results, flag high variance +2. **Adaptive rate** - Quick max-rate probe, then benchmark at 70% capacity +3. **Per-route fixed rates** - Maintain target RPS config (high maintenance burden) +4. 
**Dedicated benchmark runners** - Reduce CI noise with consistent hardware From e0dc20beb08c4fcd72bb9bd4221a7b55b77c7d60 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 12 Dec 2025 13:42:04 +0000 Subject: [PATCH 46/65] Fix Vegeta tests --- spec/performance/bench.rb | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 60e91be49f..ac6680acc2 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -285,7 +285,6 @@ def run_vegeta_benchmark(target, route_name) begin puts "\n===> Vegeta: #{route_name}" - vegeta_bin = "#{OUTDIR}/#{route_name}_vegeta.bin" vegeta_json = "#{OUTDIR}/#{route_name}_vegeta.json" vegeta_txt = "#{OUTDIR}/#{route_name}_vegeta.txt" @@ -297,15 +296,28 @@ def run_vegeta_benchmark(target, route_name) ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] end + # Run vegeta attack and pipe to text report (displayed and saved) + # Then generate JSON report by re-encoding from the text output isn't possible, + # so we save to a temp .bin file, generate both reports, then delete it + vegeta_bin = "#{OUTDIR}/#{route_name}_vegeta.bin" vegeta_cmd = [ "echo 'GET #{target}' |", "vegeta", "attack", *vegeta_args, "-duration=#{DURATION}", - "-timeout=#{REQUEST_TIMEOUT}" + "-timeout=#{REQUEST_TIMEOUT}", + "> #{vegeta_bin}" ].join(" ") - raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{vegeta_bin} | vegeta report | tee #{vegeta_txt}") - raise "Vegeta report generation failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") + raise "Vegeta attack failed" unless system(vegeta_cmd) + + # Generate text report (display and save) + raise "Vegeta text report failed" unless system("vegeta report #{vegeta_bin} | tee #{vegeta_txt}") + + # Generate JSON report + raise "Vegeta JSON report failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") + + # Delete the large binary file to save disk space + FileUtils.rm_f(vegeta_bin) vegeta_data = parse_json_file(vegeta_json, "Vegeta") vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" From 0e9a4adf38368cebce2290f5e33c6d1b6e189b68 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 12 Dec 2025 15:20:49 +0000 Subject: [PATCH 47/65] Don't follow redirects --- spec/performance/bench.rb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index ac6680acc2..b2b0840509 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -306,6 +306,7 @@ def run_vegeta_benchmark(target, route_name) *vegeta_args, "-duration=#{DURATION}", "-timeout=#{REQUEST_TIMEOUT}", + "-redirects=0", "> #{vegeta_bin}" ].join(" ") raise "Vegeta attack failed" unless system(vegeta_cmd) @@ -380,7 +381,10 @@ def run_k6_benchmark(target, route_name) }; export default function () { - const response = http.get('#{target}', { timeout: '#{REQUEST_TIMEOUT}' }); + const response = http.get('#{target}', { + timeout: '#{REQUEST_TIMEOUT}', + redirects: 0, + }); check(response, { 'status=200': r => r.status === 200, }); From ce6ded110c02cab2d9d41d561aba60c4dc379c07 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 12 Dec 2025 16:53:41 +0000 Subject: [PATCH 48/65] Move benchmark scripts to a non-gitignored directory --- .github/workflows/benchmark.yml | 4 ++-- {spec/performance => benchmarks}/bench.rb | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) rename {spec/performance 
=> benchmarks}/bench.rb (97%) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index baa7514d43..b578cabaeb 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -334,7 +334,7 @@ jobs: set -e # Exit on any error echo "🏃 Running Core benchmark suite..." - if ! ruby spec/performance/bench.rb; then + if ! ruby benchmarks/bench.rb; then echo "❌ ERROR: Benchmark execution failed" exit 1 fi @@ -483,7 +483,7 @@ jobs: set -e echo "🏃 Running Pro benchmark suite..." - if ! PRO=true ruby spec/performance/bench.rb; then + if ! PRO=true ruby benchmarks/bench.rb; then echo "❌ ERROR: Benchmark execution failed" exit 1 fi diff --git a/spec/performance/bench.rb b/benchmarks/bench.rb similarity index 97% rename from spec/performance/bench.rb rename to benchmarks/bench.rb index b2b0840509..e95bae57f1 100755 --- a/spec/performance/bench.rb +++ b/benchmarks/bench.rb @@ -93,18 +93,18 @@ def strip_optional_params(route) end # Sanitize route name for use in filenames -# Removes characters that GitHub Actions disallows in artifacts +# Removes characters that GitHub Actions disallows in artifacts and shell metacharacters def sanitize_route_name(route) name = strip_optional_params(route).gsub(%r{^/}, "").tr("/", "_") name = "root" if name.empty? - # Replace invalid characters: " : < > | * ? \r \n - name.gsub(/[":.<>|*?\r\n]+/, "_").squeeze("_").gsub(/^_|_$/, "") + # Replace invalid characters: " : < > | * ? \r \n $ ` ; & ( ) [ ] { } ! # + name.gsub(/[":.<>|*?\r\n$`;&#!()\[\]{}]+/, "_").squeeze("_").gsub(/^_|_$/, "") end # Get routes from the Rails app filtered by pages# and react_router# controllers def get_benchmark_routes(app_dir) - routes_output = `cd #{app_dir} && bundle exec rails routes 2>&1` - raise "Failed to get routes from #{app_dir}" unless $CHILD_STATUS.success? + routes_output, status = Open3.capture2e("bundle", "exec", "rails", "routes", chdir: app_dir) + raise "Failed to get routes from #{app_dir}" unless status.success? 
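The switch to Open3 above is worth spelling out: the old backtick form interpolated app_dir into a shell command string, while Open3.capture2e runs the argv directly with no shell, merges stderr into the captured output, and returns an explicit status object instead of relying on $CHILD_STATUS. A minimal sketch of the same pattern (assumes `require "open3"` at the top of the script):

    require "open3"

    output, status = Open3.capture2e("bundle", "exec", "rails", "routes",
                                     chdir: "react_on_rails/spec/dummy")
    raise "rails routes failed:\n#{output}" unless status.success?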
routes = [] routes_output.each_line do |line| From 2384be471afe571c5097dad144636feff3692997 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 12 Dec 2025 16:44:15 +0000 Subject: [PATCH 49/65] Separate k6 test file --- benchmarks/bench.rb | 84 +++++++++++++++------------------------------ benchmarks/k6.ts | 77 +++++++++++++++++++++++++++++++++++++++++ knip.ts | 2 +- package.json | 1 + pnpm-lock.yaml | 8 +++++ 5 files changed, 114 insertions(+), 58 deletions(-) create mode 100644 benchmarks/k6.ts diff --git a/benchmarks/bench.rb b/benchmarks/bench.rb index e95bae57f1..f04202a92f 100755 --- a/benchmarks/bench.rb +++ b/benchmarks/bench.rb @@ -223,7 +223,7 @@ def server_responding?(uri) raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" end -# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength +# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity # Benchmark a single route with Fortio def run_fortio_benchmark(target, route_name) @@ -341,58 +341,23 @@ def run_k6_benchmark(target, route_name) begin puts "\n===> k6: #{route_name}" - k6_script_file = "#{OUTDIR}/#{route_name}_k6_test.js" + k6_script = File.expand_path("k6.ts", __dir__) k6_summary_json = "#{OUTDIR}/#{route_name}_k6_summary.json" k6_txt = "#{OUTDIR}/#{route_name}_k6.txt" - # Configure k6 scenarios - k6_scenarios = - if IS_MAX_RATE - <<~JS.strip - { - max_rate: { - executor: 'constant-vus', - vus: #{CONNECTIONS}, - duration: '#{DURATION}' - } - } - JS - else - <<~JS.strip - { - constant_rate: { - executor: 'constant-arrival-rate', - rate: #{RATE}, - timeUnit: '1s', - duration: '#{DURATION}', - preAllocatedVUs: #{CONNECTIONS}, - maxVUs: #{MAX_CONNECTIONS} - } - } - JS - end + # Build k6 command with environment variables + k6_env_vars = [ + "-e TARGET_URL=#{target}", + "-e RATE=#{RATE}", + "-e DURATION=#{DURATION}", + "-e CONNECTIONS=#{CONNECTIONS}", + "-e MAX_CONNECTIONS=#{MAX_CONNECTIONS}", + "-e REQUEST_TIMEOUT=#{REQUEST_TIMEOUT}" + ].join(" ") - k6_script = <<~JS - import http from 'k6/http'; - import { check } from 'k6'; - - export const options = { - scenarios: #{k6_scenarios}, - }; - - export default function () { - const response = http.get('#{target}', { - timeout: '#{REQUEST_TIMEOUT}', - redirects: 0, - }); - check(response, { - 'status=200': r => r.status === 200, - }); - } - JS - File.write(k6_script_file, k6_script) - k6_command = "k6 run --summary-export=#{k6_summary_json} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'" - raise "k6 benchmark failed" unless system("#{k6_command} #{k6_script_file} | tee #{k6_txt}") + k6_command = "k6 run #{k6_env_vars} --summary-export=#{k6_summary_json} " \ + "--summary-trend-stats 'min,avg,med,max,p(90),p(99)' #{k6_script}" + raise "k6 benchmark failed" unless system("#{k6_command} | tee #{k6_txt}") k6_data = parse_json_file(k6_summary_json, "k6") k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing" @@ -400,16 +365,21 @@ def run_k6_benchmark(target, route_name) k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing" k6_p99 = k6_data.dig("metrics", "http_req_duration", "p(99)")&.round(2) || "missing" - # Status: compute successful vs failed requests + # Status: extract counts from checks (status_200, status_302, status_4xx, status_5xx) k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0 k6_checks = k6_data.dig("root_group", "checks") || {} - 
k6_status_parts = k6_checks.map do |name, check| - status_label = name.start_with?("status=") ? name.delete_prefix("status=") : name - "#{status_label}=#{check['passes']}" + k6_known_count = 0 + k6_status_parts = k6_checks.filter_map do |name, check| + passes = check["passes"] || 0 + k6_known_count += passes + next if passes.zero? + + # Convert check names like "status_200" to "200", "status_4xx" to "4xx" + status_label = name.sub(/^status_/, "") + "#{status_label}=#{passes}" end - k6_reqs_known_status = k6_checks.values.sum { |check| check["passes"] || 0 } - k6_reqs_other = k6_reqs_total - k6_reqs_known_status - k6_status_parts << "other=#{k6_reqs_other}" if k6_reqs_other.positive? + k6_other = k6_reqs_total - k6_known_count + k6_status_parts << "other=#{k6_other}" if k6_other.positive? k6_status = k6_status_parts.empty? ? "missing" : k6_status_parts.join(",") [k6_rps, k6_p50, k6_p90, k6_p99, k6_status] @@ -419,7 +389,7 @@ def run_k6_benchmark(target, route_name) end end -# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength +# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity # Initialize summary file File.write(SUMMARY_TXT, "") diff --git a/benchmarks/k6.ts b/benchmarks/k6.ts new file mode 100644 index 0000000000..22c8bb9ccc --- /dev/null +++ b/benchmarks/k6.ts @@ -0,0 +1,77 @@ +/** + * k6 benchmark script for React on Rails + * + * This script is designed to be reusable across different routes and configurations. + * Configuration is passed via environment variables (using -e flag): + * + * Required: + * TARGET_URL - The full URL to benchmark (e.g., http://localhost:3001/server_side_hello_world) + * + * Optional: + * RATE - Requests per second ("max" for maximum throughput, or a number). Default: "max" + * DURATION - Test duration (e.g., "30s", "1m"). Default: "30s" + * CONNECTIONS - Number of concurrent connections/VUs. Default: 10 + * MAX_CONNECTIONS - Maximum VUs (for constant-arrival-rate). Default: same as CONNECTIONS + * REQUEST_TIMEOUT - Request timeout (e.g., "60s"). Default: "60s" + * + * Usage: + * k6 run -e TARGET_URL=http://localhost:3001/my_route benchmarks/k6.ts + * k6 run -e TARGET_URL=http://localhost:3001/my_route -e RATE=100 -e DURATION=1m benchmarks/k6.ts + */ +/* eslint-disable import/no-unresolved -- k6 is installed globally */ +import http from 'k6/http'; +import { check } from 'k6'; + +// Read configuration from environment variables +const targetUrl = __ENV.TARGET_URL; +const rate = __ENV.RATE || 'max'; +const duration = __ENV.DURATION || '30s'; +const vus = parseInt(__ENV.CONNECTIONS || '10', 10); +const maxVUs = __ENV.MAX_CONNECTIONS ? parseInt(__ENV.MAX_CONNECTIONS, 10) : vus; +const requestTimeout = __ENV.REQUEST_TIMEOUT || '60s'; + +if (!targetUrl) { + throw new Error('TARGET_URL environment variable is required'); +} + +// Configure scenarios based on rate mode +const scenarios = + rate === 'max' + ? 
{ + max_rate: { + executor: 'constant-vus', + vus, + duration, + }, + } + : { + constant_rate: { + executor: 'constant-arrival-rate', + rate: parseInt(rate, 10) || 50, // same default as in bench.rb + timeUnit: '1s', + duration, + preAllocatedVUs: vus, + maxVUs, + }, + }; + +export const options = { + scenarios, + // Disable default thresholds to avoid noise in output + thresholds: {}, +}; + +export default () => { + const response = http.get(targetUrl, { + timeout: requestTimeout, + redirects: 0, + }); + + // Check for various status codes to get accurate reporting + check(response, { + status_200: (r) => r.status === 200, + status_3xx: (r) => r.status >= 300 && r.status < 400, + status_4xx: (r) => r.status >= 400 && r.status < 500, + status_5xx: (r) => r.status >= 500, + }); +}; diff --git a/knip.ts b/knip.ts index 36bfc74ff6..b97ae9cef2 100644 --- a/knip.ts +++ b/knip.ts @@ -5,7 +5,7 @@ const config: KnipConfig = { workspaces: { // Root workspace - manages the monorepo and global tooling '.': { - entry: ['eslint.config.ts', 'jest.config.base.js'], + entry: ['eslint.config.ts', 'jest.config.base.js', 'benchmarks/k6.ts'], project: ['*.{js,mjs,ts}'], ignoreBinaries: [ // Has to be installed globally diff --git a/package.json b/package.json index 74169fd6fa..390db836a6 100644 --- a/package.json +++ b/package.json @@ -30,6 +30,7 @@ "@testing-library/react": "^16.2.0", "@tsconfig/node14": "^14.1.2", "@types/jest": "^29.5.14", + "@types/k6": "^1.4.0", "@types/node": "^20.17.16", "@types/react": "^19.0.0", "@types/react-dom": "^19.0.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9403953675..dd9fab055a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -63,6 +63,9 @@ importers: '@types/jest': specifier: ^29.5.14 version: 29.5.14 + '@types/k6': + specifier: ^1.4.0 + version: 1.4.0 '@types/node': specifier: ^20.17.16 version: 20.19.25 @@ -1635,6 +1638,9 @@ packages: '@types/jsonwebtoken@9.0.10': resolution: {integrity: sha512-asx5hIG9Qmf/1oStypjanR7iKTv0gXQ1Ov/jfrX6kS/EO0OFni8orbmGCn0672NHR3kXHwpAwR+B368ZGN/2rA==} + '@types/k6@1.4.0': + resolution: {integrity: sha512-2tgKVnzNXZTZT1TDAGLY/3cuvHPZLyOF751N7M8T2dBgWzInzUVZYjGn9zVW01S1yNLqAr1az9gctyJHTW6GRQ==} + '@types/lockfile@1.0.4': resolution: {integrity: sha512-Q8oFIHJHr+htLrTXN2FuZfg+WXVHQRwU/hC2GpUu+Q8e3FUM9EDkS2pE3R2AO1ZGu56f479ybdMCNF1DAu8cAQ==} @@ -6994,6 +7000,8 @@ snapshots: '@types/ms': 2.1.0 '@types/node': 20.19.25 + '@types/k6@1.4.0': {} + '@types/lockfile@1.0.4': {} '@types/mime@1.3.5': {} From 32f7861e76e83b5c2202db809acbe1a98f5aa179 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 12 Dec 2025 17:33:42 +0000 Subject: [PATCH 50/65] Fix Yalc and pnpm handling in benchmark.yml --- .github/workflows/benchmark.yml | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index b578cabaeb..6dfe61884a 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -251,23 +251,19 @@ jobs: - name: yalc publish for react-on-rails run: cd packages/react-on-rails && yalc publish - # TODO only needed while we use --ignore-workspace below - name: Cache core dummy app node modules if: env.RUN_CORE uses: actions/cache@v4 with: path: react_on_rails/spec/dummy/node_modules - key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('pnpm-lock.yaml', 'react_on_rails/spec/dummy/package.json') }} + key: v4-core-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails/spec/dummy/pnpm-lock.yaml') }} - name: Install Node 
modules for the dummy app if: env.RUN_CORE - # TODO simplify this both here and in integration tests - # --ignore-workspace prevents pnpm from treating this as part of the parent workspace - # The dummy app doesn't have a pnpm-lock.yaml and shouldn't use frozen lockfile run: | cd react_on_rails/spec/dummy - yalc add react-on-rails - pnpm install --ignore-workspace + yalc add --link react-on-rails + pnpm install - name: Save Core dummy app ruby gems to cache if: env.RUN_CORE @@ -399,20 +395,18 @@ jobs: uses: actions/cache@v4 with: path: react_on_rails_pro/spec/dummy/node_modules - key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('pnpm-lock.yaml', 'react_on_rails_pro/spec/dummy/package.json') }} + key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/pnpm-lock.yaml') }} - name: yalc publish for react-on-rails-pro if: env.RUN_PRO run: cd packages/react-on-rails-pro && yalc publish - # Same reason for --ignore-workspace as for core - # TODO Remove it here as well - name: Install Node modules with Pnpm for Pro dummy app if: env.RUN_PRO run: | cd react_on_rails_pro/spec/dummy - yalc add react-on-rails-pro - pnpm install --ignore-workspace + yalc add --link react-on-rails-pro + pnpm install - name: Cache Pro dummy app Ruby gems if: env.RUN_PRO From 9718f81e88886134b20de40f4f580e0336163026 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 12 Dec 2025 19:05:22 +0000 Subject: [PATCH 51/65] Fix assets:precompile in Pro dummy --- react_on_rails_pro/spec/dummy/lib/tasks/assets.rake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/react_on_rails_pro/spec/dummy/lib/tasks/assets.rake b/react_on_rails_pro/spec/dummy/lib/tasks/assets.rake index ea766fa26b..d7837e3200 100644 --- a/react_on_rails_pro/spec/dummy/lib/tasks/assets.rake +++ b/react_on_rails_pro/spec/dummy/lib/tasks/assets.rake @@ -16,10 +16,10 @@ namespace :assets do desc "Compile assets with webpack" task :webpack do - sh "cd client && yarn run build:client" + sh "cd client && pnpm run build:client" # Skip next line if not doing server rendering - sh "cd client && yarn run build:server" + sh "cd client && pnpm run build:server" end task :clobber do From 50d2278f82065b8a76a8c5e3ff83c7ac0555711f Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 12 Dec 2025 22:18:04 +0000 Subject: [PATCH 52/65] Closer config for Core and Pro dummy apps --- react_on_rails/spec/dummy/config/puma.rb | 11 +++++++++++ react_on_rails/spec/dummy/config/shakapacker.yml | 3 +++ react_on_rails_pro/spec/dummy/config/database.yml | 2 +- .../spec/dummy/config/environments/production.rb | 10 +++++----- react_on_rails_pro/spec/dummy/config/puma.rb | 14 ++++++++++---- .../spec/dummy/config/shakapacker.yml | 3 ++- 6 files changed, 32 insertions(+), 11 deletions(-) diff --git a/react_on_rails/spec/dummy/config/puma.rb b/react_on_rails/spec/dummy/config/puma.rb index 01b93c7d91..e190c501cb 100644 --- a/react_on_rails/spec/dummy/config/puma.rb +++ b/react_on_rails/spec/dummy/config/puma.rb @@ -44,6 +44,17 @@ # preload_app! + # The code in the `on_worker_boot` will be called if you are using + # clustered mode by specifying a number of `workers`. After each worker + # process is booted this block will be run, if you are using `preload_app!` + # option you will want to use this block to reconnect to any threads + # or connections that may have been created at application boot, Ruby + # cannot share connections between processes. 
+ # + on_worker_boot do + ActiveRecord::Base.establish_connection if defined?(ActiveRecord) + end + # Specifies the `worker_shutdown_timeout` threshold that Puma will use to wait before # terminating a worker. # diff --git a/react_on_rails/spec/dummy/config/shakapacker.yml b/react_on_rails/spec/dummy/config/shakapacker.yml index 342b6cad1e..f4d1e586bd 100644 --- a/react_on_rails/spec/dummy/config/shakapacker.yml +++ b/react_on_rails/spec/dummy/config/shakapacker.yml @@ -20,6 +20,9 @@ default: &default cache_manifest: false nested_entries: true + # Extract and emit a css file + extract_css: true + # Hook to run before webpack compilation (e.g., for generating dynamic entry points) # SECURITY: Only reference trusted scripts within your project. Ensure the hook path # points to a file within the project root that you control. diff --git a/react_on_rails_pro/spec/dummy/config/database.yml b/react_on_rails_pro/spec/dummy/config/database.yml index 1c1a37ca8d..0d02f24980 100644 --- a/react_on_rails_pro/spec/dummy/config/database.yml +++ b/react_on_rails_pro/spec/dummy/config/database.yml @@ -6,7 +6,7 @@ # default: &default adapter: sqlite3 - pool: 5 + pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 5 } %> timeout: 5000 development: diff --git a/react_on_rails_pro/spec/dummy/config/environments/production.rb b/react_on_rails_pro/spec/dummy/config/environments/production.rb index 330291b114..4c20d38667 100644 --- a/react_on_rails_pro/spec/dummy/config/environments/production.rb +++ b/react_on_rails_pro/spec/dummy/config/environments/production.rb @@ -46,9 +46,9 @@ # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies. # config.force_ssl = true - # Use the lowest log level to ensure availability of diagnostic information - # when problems arise. - config.log_level = :debug + # Include generic and useful information about system operation, but avoid logging too much + # information to avoid inadvertent exposure of personally identifiable information (PII). + config.log_level = :info # Prepend all log lines with the following tags. config.log_tags = [:request_id] @@ -69,8 +69,8 @@ # the I18n.default_locale when a translation cannot be found). config.i18n.fallbacks = true - # Send deprecation notices to registered listeners. - config.active_support.deprecation = :notify + # Don't log any deprecations. + config.active_support.report_deprecations = false # Use default logging formatter so that PID and timestamp are not suppressed. config.log_formatter = Logger::Formatter.new diff --git a/react_on_rails_pro/spec/dummy/config/puma.rb b/react_on_rails_pro/spec/dummy/config/puma.rb index 035d43a2f6..c02737ef7b 100644 --- a/react_on_rails_pro/spec/dummy/config/puma.rb +++ b/react_on_rails_pro/spec/dummy/config/puma.rb @@ -1,13 +1,14 @@ # frozen_string_literal: true # Puma can serve each request in a thread from an internal thread pool. -# The `threads` method setting takes two numbers a minimum and maximum. +# The `threads` method setting takes two numbers: a minimum and maximum. # Any libraries that use thread pools should be configured to match # the maximum value specified for Puma. Default is set to 5 threads for minimum -# and maximum, this matches the default thread size of Active Record. +# and maximum; this matches the default thread size of Active Record. 
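+# (The benchmark workflow pins RAILS_MAX_THREADS and RAILS_MIN_THREADS
+# explicitly; see .github/workflows/benchmark.yml.)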
# -threads_count = ENV.fetch("RAILS_MAX_THREADS", 5).to_i -threads threads_count, threads_count +max_threads_count = ENV.fetch("RAILS_MAX_THREADS", 5) +min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count } +threads min_threads_count, max_threads_count # Specifies the `port` that Puma will listen on to receive requests, default is 3000. # @@ -45,5 +46,10 @@ ActiveRecord::Base.establish_connection if defined?(ActiveRecord) end +# Specifies the `worker_shutdown_timeout` threshold that Puma will use to wait before +# terminating a worker. +# +worker_shutdown_timeout 60 + # Allow puma to be restarted by `rails restart` command. plugin :tmp_restart diff --git a/react_on_rails_pro/spec/dummy/config/shakapacker.yml b/react_on_rails_pro/spec/dummy/config/shakapacker.yml index 068bb30df4..672d872bf3 100644 --- a/react_on_rails_pro/spec/dummy/config/shakapacker.yml +++ b/react_on_rails_pro/spec/dummy/config/shakapacker.yml @@ -8,9 +8,10 @@ default: &default nested_entries: true javascript_transpiler: babel - cache_path: tmp/cache/webpacker + cache_path: tmp/cache/shakapacker check_yarn_integrity: false webpack_compile_output: false + ensure_consistent_versioning: true # Additional paths webpack should look up modules # ['app/assets', 'engine/foo/app/assets'] From 4cf0c65e9d7e81fdd8c7a06396b3348c043f724c Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sun, 14 Dec 2025 10:33:32 +0000 Subject: [PATCH 53/65] Remove SSH access --- .github/workflows/benchmark.yml | 47 +++++---------------------------- 1 file changed, 6 insertions(+), 41 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 6dfe61884a..a79d54a3c6 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -1,14 +1,8 @@ name: Benchmark Workflow on: - # https://github.com/mxschmitt/action-tmate?tab=readme-ov-file#manually-triggered-debug workflow_dispatch: inputs: - debug_enabled: - description: 'Enable SSH access (⚠️ Security Risk - read workflow comments)' - required: false - default: false - type: boolean routes: description: 'Comma-separated routes to benchmark (e.g., "/,/hello"). Leave empty to auto-detect from Rails.' required: false @@ -110,36 +104,7 @@ jobs: uses: actions/checkout@v4 # ============================================ - # STEP 2: OPTIONAL SSH ACCESS - # ============================================ - # NOTE: Interactive confirmation is not possible in GitHub Actions. - # As a secure workaround, SSH access is gated by the workflow_dispatch - # input variable 'debug_enabled' which defaults to false. - # Users must explicitly set this to true to enable SSH. 
- - - name: SSH Warning - if: ${{ github.event.inputs.debug_enabled == true || github.event.inputs.debug_enabled == 'true' }} - run: | - echo "⚠️ ⚠️ ⚠️ SSH ACCESS ENABLED ⚠️ ⚠️ ⚠️" - echo "" - echo "SECURITY NOTICE:" - echo " - SSH access exposes your GitHub Actions runner" - echo " - Only proceed if you understand and accept the risks" - echo " - Do NOT store secrets or sensitive data on the runner" - echo " - Access is limited to the workflow initiator only" - echo " - The session will remain open until manually terminated" - echo "" - echo "⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️" - - - name: Setup SSH access (if enabled) - if: ${{ github.event.inputs.debug_enabled == true || github.event.inputs.debug_enabled == 'true' }} - uses: mxschmitt/action-tmate@v3 - with: - detached: true - limit-access-to-actor: true # Only workflow trigger can access - - # ============================================ - # STEP 3: INSTALL BENCHMARKING TOOLS + # STEP 2: INSTALL BENCHMARKING TOOLS # ============================================ - name: Add tools directory to PATH @@ -194,7 +159,7 @@ jobs: k6-version: ${{ env.K6_VERSION }} # ============================================ - # STEP 4: START APPLICATION SERVER + # STEP 3: START APPLICATION SERVER # ============================================ - name: Setup Ruby @@ -320,7 +285,7 @@ jobs: exit 1 # ============================================ - # STEP 5: RUN CORE BENCHMARKS + # STEP 4: RUN CORE BENCHMARKS # ============================================ - name: Execute Core benchmark suite @@ -388,7 +353,7 @@ jobs: exit 1 # ============================================ - # STEP 6: SETUP PRO APPLICATION SERVER + # STEP 5: SETUP PRO APPLICATION SERVER # ============================================ - name: Cache Pro dummy app node modules if: env.RUN_PRO @@ -467,7 +432,7 @@ jobs: exit 1 # ============================================ - # STEP 7: RUN PRO BENCHMARKS + # STEP 6: RUN PRO BENCHMARKS # ============================================ - name: Execute Pro benchmark suite @@ -521,7 +486,7 @@ jobs: echo "✅ Server stopped" # ============================================ - # STEP 8: WORKFLOW COMPLETION + # STEP 7: WORKFLOW COMPLETION # ============================================ - name: Workflow summary if: always() From bd0def86dce2259897a7f31fe97f3691524aa68e Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 13 Dec 2025 10:08:49 +0000 Subject: [PATCH 54/65] Add Node renderer benchmarks --- .github/workflows/benchmark.yml | 25 ++- benchmarks/bench-node-renderer.rb | 246 ++++++++++++++++++++++++++++ benchmarks/bench.rb | 108 ++---------- benchmarks/lib/benchmark_helpers.rb | 124 ++++++++++++++ 4 files changed, 405 insertions(+), 98 deletions(-) create mode 100755 benchmarks/bench-node-renderer.rb create mode 100644 benchmarks/lib/benchmark_helpers.rb diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index a79d54a3c6..5aab0293bf 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -134,14 +134,14 @@ jobs: - name: Cache Vegeta binary id: cache-vegeta - if: contains(env.TOOLS, 'vegeta') + if: env.RUN_PRO || contains(env.TOOLS, 'vegeta') uses: actions/cache@v4 with: path: ~/bin/vegeta key: vegeta-${{ runner.os }}-${{ runner.arch }}-${{ env.VEGETA_VERSION }} - name: Install Vegeta - if: contains(env.TOOLS, 'vegeta') && steps.cache-vegeta.outputs.cache-hit != 'true' + if: (env.RUN_PRO || contains(env.TOOLS, 'vegeta')) && steps.cache-vegeta.outputs.cache-hit != 'true' run: | echo "📦 Installing Vegeta 
v${VEGETA_VERSION}" @@ -449,6 +449,20 @@ jobs: echo "✅ Benchmark suite completed successfully" + - name: Execute Pro Node Renderer benchmark suite + if: env.RUN_PRO + timeout-minutes: 30 + run: | + set -e + echo "🏃 Running Pro Node Renderer benchmark suite..." + + if ! ruby benchmarks/bench-node-renderer.rb; then + echo "❌ ERROR: Node Renderer benchmark execution failed" + exit 1 + fi + + echo "✅ Node Renderer benchmark suite completed successfully" + - name: Validate Pro benchmark results if: env.RUN_PRO run: | @@ -462,9 +476,14 @@ jobs: echo "✅ Benchmark results found" echo "" - echo "📊 Summary:" + echo "📊 Rails Benchmark Summary:" column -t -s $'\t' bench_results/summary.txt echo "" + if [ -f "bench_results/node_renderer_summary.txt" ]; then + echo "📊 Node Renderer Benchmark Summary:" + column -t -s $'\t' bench_results/node_renderer_summary.txt + echo "" + fi echo "Generated files:" ls -lh bench_results/ diff --git a/benchmarks/bench-node-renderer.rb b/benchmarks/bench-node-renderer.rb new file mode 100755 index 0000000000..e5c5b03f23 --- /dev/null +++ b/benchmarks/bench-node-renderer.rb @@ -0,0 +1,246 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# Benchmark script for React on Rails Pro Node Renderer +# Uses Vegeta with HTTP/2 Cleartext (h2c) support + +require "English" +require "open3" +require "socket" +require_relative "lib/benchmark_helpers" + +# Read configuration from source files +def read_protocol_version + package_json_path = File.expand_path( + "../packages/react-on-rails-pro-node-renderer/package.json", + __dir__ + ) + package_json = JSON.parse(File.read(package_json_path)) + package_json["protocolVersion"] || raise("protocolVersion not found in #{package_json_path}") +end + +def read_password_from_config + config_path = File.expand_path( + "../react_on_rails_pro/spec/dummy/client/node-renderer.js", + __dir__ + ) + config_content = File.read(config_path) + match = config_content.match(/password:\s*['"]([^'"]+)['"]/) + match ? 
match[1] : raise("password not found in #{config_path}")
+end
+
+# Benchmark parameters
+BUNDLE_TIMESTAMP = env_or_default("BUNDLE_TIMESTAMP", nil)
+PASSWORD = read_password_from_config
+BASE_URL = env_or_default("BASE_URL", "localhost:3800")
+PROTOCOL_VERSION = read_protocol_version
+
+# Test cases: JavaScript expressions to evaluate
+# Format: { name: "test_name", request: "javascript_code" }
+TEST_CASES = [
+  { name: "simple_eval", request: "2+2" },
+  {
+    name: "react_ssr",
+    request: "ReactOnRails.serverRenderReactComponent(" \
+             '{name:"HelloWorld",props:{helloWorldData:{name:"Benchmark"}},domNodeId:"app"})'
+  }
+].freeze
+
+# Benchmark configuration
+RATE = env_or_default("RATE", "max")
+CONNECTIONS = env_or_default("CONNECTIONS", 10).to_i
+MAX_CONNECTIONS = env_or_default("MAX_CONNECTIONS", CONNECTIONS).to_i
+DURATION = env_or_default("DURATION", "30s")
+REQUEST_TIMEOUT = env_or_default("REQUEST_TIMEOUT", "60s")
+
+OUTDIR = "bench_results"
+SUMMARY_TXT = "#{OUTDIR}/node_renderer_summary.txt".freeze
+
+# Local wrapper for add_summary_line to use local constant
+def add_to_summary(*parts)
+  add_summary_line(SUMMARY_TXT, *parts)
+end
+
+# Find available bundle in the node-renderer bundles directory
+def find_bundle_timestamp
+  bundles_dir = File.expand_path(
+    "../react_on_rails_pro/spec/dummy/.node-renderer-bundles",
+    __dir__
+  )
+
+  unless Dir.exist?(bundles_dir)
+    raise "Node renderer bundles directory not found: #{bundles_dir}\n" \
+          "Make sure the Pro dummy app has been compiled with NODE_ENV=production"
+  end
+
+  # Bundle directories have format: <hash>-<environment> (e.g., 623229694671afc1ac9137f2715bb654-production)
+  # Filter to only include production bundles with hash-like names
+  bundles = Dir.children(bundles_dir).select do |entry|
+    File.directory?(File.join(bundles_dir, entry)) &&
+      entry.match?(/^[a-f0-9]+-production$/)
+  end
+
+  raise "No production bundles found in #{bundles_dir}" if bundles.empty?
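+
+  # NOTE: Dir.children gives no guaranteed ordering, so with several production
+  # bundles the pick below is filesystem-dependent; an explicit sort (e.g. by
+  # mtime) would make it deterministic.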
+ + # Return the first production bundle + bundles.first +end + +# URL-encode special characters for form body +def url_encode(str) + URI.encode_www_form_component(str) +end + +# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity + +# Run Vegeta benchmark for a single test case +def run_vegeta_benchmark(test_case, bundle_timestamp) + name = test_case[:name] + request = test_case[:request] + + puts "\n===> Vegeta h2c: #{name}" + + # Create target URL + target_url = "http://#{BASE_URL}/bundles/#{bundle_timestamp}/render/#{name}" + + # Create request body + body = [ + "protocolVersion=#{url_encode(PROTOCOL_VERSION)}", + "password=#{url_encode(PASSWORD)}", + "renderingRequest=#{url_encode(request)}" + ].join("&") + + # Create temp files for Vegeta + targets_file = "#{OUTDIR}/#{name}_vegeta_targets.txt" + body_file = "#{OUTDIR}/#{name}_vegeta_body.txt" + vegeta_bin = "#{OUTDIR}/#{name}_vegeta.bin" + vegeta_json = "#{OUTDIR}/#{name}_vegeta.json" + vegeta_txt = "#{OUTDIR}/#{name}_vegeta.txt" + + # Write body file + File.write(body_file, body) + + # Write targets file (Vegeta format with @body reference) + File.write(targets_file, <<~TARGETS) + POST #{target_url} + Content-Type: application/x-www-form-urlencoded + @#{body_file} + TARGETS + + # Configure Vegeta arguments for max rate + is_max_rate = RATE == "max" + vegeta_args = + if is_max_rate + ["-rate=0", "-workers=#{CONNECTIONS}", "-max-workers=#{CONNECTIONS}"] + else + ["-rate=#{RATE}", "-workers=#{CONNECTIONS}", "-max-workers=#{MAX_CONNECTIONS}"] + end + + # Run Vegeta attack with h2c + vegeta_cmd = [ + "vegeta", "attack", + "-targets=#{targets_file}", + *vegeta_args, + "-duration=#{DURATION}", + "-timeout=#{REQUEST_TIMEOUT}", + "-h2c", # HTTP/2 Cleartext (required for node renderer) + "-max-body=0", + "> #{vegeta_bin}" + ].join(" ") + + raise "Vegeta attack failed for #{name}" unless system(vegeta_cmd) + + # Generate text report (display and save) + raise "Vegeta text report failed" unless system("vegeta report #{vegeta_bin} | tee #{vegeta_txt}") + + # Generate JSON report + raise "Vegeta JSON report failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") + + # Delete the large binary file to save disk space + FileUtils.rm_f(vegeta_bin) + + # Parse results + vegeta_data = parse_json_file(vegeta_json, "Vegeta") + vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" + vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" + + [vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status] +rescue StandardError => e + puts "Error: #{e.message}" + failure_metrics(e) +end + +# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity + +# Main execution +bundle_timestamp = BUNDLE_TIMESTAMP || find_bundle_timestamp + +# Validate parameters +validate_rate(RATE) +validate_positive_integer(CONNECTIONS, "CONNECTIONS") +validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") +validate_duration(DURATION, "DURATION") +validate_duration(REQUEST_TIMEOUT, "REQUEST_TIMEOUT") + +if RATE == "max" && CONNECTIONS != MAX_CONNECTIONS + raise "For RATE=max, CONNECTIONS must equal MAX_CONNECTIONS (got 
#{CONNECTIONS} and #{MAX_CONNECTIONS})" +end + +# Check required tools +check_required_tools(%w[vegeta column tee]) + +# Print parameters +print_params( + "BASE_URL" => BASE_URL, + "BUNDLE_TIMESTAMP" => bundle_timestamp, + "RATE" => RATE, + "DURATION" => DURATION, + "REQUEST_TIMEOUT" => REQUEST_TIMEOUT, + "CONNECTIONS" => CONNECTIONS, + "MAX_CONNECTIONS" => MAX_CONNECTIONS, + "TEST_CASES" => TEST_CASES.map { |tc| tc[:name] }.join(", ") +) + +# Wait for node renderer to be ready +# Note: Node renderer only speaks HTTP/2, but we can still check with a simple GET +# that will fail - we just check it doesn't refuse connection +puts "\nWaiting for node renderer at #{BASE_URL}..." +start_time = Time.now +timeout_sec = 60 +loop do + # Try a simple TCP connection to check if server is up + + Socket.tcp(BASE_URL.split(":").first, BASE_URL.split(":").last.to_i, connect_timeout: 5, &:close) + puts " Node renderer is accepting connections" + break +rescue StandardError => e + elapsed = Time.now - start_time + puts " Attempt at #{elapsed.round(2)}s: #{e.message}" + raise "Node renderer at #{BASE_URL} not responding within #{timeout_sec}s" if elapsed > timeout_sec + + sleep 1 +end + +# Create output directory +FileUtils.mkdir_p(OUTDIR) + +# Initialize summary file +File.write(SUMMARY_TXT, "") +add_to_summary("Test", "Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") + +# Run benchmarks for each test case +TEST_CASES.each do |test_case| + print_separator + puts "Benchmarking: #{test_case[:name]}" + puts " Request: #{test_case[:request]}" + print_separator + + metrics = run_vegeta_benchmark(test_case, bundle_timestamp) + add_to_summary(test_case[:name], "Vegeta-h2c", *metrics) +end + +# Display summary +display_summary(SUMMARY_TXT) diff --git a/benchmarks/bench.rb b/benchmarks/bench.rb index f04202a92f..edc8d396d4 100755 --- a/benchmarks/bench.rb +++ b/benchmarks/bench.rb @@ -1,18 +1,9 @@ #!/usr/bin/env ruby # frozen_string_literal: true -require "English" -require "json" -require "fileutils" -require "net/http" -require "uri" - -# Helper to get env var with default, -# treating empty string and "0" as unset since they can come from the benchmark workflow. -def env_or_default(key, default) - value = ENV[key].to_s - value.empty? || value == "0" ? default : value -end +require "open3" +require "shellwords" +require_relative "lib/benchmark_helpers" # Benchmark parameters PRO = ENV.fetch("PRO", "false") == "true" @@ -35,45 +26,9 @@ def env_or_default(key, default) OUTDIR = "bench_results" SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze -# Validate input parameters -def validate_rate(rate) - return if rate == "max" - - return if rate.match?(/^\d+(\.\d+)?$/) && rate.to_f.positive? - - raise "RATE must be 'max' or a positive number (got: '#{rate}')" -end - -def validate_positive_integer(value, name) - return if value.is_a?(Integer) && value.positive? 
- - raise "#{name} must be a positive integer (got: '#{value}')" -end - -def validate_duration(value, name) - return if value.match?(/^(\d+(\.\d+)?[smh])+$/) - - raise "#{name} must be a duration like '10s', '1m', '1.5m' (got: '#{value}')" -end - -def parse_json_file(file_path, tool_name) - JSON.parse(File.read(file_path)) -rescue Errno::ENOENT - raise "#{tool_name} results file not found: #{file_path}" -rescue JSON::ParserError => e - raise "Failed to parse #{tool_name} JSON: #{e.message}" -rescue StandardError => e - raise "Failed to read #{tool_name} results: #{e.message}" -end - -def failure_metrics(error) - ["FAILED", "FAILED", "FAILED", "FAILED", error.message] -end - -def add_summary_line(*parts) - File.open(SUMMARY_TXT, "a") do |f| - f.puts parts.join("\t") - end +# Local wrapper for add_summary_line to use local constant +def add_to_summary(*parts) + add_summary_line(SUMMARY_TXT, *parts) end # Check if a route has required parameters (e.g., /rsc_payload/:component_name) @@ -154,10 +109,7 @@ def get_benchmark_routes(app_dir) raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS # Check required tools are installed -required_tools = TOOLS + %w[column tee] -required_tools.each do |cmd| - raise "required tool '#{cmd}' is not installed" unless system("command -v #{cmd} >/dev/null 2>&1") -end +check_required_tools(TOOLS + %w[column tee]) puts <<~PARAMS Benchmark parameters: @@ -175,44 +127,9 @@ def get_benchmark_routes(app_dir) - TOOLS: #{TOOLS.join(', ')} PARAMS -# Helper method to check if server is responding -def server_responding?(uri) - response = Net::HTTP.get_response(uri) - # Accept both success (2xx) and redirect (3xx) responses as "server is responding" - success = response.is_a?(Net::HTTPSuccess) || response.is_a?(Net::HTTPRedirection) - info = "HTTP #{response.code} #{response.message}" - info += " -> #{response['location']}" if response.is_a?(Net::HTTPRedirection) && response["location"] - { success: success, info: info } -rescue StandardError => e - { success: false, info: "#{e.class.name}: #{e.message}" } -end - # Wait for the server to be ready -TIMEOUT_SEC = 60 -puts "Checking server availability at #{BASE_URL}..." test_uri = URI.parse("http://#{BASE_URL}#{routes.first}") -start_time = Time.now -attempt_count = 0 -loop do - attempt_count += 1 - attempt_start = Time.now - result = server_responding?(test_uri) - attempt_duration = Time.now - attempt_start - elapsed = Time.now - start_time - - # rubocop:disable Layout/LineLength - if result[:success] - puts " ✅ Attempt #{attempt_count} at #{elapsed.round(2)}s: SUCCESS - #{result[:info]} (took #{attempt_duration.round(3)}s)" - break - else - puts " ❌ Attempt #{attempt_count} at #{elapsed.round(2)}s: FAILED - #{result[:info]} (took #{attempt_duration.round(3)}s)" - end - # rubocop:enable Layout/LineLength - - raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if elapsed > TIMEOUT_SEC - - sleep 1 -end +wait_for_server(test_uri) puts "Server is ready!" 
FileUtils.mkdir_p(OUTDIR) @@ -307,6 +224,7 @@ def run_vegeta_benchmark(target, route_name) "-duration=#{DURATION}", "-timeout=#{REQUEST_TIMEOUT}", "-redirects=0", + "-max-body=0", "> #{vegeta_bin}" ].join(" ") raise "Vegeta attack failed" unless system(vegeta_cmd) @@ -393,7 +311,7 @@ def run_k6_benchmark(target, route_name) # Initialize summary file File.write(SUMMARY_TXT, "") -add_summary_line("Route", "Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") +add_to_summary("Route", "Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") # Run benchmarks for each route routes.each do |route| @@ -417,13 +335,13 @@ def run_k6_benchmark(target, route_name) # Run each benchmark tool fortio_metrics = run_fortio_benchmark(target, route_name) - add_summary_line(route, "Fortio", *fortio_metrics) if fortio_metrics + add_to_summary(route, "Fortio", *fortio_metrics) if fortio_metrics vegeta_metrics = run_vegeta_benchmark(target, route_name) - add_summary_line(route, "Vegeta", *vegeta_metrics) if vegeta_metrics + add_to_summary(route, "Vegeta", *vegeta_metrics) if vegeta_metrics k6_metrics = run_k6_benchmark(target, route_name) - add_summary_line(route, "k6", *k6_metrics) if k6_metrics + add_to_summary(route, "k6", *k6_metrics) if k6_metrics end puts "\nSummary saved to #{SUMMARY_TXT}" diff --git a/benchmarks/lib/benchmark_helpers.rb b/benchmarks/lib/benchmark_helpers.rb new file mode 100644 index 0000000000..6576705a83 --- /dev/null +++ b/benchmarks/lib/benchmark_helpers.rb @@ -0,0 +1,124 @@ +# frozen_string_literal: true + +require "json" +require "fileutils" +require "net/http" +require "uri" + +# Shared utilities for benchmark scripts + +# Helper to get env var with default, +# treating empty string and "0" as unset since they can come from the benchmark workflow. +def env_or_default(key, default) + value = ENV[key].to_s + value.empty? || value == "0" ? default : value +end + +# Validation helpers +def validate_rate(rate) + return if rate == "max" + return if rate.match?(/^\d+(\.\d+)?$/) && rate.to_f.positive? + + raise "RATE must be 'max' or a positive number (got: '#{rate}')" +end + +def validate_positive_integer(value, name) + return if value.is_a?(Integer) && value.positive? 
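+  # e.g. validate_positive_integer(10, "CONNECTIONS") returns silently, while
+  # validate_positive_integer("10", "CONNECTIONS") raises: callers are expected
+  # to convert env strings with .to_i first.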
+ + raise "#{name} must be a positive integer (got: '#{value}')" +end + +def validate_duration(value, name) + return if value.match?(/^(\d+(\.\d+)?[smh])+$/) + + raise "#{name} must be a duration like '10s', '1m', '1.5m' (got: '#{value}')" +end + +# JSON parsing with error handling +def parse_json_file(file_path, tool_name) + JSON.parse(File.read(file_path)) +rescue Errno::ENOENT + raise "#{tool_name} results file not found: #{file_path}" +rescue JSON::ParserError => e + raise "Failed to parse #{tool_name} JSON: #{e.message}" +rescue StandardError => e + raise "Failed to read #{tool_name} results: #{e.message}" +end + +# Create failure metrics array for summary +def failure_metrics(error) + ["FAILED", "FAILED", "FAILED", "FAILED", error.message] +end + +# Append a line to the summary file +def add_summary_line(summary_file, *parts) + File.open(summary_file, "a") do |f| + f.puts parts.join("\t") + end +end + +# HTTP server health check +def server_responding?(uri) + response = Net::HTTP.get_response(uri) + # Accept both success (2xx) and redirect (3xx) responses as "server is responding" + success = response.is_a?(Net::HTTPSuccess) || response.is_a?(Net::HTTPRedirection) + info = "HTTP #{response.code} #{response.message}" + info += " -> #{response['location']}" if response.is_a?(Net::HTTPRedirection) && response["location"] + { success: success, info: info } +rescue StandardError => e + { success: false, info: "#{e.class.name}: #{e.message}" } +end + +# Wait for a server to be ready with timeout and retries +def wait_for_server(uri, timeout_sec: 60) + puts "Checking server availability at #{uri.host}:#{uri.port}..." + start_time = Time.now + attempt_count = 0 + + loop do + attempt_count += 1 + attempt_start = Time.now + result = server_responding?(uri) + attempt_duration = Time.now - attempt_start + elapsed = Time.now - start_time + + if result[:success] + puts " Attempt #{attempt_count} at #{elapsed.round(2)}s: SUCCESS - #{result[:info]} " \ + "(took #{attempt_duration.round(3)}s)" + return true + else + puts " Attempt #{attempt_count} at #{elapsed.round(2)}s: FAILED - #{result[:info]} " \ + "(took #{attempt_duration.round(3)}s)" + end + + raise "Server at #{uri.host}:#{uri.port} not responding within #{timeout_sec}s" if elapsed > timeout_sec + + sleep 1 + end +end + +# Check that required CLI tools are installed +def check_required_tools(tools) + tools.each do |cmd| + raise "required tool '#{cmd}' is not installed" unless system("command -v #{cmd} >/dev/null 2>&1") + end +end + +# Print a section separator +def print_separator(char = "=", width = 80) + puts char * width +end + +# Print benchmark parameters +def print_params(params) + puts "Benchmark parameters:" + params.each do |key, value| + puts " - #{key}: #{value}" + end +end + +# Display summary using column command +def display_summary(summary_file) + puts "\nSummary saved to #{summary_file}" + system("column", "-t", "-s", "\t", summary_file) +end From b313f933d14b04715f077f8bc8744779fe3c7886 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 13 Dec 2025 10:11:51 +0000 Subject: [PATCH 55/65] Use only k6 in Rails benchmarks --- .github/workflows/benchmark.yml | 32 +---- benchmarks/bench-node-renderer.rb | 4 +- benchmarks/bench.rb | 225 +++++++----------------------- 3 files changed, 54 insertions(+), 207 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 5aab0293bf..c6ef90f061 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -37,11 +37,6 @@ 
on: required: false default: 3 type: number - tools: - description: 'Comma-separated list of tools to run' - required: false - default: 'fortio,vegeta,k6' - type: string app_version: description: 'Which app version to benchmark' required: false @@ -65,7 +60,6 @@ on: env: RUBY_VERSION: "3.3.7" BUNDLER_VERSION: "2.5.4" - FORTIO_VERSION: "1.73.0" K6_VERSION: "1.4.2" VEGETA_VERSION: "12.13.0" # Determine which apps to run (default is 'pro_only' for all triggers) @@ -81,7 +75,6 @@ env: WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} - TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: benchmark: @@ -112,36 +105,16 @@ jobs: mkdir -p ~/bin echo "$HOME/bin" >> $GITHUB_PATH - - name: Cache Fortio binary - id: cache-fortio - if: contains(env.TOOLS, 'fortio') - uses: actions/cache@v4 - with: - path: ~/bin/fortio - key: fortio-${{ runner.os }}-${{ runner.arch }}-${{ env.FORTIO_VERSION }} - - - name: Install Fortio - if: contains(env.TOOLS, 'fortio') && steps.cache-fortio.outputs.cache-hit != 'true' - run: | - echo "📦 Installing Fortio v${FORTIO_VERSION}" - - # Download and extract fortio binary - wget -q https://github.com/fortio/fortio/releases/download/v${FORTIO_VERSION}/fortio-linux_amd64-${FORTIO_VERSION}.tgz - tar -xzf fortio-linux_amd64-${FORTIO_VERSION}.tgz - - # Store in cache directory - mv usr/bin/fortio ~/bin/ - - name: Cache Vegeta binary id: cache-vegeta - if: env.RUN_PRO || contains(env.TOOLS, 'vegeta') + if: env.RUN_PRO uses: actions/cache@v4 with: path: ~/bin/vegeta key: vegeta-${{ runner.os }}-${{ runner.arch }}-${{ env.VEGETA_VERSION }} - name: Install Vegeta - if: (env.RUN_PRO || contains(env.TOOLS, 'vegeta')) && steps.cache-vegeta.outputs.cache-hit != 'true' + if: env.RUN_PRO && steps.cache-vegeta.outputs.cache-hit != 'true' run: | echo "📦 Installing Vegeta v${VEGETA_VERSION}" @@ -153,7 +126,6 @@ jobs: mv vegeta ~/bin/ - name: Setup k6 - if: contains(env.TOOLS, 'k6') uses: grafana/setup-k6-action@v1 with: k6-version: ${{ env.K6_VERSION }} diff --git a/benchmarks/bench-node-renderer.rb b/benchmarks/bench-node-renderer.rb index e5c5b03f23..9d145e369b 100755 --- a/benchmarks/bench-node-renderer.rb +++ b/benchmarks/bench-node-renderer.rb @@ -229,7 +229,7 @@ def run_vegeta_benchmark(test_case, bundle_timestamp) # Initialize summary file File.write(SUMMARY_TXT, "") -add_to_summary("Test", "Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") +add_to_summary("Test", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") # Run benchmarks for each test case TEST_CASES.each do |test_case| @@ -239,7 +239,7 @@ def run_vegeta_benchmark(test_case, bundle_timestamp) print_separator metrics = run_vegeta_benchmark(test_case, bundle_timestamp) - add_to_summary(test_case[:name], "Vegeta-h2c", *metrics) + add_to_summary(test_case[:name], *metrics) end # Display summary diff --git a/benchmarks/bench.rb b/benchmarks/bench.rb index edc8d396d4..3ed08ce2de 100755 --- a/benchmarks/bench.rb +++ b/benchmarks/bench.rb @@ -20,8 +20,6 @@ DURATION = env_or_default("DURATION", "30s") # request timeout (duration string as above) REQUEST_TIMEOUT = env_or_default("REQUEST_TIMEOUT", "60s") -# Tools to run (comma-separated) -TOOLS = env_or_default("TOOLS", "fortio,vegeta,k6").split(",") OUTDIR = "bench_results" SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze @@ -109,7 +107,7 @@ def get_benchmark_routes(app_dir) raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be 
>= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS # Check required tools are installed -check_required_tools(TOOLS + %w[column tee]) +check_required_tools(%w[k6 column tee]) puts <<~PARAMS Benchmark parameters: @@ -124,7 +122,6 @@ def get_benchmark_routes(app_dir) - WEB_CONCURRENCY: #{ENV['WEB_CONCURRENCY'] || 'unset'} - RAILS_MAX_THREADS: #{ENV['RAILS_MAX_THREADS'] || 'unset'} - RAILS_MIN_THREADS: #{ENV['RAILS_MIN_THREADS'] || 'unset'} - - TOOLS: #{TOOLS.join(', ')} PARAMS # Wait for the server to be ready @@ -142,176 +139,62 @@ def get_benchmark_routes(app_dir) # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity -# Benchmark a single route with Fortio -def run_fortio_benchmark(target, route_name) - return nil unless TOOLS.include?("fortio") - - begin - puts "===> Fortio: #{route_name}" - - fortio_json = "#{OUTDIR}/#{route_name}_fortio.json" - fortio_txt = "#{OUTDIR}/#{route_name}_fortio.txt" - - # Configure Fortio arguments - # See https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass - fortio_args = - if IS_MAX_RATE - ["-qps", 0, "-c", CONNECTIONS] - else - ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] - end - - fortio_cmd = [ - "fortio", "load", - *fortio_args, - "-t", DURATION, - "-timeout", REQUEST_TIMEOUT, - # Allow redirects. Could use -L instead, but it uses the slower HTTP client. - "-allow-initial-errors", - "-json", fortio_json, - target - ].join(" ") - raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{fortio_txt}") - - fortio_data = parse_json_file(fortio_json, "Fortio") - fortio_rps = fortio_data["ActualQPS"]&.round(2) || "missing" - - percentiles = fortio_data.dig("DurationHistogram", "Percentiles") || [] - p50_data = percentiles.find { |p| p["Percentile"] == 50 } - p90_data = percentiles.find { |p| p["Percentile"] == 90 } - p99_data = percentiles.find { |p| p["Percentile"] == 99 } - - raise "Fortio results missing percentile data" unless p50_data && p90_data && p99_data - - fortio_p50 = (p50_data["Value"] * 1000).round(2) - fortio_p90 = (p90_data["Value"] * 1000).round(2) - fortio_p99 = (p99_data["Value"] * 1000).round(2) - fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" - - [fortio_rps, fortio_p50, fortio_p90, fortio_p99, fortio_status] - rescue StandardError => e - puts "Error: #{e.message}" - failure_metrics(e) - end -end - -# Benchmark a single route with Vegeta -def run_vegeta_benchmark(target, route_name) - return nil unless TOOLS.include?("vegeta") - - begin - puts "\n===> Vegeta: #{route_name}" - - vegeta_json = "#{OUTDIR}/#{route_name}_vegeta.json" - vegeta_txt = "#{OUTDIR}/#{route_name}_vegeta.txt" - - # Configure Vegeta arguments - vegeta_args = - if IS_MAX_RATE - ["-rate=0", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] - else - ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] - end - - # Run vegeta attack and pipe to text report (displayed and saved) - # Then generate JSON report by re-encoding from the text output isn't possible, - # so we save to a temp .bin file, generate both reports, then delete it - vegeta_bin = "#{OUTDIR}/#{route_name}_vegeta.bin" - vegeta_cmd = [ - "echo 'GET #{target}' |", - "vegeta", "attack", - *vegeta_args, - "-duration=#{DURATION}", - "-timeout=#{REQUEST_TIMEOUT}", - "-redirects=0", - "-max-body=0", - "> #{vegeta_bin}" - ].join(" ") - raise "Vegeta attack failed" unless system(vegeta_cmd) - - # Generate text 
report (display and save) - raise "Vegeta text report failed" unless system("vegeta report #{vegeta_bin} | tee #{vegeta_txt}") - - # Generate JSON report - raise "Vegeta JSON report failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") - - # Delete the large binary file to save disk space - FileUtils.rm_f(vegeta_bin) - - vegeta_data = parse_json_file(vegeta_json, "Vegeta") - vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" - vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" - vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" - vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing" - vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" - - [vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status] - rescue StandardError => e - puts "Error: #{e.message}" - failure_metrics(e) - end -end - # Benchmark a single route with k6 def run_k6_benchmark(target, route_name) - return nil unless TOOLS.include?("k6") - - begin - puts "\n===> k6: #{route_name}" - - k6_script = File.expand_path("k6.ts", __dir__) - k6_summary_json = "#{OUTDIR}/#{route_name}_k6_summary.json" - k6_txt = "#{OUTDIR}/#{route_name}_k6.txt" - - # Build k6 command with environment variables - k6_env_vars = [ - "-e TARGET_URL=#{target}", - "-e RATE=#{RATE}", - "-e DURATION=#{DURATION}", - "-e CONNECTIONS=#{CONNECTIONS}", - "-e MAX_CONNECTIONS=#{MAX_CONNECTIONS}", - "-e REQUEST_TIMEOUT=#{REQUEST_TIMEOUT}" - ].join(" ") - - k6_command = "k6 run #{k6_env_vars} --summary-export=#{k6_summary_json} " \ - "--summary-trend-stats 'min,avg,med,max,p(90),p(99)' #{k6_script}" - raise "k6 benchmark failed" unless system("#{k6_command} | tee #{k6_txt}") - - k6_data = parse_json_file(k6_summary_json, "k6") - k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing" - k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing" - k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing" - k6_p99 = k6_data.dig("metrics", "http_req_duration", "p(99)")&.round(2) || "missing" - - # Status: extract counts from checks (status_200, status_302, status_4xx, status_5xx) - k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0 - k6_checks = k6_data.dig("root_group", "checks") || {} - k6_known_count = 0 - k6_status_parts = k6_checks.filter_map do |name, check| - passes = check["passes"] || 0 - k6_known_count += passes - next if passes.zero? - - # Convert check names like "status_200" to "200", "status_4xx" to "4xx" - status_label = name.sub(/^status_/, "") - "#{status_label}=#{passes}" - end - k6_other = k6_reqs_total - k6_known_count - k6_status_parts << "other=#{k6_other}" if k6_other.positive? - k6_status = k6_status_parts.empty? ? 
"missing" : k6_status_parts.join(",") - - [k6_rps, k6_p50, k6_p90, k6_p99, k6_status] - rescue StandardError => e - puts "Error: #{e.message}" - failure_metrics(e) + puts "\n===> k6: #{route_name}" + + k6_script = File.expand_path("k6.ts", __dir__) + k6_summary_json = "#{OUTDIR}/#{route_name}_k6_summary.json" + k6_txt = "#{OUTDIR}/#{route_name}_k6.txt" + + # Build k6 command with environment variables + k6_env_vars = [ + "-e TARGET_URL=#{Shellwords.escape(target)}", + "-e RATE=#{RATE}", + "-e DURATION=#{DURATION}", + "-e CONNECTIONS=#{CONNECTIONS}", + "-e MAX_CONNECTIONS=#{MAX_CONNECTIONS}", + "-e REQUEST_TIMEOUT=#{REQUEST_TIMEOUT}" + ].join(" ") + + k6_command = "k6 run #{k6_env_vars} --summary-export=#{k6_summary_json} " \ + "--summary-trend-stats 'min,avg,med,max,p(90),p(99)' #{k6_script}" + raise "k6 benchmark failed" unless system("#{k6_command} | tee #{k6_txt}") + + k6_data = parse_json_file(k6_summary_json, "k6") + k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing" + k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing" + k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing" + k6_p99 = k6_data.dig("metrics", "http_req_duration", "p(99)")&.round(2) || "missing" + + # Status: extract counts from checks (status_200, status_3xx, status_4xx, status_5xx) + k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0 + k6_checks = k6_data.dig("root_group", "checks") || {} + k6_known_count = 0 + k6_status_parts = k6_checks.filter_map do |name, check| + passes = check["passes"] || 0 + k6_known_count += passes + next if passes.zero? + + # Convert check names like "status_200" to "200", "status_4xx" to "4xx" + status_label = name.sub(/^status_/, "") + "#{status_label}=#{passes}" end + k6_other = k6_reqs_total - k6_known_count + k6_status_parts << "other=#{k6_other}" if k6_other.positive? + k6_status = k6_status_parts.empty? ? 
"missing" : k6_status_parts.join(",") + + [k6_rps, k6_p50, k6_p90, k6_p99, k6_status] +rescue StandardError => e + puts "Error: #{e.message}" + failure_metrics(e) end # rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity # Initialize summary file File.write(SUMMARY_TXT, "") -add_to_summary("Route", "Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") +add_to_summary("Route", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") # Run benchmarks for each route routes.each do |route| @@ -332,16 +215,8 @@ def run_k6_benchmark(target, route_name) puts "Warm-up complete for #{route}" route_name = sanitize_route_name(route) - - # Run each benchmark tool - fortio_metrics = run_fortio_benchmark(target, route_name) - add_to_summary(route, "Fortio", *fortio_metrics) if fortio_metrics - - vegeta_metrics = run_vegeta_benchmark(target, route_name) - add_to_summary(route, "Vegeta", *vegeta_metrics) if vegeta_metrics - - k6_metrics = run_k6_benchmark(target, route_name) - add_to_summary(route, "k6", *k6_metrics) if k6_metrics + metrics = run_k6_benchmark(target, route_name) + add_to_summary(route, *metrics) end puts "\nSummary saved to #{SUMMARY_TXT}" From 2b7ca0c4086666be5f2fea41114c78342f7e5506 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sun, 14 Dec 2025 13:11:52 +0000 Subject: [PATCH 56/65] Add max column --- benchmarks/bench-node-renderer.rb | 5 +++-- benchmarks/bench.rb | 11 ++++++----- benchmarks/lib/benchmark_helpers.rb | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/benchmarks/bench-node-renderer.rb b/benchmarks/bench-node-renderer.rb index 9d145e369b..2afa04607f 100755 --- a/benchmarks/bench-node-renderer.rb +++ b/benchmarks/bench-node-renderer.rb @@ -165,9 +165,10 @@ def run_vegeta_benchmark(test_case, bundle_timestamp) vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_max = vegeta_data.dig("latencies", "max")&./(1_000_000.0)&.round(2) || "missing" vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" - [vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status] + [vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_max, vegeta_status] rescue StandardError => e puts "Error: #{e.message}" failure_metrics(e) @@ -229,7 +230,7 @@ def run_vegeta_benchmark(test_case, bundle_timestamp) # Initialize summary file File.write(SUMMARY_TXT, "") -add_to_summary("Test", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") +add_to_summary("Test", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "max(ms)", "Status") # Run benchmarks for each test case TEST_CASES.each do |test_case| diff --git a/benchmarks/bench.rb b/benchmarks/bench.rb index 3ed08ce2de..42a9e26a22 100755 --- a/benchmarks/bench.rb +++ b/benchmarks/bench.rb @@ -157,15 +157,16 @@ def run_k6_benchmark(target, route_name) "-e REQUEST_TIMEOUT=#{REQUEST_TIMEOUT}" ].join(" ") - k6_command = "k6 run #{k6_env_vars} --summary-export=#{k6_summary_json} " \ - "--summary-trend-stats 'min,avg,med,max,p(90),p(99)' #{k6_script}" - raise "k6 benchmark failed" unless system("#{k6_command} | tee #{k6_txt}") + k6_command = "k6 run #{k6_env_vars} --summary-export=#{Shellwords.escape(k6_summary_json)} " \ + "--summary-trend-stats 'med,max,p(90),p(99)' #{k6_script}" + raise "k6 benchmark failed" unless system("#{k6_command} | tee 
#{Shellwords.escape(k6_txt)}")
 
   k6_data = parse_json_file(k6_summary_json, "k6")
   k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing"
   k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing"
   k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing"
   k6_p99 = k6_data.dig("metrics", "http_req_duration", "p(99)")&.round(2) || "missing"
+  k6_max = k6_data.dig("metrics", "http_req_duration", "max")&.round(2) || "missing"
 
   # Status: extract counts from checks (status_200, status_3xx, status_4xx, status_5xx)
   k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0
@@ -184,7 +185,7 @@ def run_k6_benchmark(target, route_name)
   k6_status_parts << "other=#{k6_other}" if k6_other.positive?
   k6_status = k6_status_parts.empty? ? "missing" : k6_status_parts.join(",")
 
-  [k6_rps, k6_p50, k6_p90, k6_p99, k6_status]
+  [k6_rps, k6_p50, k6_p90, k6_p99, k6_max, k6_status]
 rescue StandardError => e
   puts "Error: #{e.message}"
   failure_metrics(e)
@@ -194,7 +195,7 @@ def run_k6_benchmark(target, route_name)
 
 # Initialize summary file
 File.write(SUMMARY_TXT, "")
-add_to_summary("Route", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status")
+add_to_summary("Route", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "max(ms)", "Status")
 
 # Run benchmarks for each route
 routes.each do |route|
diff --git a/benchmarks/lib/benchmark_helpers.rb b/benchmarks/lib/benchmark_helpers.rb
index 6576705a83..39811f838e 100644
--- a/benchmarks/lib/benchmark_helpers.rb
+++ b/benchmarks/lib/benchmark_helpers.rb
@@ -47,7 +47,7 @@ def parse_json_file(file_path, tool_name)
 
 # Create failure metrics array for summary
 def failure_metrics(error)
-  ["FAILED", "FAILED", "FAILED", "FAILED", error.message]
+  ["FAILED", "FAILED", "FAILED", "FAILED", "FAILED", error.message]
 end
 
 # Append a line to the summary file

From 85f95474aca93e2ce22b9e9f96387b5085373003 Mon Sep 17 00:00:00 2001
From: Alexey Romanov
Date: Sun, 14 Dec 2025 13:32:56 +0000
Subject: [PATCH 57/65] Add discardResponseBodies and specify types in the k6 file

---
 benchmarks/k6.ts | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/benchmarks/k6.ts b/benchmarks/k6.ts
index 22c8bb9ccc..1d00617ced 100644
--- a/benchmarks/k6.ts
+++ b/benchmarks/k6.ts
@@ -20,6 +20,7 @@
  */
 /* eslint-disable import/no-unresolved -- k6 is installed globally */
 import http from 'k6/http';
+import { Options, Scenario } from 'k6/options';
 import { check } from 'k6';
 
 // Read configuration from environment variables
@@ -35,7 +36,7 @@ if (!targetUrl) {
 }
 
 // Configure scenarios based on rate mode
-const scenarios =
+const scenarios: Record<string, Scenario> =
   rate === 'max'
     ? 
{ max_rate: { @@ -55,7 +56,9 @@ const scenarios = }, }; -export const options = { +export const options: Options = { + // "Highly recommended" in https://grafana.com/docs/k6/latest/using-k6/k6-options/reference/#discard-response-bodies + discardResponseBodies: true, scenarios, // Disable default thresholds to avoid noise in output thresholds: {}, From 43569ad7159e90d98de1fae240f2b7bece1cf187 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 15 Dec 2025 10:28:10 +0000 Subject: [PATCH 58/65] Ignore vendor/bundle for Prettier --- .prettierignore | 1 + react_on_rails_pro/.prettierignore | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.prettierignore b/.prettierignore index ac70a7af5b..0a613409a7 100644 --- a/.prettierignore +++ b/.prettierignore @@ -17,6 +17,7 @@ react_on_rails/spec/dummy/public **/.yalc/** **/*generated* *.res.js +**/vendor # Prettier doesn't understand ERB syntax in YAML files and can damage templates *.yml diff --git a/react_on_rails_pro/.prettierignore b/react_on_rails_pro/.prettierignore index 47322297f2..d095c03e8e 100644 --- a/react_on_rails_pro/.prettierignore +++ b/react_on_rails_pro/.prettierignore @@ -3,7 +3,7 @@ node_modules **/tmp **/public **/package.json -vendor/bundle +**/vendor **/.node-renderer-bundles spec/dummy/.yalc/ From 738c7c1a4d1bd72e66184bf1005283513d7dbb13 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 15 Dec 2025 11:19:32 +0000 Subject: [PATCH 59/65] Support both RSC and non-RSC benchmarks for Node renderer --- benchmarks/bench-node-renderer.rb | 163 +++++++++++++++++++++++------- 1 file changed, 127 insertions(+), 36 deletions(-) diff --git a/benchmarks/bench-node-renderer.rb b/benchmarks/bench-node-renderer.rb index 2afa04607f..fecb8f3205 100755 --- a/benchmarks/bench-node-renderer.rb +++ b/benchmarks/bench-node-renderer.rb @@ -30,17 +30,18 @@ def read_password_from_config end # Benchmark parameters -BUNDLE_TIMESTAMP = env_or_default("BUNDLE_TIMESTAMP", nil) PASSWORD = read_password_from_config BASE_URL = env_or_default("BASE_URL", "localhost:3800") PROTOCOL_VERSION = read_protocol_version # Test cases: JavaScript expressions to evaluate -# Format: { name: "test_name", request: "javascript_code" } +# Format: { name: "test_name", request: "javascript_code", rsc: true/false } +# rsc: true means the test requires an RSC bundle, false means non-RSC bundle TEST_CASES = [ - { name: "simple_eval", request: "2+2" }, + { name: "simple_eval", rsc: false, request: "2+2" }, { name: "react_ssr", + rsc: false, request: "ReactOnRails.serverRenderReactComponent(" \ '{name:"HelloWorld",props:{helloWorldData:{name:"Benchmark"}},domNodeId:"app"})' } @@ -61,8 +62,8 @@ def add_to_summary(*parts) add_summary_line(SUMMARY_TXT, *parts) end -# Find available bundle in the node-renderer bundles directory -def find_bundle_timestamp +# Find all production bundles in the node-renderer bundles directory +def find_all_production_bundles bundles_dir = File.expand_path( "../react_on_rails_pro/spec/dummy/.node-renderer-bundles", __dir__ @@ -82,8 +83,57 @@ def find_bundle_timestamp raise "No production bundles found in #{bundles_dir}" if bundles.empty? 
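+
+  # All candidates are returned; categorize_bundles below probes each bundle
+  # over h2c to decide whether it is an RSC or a non-RSC bundle.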
- # Return the first production bundle - bundles.first + bundles +end + +# Check if a bundle is an RSC bundle by evaluating ReactOnRails.isRSCBundle +# Returns true/false/nil (nil means couldn't determine) +# rubocop:disable Style/ReturnNilInPredicateMethodDefinition +def rsc_bundle?(bundle_timestamp) + url = render_url(bundle_timestamp, "rsc_check") + body = render_body("ReactOnRails.isRSCBundle") + + # Use curl with h2c since Net::HTTP doesn't support HTTP/2 + result, status = Open3.capture2( + "curl", "-s", "--http2-prior-knowledge", "-X", "POST", + "-H", "Content-Type: application/x-www-form-urlencoded", + "-d", body, + url + ) + return nil unless status.success? + + # The response should be "true" or "false" + result.strip == "true" +rescue StandardError => e + puts " Warning: Could not determine RSC status for #{bundle_timestamp}: #{e.message}" + nil +end +# rubocop:enable Style/ReturnNilInPredicateMethodDefinition + +# Categorize bundles into RSC and non-RSC +# Stops early once we find one of each type +def categorize_bundles(bundles) + rsc_bundle = nil + non_rsc_bundle = nil + + bundles.each do |bundle| + # Stop if we already have both types + break if rsc_bundle && non_rsc_bundle + + puts " Checking bundle #{bundle}..." + is_rsc = rsc_bundle?(bundle) + if is_rsc.nil? + puts " Could not determine bundle type, skipping" + elsif is_rsc + puts " RSC bundle" + rsc_bundle ||= bundle + else + puts " Non-RSC bundle" + non_rsc_bundle ||= bundle + end + end + + [rsc_bundle, non_rsc_bundle] end # URL-encode special characters for form body @@ -91,6 +141,20 @@ def url_encode(str) URI.encode_www_form_component(str) end +# Build render URL for a bundle and render name +def render_url(bundle_timestamp, render_name) + "http://#{BASE_URL}/bundles/#{bundle_timestamp}/render/#{render_name}" +end + +# Build request body for a rendering request +def render_body(rendering_request) + [ + "protocolVersion=#{url_encode(PROTOCOL_VERSION)}", + "password=#{url_encode(PASSWORD)}", + "renderingRequest=#{url_encode(rendering_request)}" + ].join("&") +end + # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity # Run Vegeta benchmark for a single test case @@ -100,15 +164,8 @@ def run_vegeta_benchmark(test_case, bundle_timestamp) puts "\n===> Vegeta h2c: #{name}" - # Create target URL - target_url = "http://#{BASE_URL}/bundles/#{bundle_timestamp}/render/#{name}" - - # Create request body - body = [ - "protocolVersion=#{url_encode(PROTOCOL_VERSION)}", - "password=#{url_encode(PASSWORD)}", - "renderingRequest=#{url_encode(request)}" - ].join("&") + target_url = render_url(bundle_timestamp, name) + body = render_body(request) # Create temp files for Vegeta targets_file = "#{OUTDIR}/#{name}_vegeta_targets.txt" @@ -177,7 +234,6 @@ def run_vegeta_benchmark(test_case, bundle_timestamp) # rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity # Main execution -bundle_timestamp = BUNDLE_TIMESTAMP || find_bundle_timestamp # Validate parameters validate_rate(RATE) @@ -191,19 +247,7 @@ def run_vegeta_benchmark(test_case, bundle_timestamp) end # Check required tools -check_required_tools(%w[vegeta column tee]) - -# Print parameters -print_params( - "BASE_URL" => BASE_URL, - "BUNDLE_TIMESTAMP" => bundle_timestamp, - "RATE" => RATE, - "DURATION" => DURATION, - "REQUEST_TIMEOUT" => REQUEST_TIMEOUT, - "CONNECTIONS" => CONNECTIONS, - "MAX_CONNECTIONS" => MAX_CONNECTIONS, - "TEST_CASES" => TEST_CASES.map { |tc| 
tc[:name] }.join(", ") -) +check_required_tools(%w[vegeta curl column tee]) # Wait for node renderer to be ready # Note: Node renderer only speaks HTTP/2, but we can still check with a simple GET @@ -225,22 +269,69 @@ def run_vegeta_benchmark(test_case, bundle_timestamp) sleep 1 end +# Find and categorize bundles +puts "\nDiscovering and categorizing bundles..." +all_bundles = find_all_production_bundles +puts "Found #{all_bundles.length} production bundle(s)" +rsc_bundle, non_rsc_bundle = categorize_bundles(all_bundles) + +rsc_tests = TEST_CASES.select { |tc| tc[:rsc] } +non_rsc_tests = TEST_CASES.reject { |tc| tc[:rsc] } + +if rsc_tests.any? && rsc_bundle.nil? + puts "Warning: RSC tests requested but no RSC bundle found, skipping: #{rsc_tests.map { |tc| tc[:name] }.join(', ')}" + rsc_tests = [] +end + +if non_rsc_tests.any? && non_rsc_bundle.nil? + skipped = non_rsc_tests.map { |tc| tc[:name] }.join(", ") + puts "Warning: Non-RSC tests requested but no non-RSC bundle found, skipping: #{skipped}" + non_rsc_tests = [] +end + +# Print parameters +print_params( + "BASE_URL" => BASE_URL, + "RSC_BUNDLE" => rsc_bundle || "none", + "NON_RSC_BUNDLE" => non_rsc_bundle || "none", + "RATE" => RATE, + "DURATION" => DURATION, + "REQUEST_TIMEOUT" => REQUEST_TIMEOUT, + "CONNECTIONS" => CONNECTIONS, + "MAX_CONNECTIONS" => MAX_CONNECTIONS, + "RSC_TESTS" => rsc_tests.map { |tc| tc[:name] }.join(", ").then { |s| s.empty? ? "none" : s }, + "NON_RSC_TESTS" => non_rsc_tests.map { |tc| tc[:name] }.join(", ").then { |s| s.empty? ? "none" : s } +) + # Create output directory FileUtils.mkdir_p(OUTDIR) # Initialize summary file File.write(SUMMARY_TXT, "") -add_to_summary("Test", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "max(ms)", "Status") +add_to_summary("Test", "Bundle", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "max(ms)", "Status") + +# Run non-RSC benchmarks +non_rsc_tests.each do |test_case| + print_separator + puts "Benchmarking (non-RSC): #{test_case[:name]}" + puts " Bundle: #{non_rsc_bundle}" + puts " Request: #{test_case[:request]}" + print_separator + + metrics = run_vegeta_benchmark(test_case, non_rsc_bundle) + add_to_summary(test_case[:name], "non-RSC", *metrics) +end -# Run benchmarks for each test case -TEST_CASES.each do |test_case| +# Run RSC benchmarks +rsc_tests.each do |test_case| print_separator - puts "Benchmarking: #{test_case[:name]}" + puts "Benchmarking (RSC): #{test_case[:name]}" + puts " Bundle: #{rsc_bundle}" puts " Request: #{test_case[:request]}" print_separator - metrics = run_vegeta_benchmark(test_case, bundle_timestamp) - add_to_summary(test_case[:name], *metrics) + metrics = run_vegeta_benchmark(test_case, rsc_bundle) + add_to_summary(test_case[:name], "RSC", *metrics) end # Display summary From f8b8fc0ee10ee58f4c70af2dc544bd1488e448ef Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 15 Dec 2025 11:28:56 +0000 Subject: [PATCH 60/65] Allow running only Pro Rails or Pro Node Renderer benchmarks --- .github/workflows/benchmark.yml | 42 +++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index c6ef90f061..55e8c5b4e1 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -46,6 +46,8 @@ on: - 'both' - 'core_only' - 'pro_only' + - 'pro_rails_only' + - 'pro_node_renderer_only' push: branches: - master @@ -62,9 +64,11 @@ env: BUNDLER_VERSION: "2.5.4" K6_VERSION: "1.4.2" VEGETA_VERSION: "12.13.0" - # Determine which apps to run (default is 
'pro_only' for all triggers) - RUN_CORE: ${{ (github.event.inputs.app_version || 'both') != 'pro_only' && 'true' || '' }} + # Determine which apps/benchmarks to run (default is 'both' for all triggers) + RUN_CORE: ${{ contains(fromJSON('["both", "core_only"]'), github.event.inputs.app_version || 'both') && 'true' || '' }} RUN_PRO: ${{ (github.event.inputs.app_version || 'both') != 'core_only' && 'true' || '' }} + RUN_PRO_RAILS: ${{ contains(fromJSON('["both", "pro_only", "pro_rails_only"]'), github.event.inputs.app_version || 'both') && 'true' || '' }} + RUN_PRO_NODE_RENDERER: ${{ contains(fromJSON('["both", "pro_only", "pro_node_renderer_only"]'), github.event.inputs.app_version || 'both') && 'true' || '' }} # Benchmark parameters (defaults in bench.rb unless overridden here for CI) ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} @@ -408,7 +412,7 @@ jobs: # ============================================ - name: Execute Pro benchmark suite - if: env.RUN_PRO + if: env.RUN_PRO_RAILS timeout-minutes: 120 run: | set -e @@ -422,7 +426,7 @@ jobs: echo "✅ Benchmark suite completed successfully" - name: Execute Pro Node Renderer benchmark suite - if: env.RUN_PRO + if: env.RUN_PRO_NODE_RENDERER timeout-minutes: 30 run: | set -e @@ -441,21 +445,28 @@ jobs: set -e echo "🔍 Validating benchmark results..." - if [ ! -f "bench_results/summary.txt" ]; then - echo "❌ ERROR: benchmark summary file not found" - exit 1 + if [ "$RUN_PRO_RAILS" = "true" ]; then + if [ ! -f "bench_results/summary.txt" ]; then + echo "❌ ERROR: Rails benchmark summary file not found" + exit 1 + fi + echo "📊 Rails Benchmark Summary:" + column -t -s $'\t' bench_results/summary.txt + echo "" fi - echo "✅ Benchmark results found" - echo "" - echo "📊 Rails Benchmark Summary:" - column -t -s $'\t' bench_results/summary.txt - echo "" - if [ -f "bench_results/node_renderer_summary.txt" ]; then + if [ "$RUN_PRO_NODE_RENDERER" = "true" ]; then + if [ ! 
-f "bench_results/node_renderer_summary.txt" ]; then + echo "❌ ERROR: Node Renderer benchmark summary file not found" + exit 1 + fi echo "📊 Node Renderer Benchmark Summary:" column -t -s $'\t' bench_results/node_renderer_summary.txt echo "" fi + + echo "✅ Benchmark results validated" + echo "" echo "Generated files:" ls -lh bench_results/ @@ -488,8 +499,9 @@ jobs: echo "Run number: ${{ github.run_number }}" echo "Triggered by: ${{ github.actor }}" echo "Branch: ${{ github.ref_name }}" - echo "Run Core: ${{ env.RUN_CORE }}" - echo "Run Pro: ${{ env.RUN_PRO }}" + echo "Run Core: ${{ env.RUN_CORE || 'false' }}" + echo "Run Pro Rails: ${{ env.RUN_PRO_RAILS || 'false' }}" + echo "Run Pro Node Renderer: ${{ env.RUN_PRO_NODE_RENDERER || 'false' }}" echo "" if [ "${{ job.status }}" == "success" ]; then echo "✅ All steps completed successfully" From f0f2692d3e1d44e4ef3c13b6e9d4aee4e3106c27 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 15 Dec 2025 12:22:36 +0000 Subject: [PATCH 61/65] Add github-action-benchmark integration for benchmark tracking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add convert_to_benchmark_json.rb script to convert benchmark results to JSON format compatible with github-action-benchmark - Track RPS (customBiggerIsBetter) and latency/failure % (customSmallerIsBetter) - Exclude max latencies (not stable enough for regression detection) - Alert threshold set to 150% (50% regression) - Store benchmark data in docs/benchmarks on master branch - Enable job summary for all runs (PRs and pushes) 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude Opus 4.5 --- .github/workflows/benchmark.yml | 88 ++++++++++++- benchmarks/convert_to_benchmark_json.rb | 158 ++++++++++++++++++++++++ 2 files changed, 245 insertions(+), 1 deletion(-) create mode 100755 benchmarks/convert_to_benchmark_json.rb diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 55e8c5b4e1..00c338da6b 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -297,6 +297,45 @@ jobs: echo "Generated files:" ls -lh bench_results/ + - name: Convert Core benchmark results to JSON + if: env.RUN_CORE + run: | + ruby benchmarks/convert_to_benchmark_json.rb "Core: " + + - name: Store Core RPS benchmark results + if: env.RUN_CORE + uses: benchmark-action/github-action-benchmark@v1 + with: + name: Core Benchmark - RPS + tool: customBiggerIsBetter + output-file-path: bench_results/benchmark_rps.json + gh-pages-branch: benchmark-data + benchmark-data-dir-path: docs/benchmarks + alert-threshold: '150%' + github-token: ${{ secrets.GITHUB_TOKEN }} + comment-on-alert: true + alert-comment-cc-users: '@alexeyr-ci2' + fail-on-alert: true + summary-always: true + auto-push: false + + - name: Store Core latency benchmark results + if: env.RUN_CORE + uses: benchmark-action/github-action-benchmark@v1 + with: + name: Core Benchmark - Latency + tool: customSmallerIsBetter + output-file-path: bench_results/benchmark_latency.json + gh-pages-branch: benchmark-data + benchmark-data-dir-path: docs/benchmarks + alert-threshold: '150%' + github-token: ${{ secrets.GITHUB_TOKEN }} + comment-on-alert: true + alert-comment-cc-users: '@alexeyr-ci2' + fail-on-alert: true + summary-always: true + auto-push: false + - name: Upload Core benchmark results uses: actions/upload-artifact@v4 if: env.RUN_CORE && always() @@ -470,6 +509,45 @@ jobs: echo "Generated files:" ls -lh bench_results/ + - name: Convert Pro benchmark 
results to JSON + if: env.RUN_PRO + run: | + ruby benchmarks/convert_to_benchmark_json.rb "Pro: " + + - name: Store Pro RPS benchmark results + if: env.RUN_PRO + uses: benchmark-action/github-action-benchmark@v1 + with: + name: Pro Benchmark - RPS + tool: customBiggerIsBetter + output-file-path: bench_results/benchmark_rps.json + gh-pages-branch: benchmark-data + benchmark-data-dir-path: docs/benchmarks + alert-threshold: '150%' + github-token: ${{ secrets.GITHUB_TOKEN }} + comment-on-alert: true + alert-comment-cc-users: '@alexeyr-ci2' + fail-on-alert: true + summary-always: true + auto-push: false + + - name: Store Pro latency benchmark results + if: env.RUN_PRO + uses: benchmark-action/github-action-benchmark@v1 + with: + name: Pro Benchmark - Latency + tool: customSmallerIsBetter + output-file-path: bench_results/benchmark_latency.json + gh-pages-branch: benchmark-data + benchmark-data-dir-path: docs/benchmarks + alert-threshold: '150%' + github-token: ${{ secrets.GITHUB_TOKEN }} + comment-on-alert: true + alert-comment-cc-users: '@alexeyr-ci2' + fail-on-alert: true + summary-always: true + auto-push: false + - name: Upload Pro benchmark results uses: actions/upload-artifact@v4 if: env.RUN_PRO && always() @@ -488,7 +566,15 @@ jobs: echo "✅ Server stopped" # ============================================ - # STEP 7: WORKFLOW COMPLETION + # STEP 7: PUSH BENCHMARK DATA + # ============================================ + - name: Push benchmark data + if: github.event_name == 'push' && github.ref == 'refs/heads/master' + run: | + git push 'https://github-actions:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git' benchmark-data:benchmark-data + + # ============================================ + # STEP 8: WORKFLOW COMPLETION # ============================================ - name: Workflow summary if: always() diff --git a/benchmarks/convert_to_benchmark_json.rb b/benchmarks/convert_to_benchmark_json.rb new file mode 100755 index 0000000000..fd7252257f --- /dev/null +++ b/benchmarks/convert_to_benchmark_json.rb @@ -0,0 +1,158 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# Converts benchmark summary files to JSON format for github-action-benchmark +# Outputs two files: +# - benchmark_rps.json (customBiggerIsBetter) +# - benchmark_latency.json (customSmallerIsBetter) +# +# Usage: ruby convert_to_benchmark_json.rb [prefix] +# prefix: Optional prefix for benchmark names (e.g., "Core: " or "Pro: ") + +require "json" + +BENCH_RESULTS_DIR = "bench_results" +PREFIX = ARGV[0] || "" + +# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity + +# Parse a summary file and return array of hashes with metrics +# Expected format (tab-separated): +# Route RPS p50(ms) p90(ms) p99(ms) max(ms) Status +# or for node renderer: +# Test Bundle RPS p50(ms) p90(ms) p99(ms) max(ms) Status +def parse_summary_file(file_path, prefix: "") + return [] unless File.exist?(file_path) + + lines = File.readlines(file_path).map(&:strip).reject(&:empty?) + return [] if lines.length < 2 + + header = lines.first.split("\t") + results = [] + + lines[1..].each do |line| + cols = line.split("\t") + row = header.zip(cols).to_h + + # Determine the name based on available columns + name = row["Route"] || row["Test"] || "unknown" + bundle_suffix = row["Bundle"] ? 
" (#{row['Bundle']})" : "" + full_name = "#{prefix}#{name}#{bundle_suffix}" + + # Skip if we got FAILED values + next if row["RPS"] == "FAILED" + + # Parse numeric values + rps = row["RPS"]&.to_f + p50 = row["p50(ms)"]&.to_f + p90 = row["p90(ms)"]&.to_f + p99 = row["p99(ms)"]&.to_f + + # Calculate failed percentage from Status column + failed_pct = calculate_failed_percentage(row["Status"]) + + results << { + name: full_name, + rps: rps, + p50: p50, + p90: p90, + p99: p99, + failed_pct: failed_pct + } + end + + results +end + +# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity + +# Calculate failed request percentage from status string +# Status format: "200=7508,302=100,5xx=10" etc. +def calculate_failed_percentage(status_str) + return 0.0 if status_str.nil? || status_str == "missing" + + total = 0 + failed = 0 + + status_str.split(",").each do |part| + code, count = part.split("=") + count = count.to_i + total += count + + # Consider 0 (for Vegeta), 4xx and 5xx as failures, also "other" + failed += count if code.match?(/^[045]/) || code == "other" + end + + return 0.0 if total.zero? + + (failed.to_f / total * 100).round(2) +end + +# Convert results to customBiggerIsBetter format (for RPS) +def to_rps_json(results) + results.map do |r| + { + name: "#{r[:name]} - RPS", + unit: "requests/sec", + value: r[:rps] + } + end +end + +# Convert results to customSmallerIsBetter format (for latencies and failure rate) +def to_latency_json(results) + output = [] + + results.each do |r| + output << { + name: "#{r[:name]} - p50 latency", + unit: "ms", + value: r[:p50] + } + output << { + name: "#{r[:name]} - p90 latency", + unit: "ms", + value: r[:p90] + } + output << { + name: "#{r[:name]} - p99 latency", + unit: "ms", + value: r[:p99] + } + output << { + name: "#{r[:name]} - failed requests", + unit: "%", + value: r[:failed_pct] + } + end + + output +end + +# Main execution +all_results = [] + +# Parse Rails benchmark +rails_summary = File.join(BENCH_RESULTS_DIR, "summary.txt") +all_results.concat(parse_summary_file(rails_summary, prefix: PREFIX)) if File.exist?(rails_summary) + +# Parse Node Renderer benchmark +node_renderer_summary = File.join(BENCH_RESULTS_DIR, "node_renderer_summary.txt") +if File.exist?(node_renderer_summary) + all_results.concat(parse_summary_file(node_renderer_summary, prefix: "#{PREFIX}NodeRenderer: ")) +end + +if all_results.empty? 
+ puts "No benchmark results found to convert" + exit 0 +end + +# Write RPS JSON (bigger is better) +rps_json = to_rps_json(all_results) +File.write(File.join(BENCH_RESULTS_DIR, "benchmark_rps.json"), JSON.pretty_generate(rps_json)) +puts "Wrote #{rps_json.length} RPS metrics to benchmark_rps.json" + +# Write latency/failure JSON (smaller is better) +latency_json = to_latency_json(all_results) +File.write(File.join(BENCH_RESULTS_DIR, "benchmark_latency.json"), JSON.pretty_generate(latency_json)) +puts "Wrote #{latency_json.length} latency/failure metrics to benchmark_latency.json" From 4bc64cdcc18776c3e51a220ce51a2763ae1fef6e Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 15 Dec 2025 13:00:58 +0000 Subject: [PATCH 62/65] Add changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 20ea6ea4a6..894d780db3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,10 @@ After a release, run `/update-changelog` in Claude Code to analyze commits, writ - **React Server Components Security Vulnerabilities (CVE-2025-55183, CVE-2025-55184, CVE-2025-67779)**: Upgraded React to v19.0.3 and react-on-rails-rsc to v19.0.4 to fix three critical security vulnerabilities in React Server Components. CVE-2025-55183 (CVSS 5.3) involved source code exposure when server function references were stringified, potentially leaking hardcoded secrets. CVE-2025-55184 and CVE-2025-67779 (both CVSS 7.5) involved denial of service attacks via cyclic promise references that could cause infinite loops and 100% CPU consumption. The fixes implement dual-layer cycle detection with a 1,000-iteration depth limit and override `toString()` methods on server references to return safe placeholders. Addresses [issue 2223](https://github.com/shakacode/react_on_rails/issues/2223). [PR 2233](https://github.com/shakacode/react_on_rails/pull/2233) by [AbanoubGhadban](https://github.com/AbanoubGhadban). +#### Developer (Contributors Only) + +- **Benchmarking in CI**: A benchmark workflow will now run on all pushes to master, as well as PRs with `benchmark` or `full-ci` labels. [PR 1868](https://github.com/shakacode/react_on_rails/pull/1868) by [alexeyr-ci2](https://github.com/alexeyr-ci2) + ### [16.2.0.beta.20] - 2025-12-12 #### Fixed From 5775fae3eda8e4beafa886f952e10960af76d0b7 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 15 Dec 2025 22:39:28 +0000 Subject: [PATCH 63/65] Update docs --- CONTRIBUTING.md | 69 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 418cb6bdf6..2d0209a5ec 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -590,6 +590,75 @@ Removes the `full-ci` label and returns to standard CI behavior: - **Force-pushes:** The `/run-skipped-ci` command adds the `full-ci` label to your PR. If you force-push after commenting, the initial workflow run will test the old commit, but subsequent pushes will automatically run full CI because the label persists. - **Branch operations:** Avoid deleting or force-pushing branches while workflows are running, as this may cause failures. +### Benchmarking + +React on Rails includes a performance benchmark workflow that measures RPS (requests per second) and latency for both Core and Pro versions. 
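Both suites write tab-separated summaries to `bench_results/` (`summary.txt` for the Rails benchmarks, `node_renderer_summary.txt` for the node renderer). As a point of reference, a hypothetical `summary.txt` row — values invented for illustration — looks like:

```text
Route	RPS	p50(ms)	p90(ms)	p99(ms)	max(ms)	Status
/	412.50	18.20	33.10	61.75	240.00	200=12375
```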
+ +#### When Benchmarks Run + +- **Automatically on master**: Benchmarks run on every push to master +- **On PRs with labels**: Add the `benchmark` or `full-ci` label to your PR to run benchmarks +- **Manual trigger**: Use `gh workflow run` to run benchmarks with custom parameters (see [https://github.com/cli/cli#installation](https://github.com/cli/cli#installation) if you don't have `gh`): + + ```bash + # Run with default parameters + gh workflow run benchmark.yml + + # Run with custom parameters + gh workflow run benchmark.yml \ + -f rate=100 \ + -f duration=60s \ + -f connections=20 \ + -f app_version=core_only + ``` + +#### Regression Detection + +When benchmarks run, the [github-action-benchmark](https://github.com/benchmark-action/github-action-benchmark) action compares results against historical data. If performance regresses by more than 50%, the workflow will: + +1. **Fail the CI check** with `fail-on-alert: true` +2. **Post a comment on the PR** explaining the regression +3. **Tag reviewers** for attention + +This helps catch performance regressions before they reach production. + +#### Running Benchmarks Locally + +**Prerequisites:** Install [k6](https://k6.io/docs/get-started/installation/) and [Vegeta](https://github.com/tsenart/vegeta#install). + +You can also run the server in a separate terminal instead of backgrounding it. + +**Core benchmarks:** + +```bash +cd react_on_rails/spec/dummy +bin/prod-assets # Build production assets +bin/prod & # Start production server on port 3001 +SERVER_PID=$! +cd ../.. +ruby benchmarks/bench.rb +kill $SERVER_PID +``` + +**Pro benchmarks:** + +```bash +cd react_on_rails_pro/spec/dummy +bin/prod-assets +bin/prod & # Starts Rails server and node renderer +SERVER_PID=$! +cd ../.. +PRO=true ruby benchmarks/bench.rb # Rails benchmarks +ruby benchmarks/bench-node-renderer.rb # Node renderer benchmarks +kill $SERVER_PID +``` + +**Configuration:** Both scripts support environment variables for customization (rate, duration, connections, etc.). See the script headers in [`benchmarks/bench.rb`](benchmarks/bench.rb) and [`benchmarks/bench-node-renderer.rb`](benchmarks/bench-node-renderer.rb) for available options. For debugging, you may want lower `DURATION` and/or specific `ROUTES`: + +```bash +DURATION=5s ROUTES=/ ruby benchmarks/bench.rb +``` + ### Install Generator In your Rails app add this gem with a path to your fork. From 448ce1cfce66cd2696648176026fa7de415bc014 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 15 Dec 2025 23:17:50 +0000 Subject: [PATCH 64/65] Fix line endings --- .github/workflows/benchmark.yml | 1192 +++++++++++++++---------------- 1 file changed, 596 insertions(+), 596 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 00c338da6b..27a4617ff0 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -1,596 +1,596 @@ -name: Benchmark Workflow - -on: - workflow_dispatch: - inputs: - routes: - description: 'Comma-separated routes to benchmark (e.g., "/,/hello"). Leave empty to auto-detect from Rails.' 
[snip: the remainder of this hunk — 596 removed and 596 re-added lines of .github/workflows/benchmark.yml — differs only in CRLF → LF line endings and minor quote normalization (e.g. "3.3.7" → '3.3.7'); the verbatim duplicate is omitted, keeping only the first lines of the removed copy above and the last lines of the re-added copy below.]
+ # Kill all server-related processes (safe in isolated CI environment) + pkill -9 -f "ruby|node|foreman|overmind|puma" || true + echo "✅ Server stopped" + + # ============================================ + # STEP 7: PUSH BENCHMARK DATA + # ============================================ + - name: Push benchmark data + if: github.event_name == 'push' && github.ref == 'refs/heads/master' + run: | + git push 'https://github-actions:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git' benchmark-data:benchmark-data + + # ============================================ + # STEP 8: WORKFLOW COMPLETION + # ============================================ + - name: Workflow summary + if: always() + run: | + echo "📋 Benchmark Workflow Summary" + echo "====================================" + echo "Status: ${{ job.status }}" + echo "Run number: ${{ github.run_number }}" + echo "Triggered by: ${{ github.actor }}" + echo "Branch: ${{ github.ref_name }}" + echo "Run Core: ${{ env.RUN_CORE || 'false' }}" + echo "Run Pro Rails: ${{ env.RUN_PRO_RAILS || 'false' }}" + echo "Run Pro Node Renderer: ${{ env.RUN_PRO_NODE_RENDERER || 'false' }}" + echo "" + if [ "${{ job.status }}" == "success" ]; then + echo "✅ All steps completed successfully" + else + echo "❌ Workflow encountered errors - check logs above" + fi From 3b6c790345509822fd3e6570179ded5f3a6c9401 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 16 Dec 2025 08:45:52 +0000 Subject: [PATCH 65/65] Fix benchmark workflow non-fast-forward error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The benchmark action was being called multiple times (once for Core, once for Pro), causing Git conflicts when trying to create multiple commits to the benchmark-data branch. Changes: - Updated conversion script to support --append mode for merging results - Merge all metrics (RPS, latencies, failure rate) into single JSON file - Negate RPS values so all metrics use customSmallerIsBetter tool (higher RPS = lower negative value = better performance) - Consolidated to ONE benchmark storage step at the end (was 2 separate steps) - Enable auto-push conditionally (only on push to master) This fixes the "non-fast-forward" Git error by ensuring only one commit to benchmark-data branch per workflow run. 
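The negation trick is small enough to show inline. A minimal standalone sketch (numbers invented, not taken from any benchmark run) of how storing -RPS turns a throughput drop into metric growth, which is what a smaller-is-better comparison flags:

```ruby
# customSmallerIsBetter treats a metric that grows over time as a regression.
# Storing the negated RPS maps "throughput went down" onto "value went up".
previous_rps = 1000.0
current_rps = 600.0             # throughput regressed

stored_previous = -previous_rps # => -1000.0
stored_current = -current_rps   # => -600.0

# -600.0 > -1000.0: the stored value grew, so the run reads as "worse",
# exactly like a latency that increased.
puts stored_current > stored_previous # => true
```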
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- .github/workflows/benchmark.yml | 92 +++++-------------------- benchmarks/convert_to_benchmark_json.rb | 65 ++++++++++------- 2 files changed, 58 insertions(+), 99 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 27a4617ff0..d6a62014a8 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -302,40 +302,6 @@ jobs: run: | ruby benchmarks/convert_to_benchmark_json.rb "Core: " - - name: Store Core RPS benchmark results - if: env.RUN_CORE - uses: benchmark-action/github-action-benchmark@v1 - with: - name: Core Benchmark - RPS - tool: customBiggerIsBetter - output-file-path: bench_results/benchmark_rps.json - gh-pages-branch: benchmark-data - benchmark-data-dir-path: docs/benchmarks - alert-threshold: '150%' - github-token: ${{ secrets.GITHUB_TOKEN }} - comment-on-alert: true - alert-comment-cc-users: '@alexeyr-ci2' - fail-on-alert: true - summary-always: true - auto-push: false - - - name: Store Core latency benchmark results - if: env.RUN_CORE - uses: benchmark-action/github-action-benchmark@v1 - with: - name: Core Benchmark - Latency - tool: customSmallerIsBetter - output-file-path: bench_results/benchmark_latency.json - gh-pages-branch: benchmark-data - benchmark-data-dir-path: docs/benchmarks - alert-threshold: '150%' - github-token: ${{ secrets.GITHUB_TOKEN }} - comment-on-alert: true - alert-comment-cc-users: '@alexeyr-ci2' - fail-on-alert: true - summary-always: true - auto-push: false - - name: Upload Core benchmark results uses: actions/upload-artifact@v4 if: env.RUN_CORE && always() @@ -512,41 +478,7 @@ jobs: - name: Convert Pro benchmark results to JSON if: env.RUN_PRO run: | - ruby benchmarks/convert_to_benchmark_json.rb "Pro: " - - - name: Store Pro RPS benchmark results - if: env.RUN_PRO - uses: benchmark-action/github-action-benchmark@v1 - with: - name: Pro Benchmark - RPS - tool: customBiggerIsBetter - output-file-path: bench_results/benchmark_rps.json - gh-pages-branch: benchmark-data - benchmark-data-dir-path: docs/benchmarks - alert-threshold: '150%' - github-token: ${{ secrets.GITHUB_TOKEN }} - comment-on-alert: true - alert-comment-cc-users: '@alexeyr-ci2' - fail-on-alert: true - summary-always: true - auto-push: false - - - name: Store Pro latency benchmark results - if: env.RUN_PRO - uses: benchmark-action/github-action-benchmark@v1 - with: - name: Pro Benchmark - Latency - tool: customSmallerIsBetter - output-file-path: bench_results/benchmark_latency.json - gh-pages-branch: benchmark-data - benchmark-data-dir-path: docs/benchmarks - alert-threshold: '150%' - github-token: ${{ secrets.GITHUB_TOKEN }} - comment-on-alert: true - alert-comment-cc-users: '@alexeyr-ci2' - fail-on-alert: true - summary-always: true - auto-push: false + ruby benchmarks/convert_to_benchmark_json.rb "Pro: " --append - name: Upload Pro benchmark results uses: actions/upload-artifact@v4 @@ -566,12 +498,24 @@ jobs: echo "✅ Server stopped" # ============================================ - # STEP 7: PUSH BENCHMARK DATA + # STEP 7: STORE BENCHMARK DATA # ============================================ - - name: Push benchmark data - if: github.event_name == 'push' && github.ref == 'refs/heads/master' - run: | - git push 'https://github-actions:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git' benchmark-data:benchmark-data + - name: Store all benchmark results + uses: benchmark-action/github-action-benchmark@v1 + 
with: + name: React on Rails Benchmarks + tool: customSmallerIsBetter + output-file-path: bench_results/benchmark.json + gh-pages-branch: benchmark-data + benchmark-data-dir-path: docs/benchmarks + alert-threshold: '150%' + github-token: ${{ secrets.GITHUB_TOKEN }} + comment-on-alert: true + alert-comment-cc-users: '@alexeyr-ci2' + fail-on-alert: true + summary-always: true + # New changes should only be actually recorded on pushes to master + auto-push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }} # ============================================ # STEP 8: WORKFLOW COMPLETION diff --git a/benchmarks/convert_to_benchmark_json.rb b/benchmarks/convert_to_benchmark_json.rb index fd7252257f..7e57e2aa82 100755 --- a/benchmarks/convert_to_benchmark_json.rb +++ b/benchmarks/convert_to_benchmark_json.rb @@ -2,17 +2,21 @@ # frozen_string_literal: true # Converts benchmark summary files to JSON format for github-action-benchmark -# Outputs two files: -# - benchmark_rps.json (customBiggerIsBetter) -# - benchmark_latency.json (customSmallerIsBetter) +# Outputs a single file with all metrics using customSmallerIsBetter: +# - benchmark.json (customSmallerIsBetter) +# - RPS values are negated (so higher RPS = lower negative value = better) +# - Latencies are kept as-is (lower is better) +# - Failed percentage is kept as-is (lower is better) # -# Usage: ruby convert_to_benchmark_json.rb [prefix] +# Usage: ruby convert_to_benchmark_json.rb [prefix] [--append] # prefix: Optional prefix for benchmark names (e.g., "Core: " or "Pro: ") +# --append: Append to existing benchmark.json instead of overwriting require "json" BENCH_RESULTS_DIR = "bench_results" PREFIX = ARGV[0] || "" +APPEND_MODE = ARGV.include?("--append") # rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity @@ -88,22 +92,21 @@ def calculate_failed_percentage(status_str) (failed.to_f / total * 100).round(2) end -# Convert results to customBiggerIsBetter format (for RPS) -def to_rps_json(results) - results.map do |r| - { - name: "#{r[:name]} - RPS", - unit: "requests/sec", - value: r[:rps] - } - end -end - -# Convert results to customSmallerIsBetter format (for latencies and failure rate) -def to_latency_json(results) +# Convert all results to customSmallerIsBetter format +# RPS is negated (higher RPS = lower negative value = better) +# Latencies and failure rates are kept as-is (lower is better) +def to_unified_json(results) output = [] results.each do |r| + # Add negated RPS (higher RPS becomes lower negative value, which is better) + output << { + name: "#{r[:name]} - RPS", + unit: "requests/sec (negated)", + value: -r[:rps] + } + + # Add latencies (lower is better) output << { name: "#{r[:name]} - p50 latency", unit: "ms", @@ -119,6 +122,8 @@ def to_latency_json(results) unit: "ms", value: r[:p99] } + + # Add failure percentage (lower is better) output << { name: "#{r[:name]} - failed requests", unit: "%", @@ -147,12 +152,22 @@ def to_latency_json(results) exit 0 end -# Write RPS JSON (bigger is better) -rps_json = to_rps_json(all_results) -File.write(File.join(BENCH_RESULTS_DIR, "benchmark_rps.json"), JSON.pretty_generate(rps_json)) -puts "Wrote #{rps_json.length} RPS metrics to benchmark_rps.json" +# Convert current results to JSON +new_metrics = to_unified_json(all_results) +output_path = File.join(BENCH_RESULTS_DIR, "benchmark.json") + +# In append mode, merge with existing metrics +if APPEND_MODE && File.exist?(output_path) + existing_metrics = JSON.parse(File.read(output_path)) 
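+  # existing_metrics has string keys (from JSON.parse), while new_metrics
+  # below uses symbol keys; JSON.pretty_generate serializes both identically,
+  # so mixing them in one array is safe.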
+ unified_json = existing_metrics + new_metrics + puts "Appended #{new_metrics.length} metrics to existing #{existing_metrics.length} metrics" +else + unified_json = new_metrics + puts "Created #{unified_json.length} new metrics" +end -# Write latency/failure JSON (smaller is better) -latency_json = to_latency_json(all_results) -File.write(File.join(BENCH_RESULTS_DIR, "benchmark_latency.json"), JSON.pretty_generate(latency_json)) -puts "Wrote #{latency_json.length} latency/failure metrics to benchmark_latency.json" +# Write unified JSON (all metrics using customSmallerIsBetter with negated RPS) +File.write(output_path, JSON.pretty_generate(unified_json)) +puts "Wrote #{unified_json.length} total metrics to benchmark.json (from #{all_results.length} benchmark results)" +puts " - RPS values are negated (higher RPS = lower negative value = better)" +puts " - Latencies and failure rates use original values (lower is better)"
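For reference, the merged `bench_results/benchmark.json` that the single storage step consumes is a flat array of `{name, unit, value}` objects. A hypothetical two-entry file — names follow the prefixing scheme above, values and route names invented — would look like:

```json
[
  {
    "name": "Core: / - RPS",
    "unit": "requests/sec (negated)",
    "value": -1234.56
  },
  {
    "name": "Pro: NodeRenderer: server_side_hello_world (RSC) - p99 latency",
    "unit": "ms",
    "value": 42.1
  }
]
```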