diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 0000000000..d6a62014a8 --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,540 @@ +name: Benchmark Workflow + +on: + workflow_dispatch: + inputs: + routes: + description: 'Comma-separated routes to benchmark (e.g., "/,/hello"). Leave empty to auto-detect from Rails.' + required: false + type: string + rate: + description: 'Requests per second (use "max" for maximum throughput)' + required: false + default: 'max' + type: string + duration: + description: 'Duration (e.g., "30s", "1m", "90s")' + required: false + default: '30s' + type: string + request_timeout: + description: 'Request timeout (e.g., "60s", "1m", "90s")' + required: false + default: '60s' + type: string + connections: + description: 'Concurrent connections/virtual users (also used as max)' + required: false + default: 10 + type: number + web_concurrency: + description: 'Number of Puma worker processes' + required: false + default: 4 + type: number + rails_threads: + description: 'Number of Puma threads (min and max will be same)' + required: false + default: 3 + type: number + app_version: + description: 'Which app version to benchmark' + required: false + default: 'both' + type: choice + options: + - 'both' + - 'core_only' + - 'pro_only' + - 'pro_rails_only' + - 'pro_node_renderer_only' + push: + branches: + - master + paths-ignore: + - '**.md' + - 'docs/**' + pull_request: + types: [opened, synchronize, reopened, labeled] + paths-ignore: + - '**.md' + - 'docs/**' +env: + RUBY_VERSION: '3.3.7' + BUNDLER_VERSION: '2.5.4' + K6_VERSION: '1.4.2' + VEGETA_VERSION: '12.13.0' + # Determine which apps/benchmarks to run (default is 'both' for all triggers) + RUN_CORE: ${{ contains(fromJSON('["both", "core_only"]'), github.event.inputs.app_version || 'both') && 'true' || '' }} + RUN_PRO: ${{ (github.event.inputs.app_version || 'both') != 'core_only' && 'true' || '' }} + RUN_PRO_RAILS: ${{ 
contains(fromJSON('["both", "pro_only", "pro_rails_only"]'), github.event.inputs.app_version || 'both') && 'true' || '' }} + RUN_PRO_NODE_RENDERER: ${{ contains(fromJSON('["both", "pro_only", "pro_node_renderer_only"]'), github.event.inputs.app_version || 'both') && 'true' || '' }} + # Benchmark parameters (defaults in bench.rb unless overridden here for CI) + ROUTES: ${{ github.event.inputs.routes }} + RATE: ${{ github.event.inputs.rate || 'max' }} + DURATION: ${{ github.event.inputs.duration }} + REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }} + CONNECTIONS: ${{ github.event.inputs.connections }} + MAX_CONNECTIONS: ${{ github.event.inputs.connections }} + WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} + RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} + RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} + +jobs: + benchmark: + # Run on: push to master, workflow_dispatch, or PRs with 'full-ci' or 'benchmark' labels + if: | + github.event_name == 'push' || + github.event_name == 'workflow_dispatch' || + contains(github.event.pull_request.labels.*.name, 'full-ci') || + contains(github.event.pull_request.labels.*.name, 'benchmark') + runs-on: ubuntu-latest + env: + SECRET_KEY_BASE: 'dummy-secret-key-for-ci-testing-not-used-in-production' + REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE_V2 }} + + steps: + # ============================================ + # STEP 1: CHECKOUT CODE + # ============================================ + - name: Checkout repository + uses: actions/checkout@v4 + + # ============================================ + # STEP 2: INSTALL BENCHMARKING TOOLS + # ============================================ + + - name: Add tools directory to PATH + run: | + mkdir -p ~/bin + echo "$HOME/bin" >> $GITHUB_PATH + + - name: Cache Vegeta binary + id: cache-vegeta + if: env.RUN_PRO + uses: actions/cache@v4 + with: + path: ~/bin/vegeta + key: vegeta-${{ runner.os }}-${{ runner.arch 
}}-${{ env.VEGETA_VERSION }} + + - name: Install Vegeta + if: env.RUN_PRO && steps.cache-vegeta.outputs.cache-hit != 'true' + run: | + echo "📦 Installing Vegeta v${VEGETA_VERSION}" + + # Download and extract vegeta binary + wget -q https://github.com/tsenart/vegeta/releases/download/v${VEGETA_VERSION}/vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz + tar -xzf vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz + + # Store in cache directory + mv vegeta ~/bin/ + + - name: Setup k6 + uses: grafana/setup-k6-action@v1 + with: + k6-version: ${{ env.K6_VERSION }} + + # ============================================ + # STEP 3: START APPLICATION SERVER + # ============================================ + + - name: Setup Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ env.RUBY_VERSION }} + bundler: ${{ env.BUNDLER_VERSION }} + + - name: Get gem home directory + run: echo "GEM_HOME_PATH=$(gem env home)" >> $GITHUB_ENV + + - name: Cache foreman gem + id: cache-foreman + uses: actions/cache@v4 + with: + path: ${{ env.GEM_HOME_PATH }} + key: foreman-gem-${{ runner.os }}-ruby-${{ env.RUBY_VERSION }} + + - name: Install foreman + if: steps.cache-foreman.outputs.cache-hit != 'true' + run: gem install foreman + + - name: Fix dependency for libyaml-dev + run: sudo apt install libyaml-dev -y + + # Follow https://github.com/pnpm/action-setup?tab=readme-ov-file#use-cache-to-reduce-installation-time + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + cache: true + cache_dependency_path: '**/pnpm-lock.yaml' + run_install: false + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: '22' + + - name: Print system information + run: | + echo "Linux release: "; cat /etc/issue + echo "Current user: "; whoami + echo "Current directory: "; pwd + echo "Ruby version: "; ruby -v + echo "Node version: "; node -v + echo "Pnpm version: "; pnpm --version + echo "Bundler version: "; bundle --version + + - name: Install Node modules with Pnpm for all packages + run: | + pnpm 
install --recursive --frozen-lockfile + pnpm add --global yalc + + - name: yalc publish for react-on-rails + run: cd packages/react-on-rails && yalc publish + + - name: Cache core dummy app node modules + if: env.RUN_CORE + uses: actions/cache@v4 + with: + path: react_on_rails/spec/dummy/node_modules + key: v4-core-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails/spec/dummy/pnpm-lock.yaml') }} + + - name: Install Node modules for the dummy app + if: env.RUN_CORE + run: | + cd react_on_rails/spec/dummy + yalc add --link react-on-rails + pnpm install + + - name: Save Core dummy app ruby gems to cache + if: env.RUN_CORE + uses: actions/cache@v4 + with: + path: react_on_rails/spec/dummy/vendor/bundle + key: v4-core-dummy-app-gem-cache-${{ hashFiles('react_on_rails/spec/dummy/Gemfile.lock') }} + + - name: Install Ruby Gems for Core dummy app + if: env.RUN_CORE + run: | + cd react_on_rails/spec/dummy + bundle config set path vendor/bundle + bundle config set frozen true + bundle _${BUNDLER_VERSION}_ install --jobs=4 --retry=3 + + - name: Prepare Core production assets + if: env.RUN_CORE + run: | + set -e # Exit on any error + echo "🔨 Building production assets..." + cd react_on_rails/spec/dummy + + if ! bin/prod-assets; then + echo "❌ ERROR: Failed to build production assets" + exit 1 + fi + + echo "✅ Production assets built successfully" + + - name: Start Core production server + if: env.RUN_CORE + run: | + set -e # Exit on any error + echo "🚀 Starting production server..." + cd react_on_rails/spec/dummy + + # Start server in background (Core uses rails directly, not foreman) + bin/prod & + echo "Server started in background" + + # Wait for server to be ready (max 30 seconds) + echo "⏳ Waiting for server to be ready..." + for i in {1..30}; do + if curl -fsS http://localhost:3001 > /dev/null; then + echo "✅ Server is ready and responding" + exit 0 + fi + echo " Attempt $i/30: Server not ready yet..." 
+ sleep 1 + done + + echo "❌ ERROR: Server failed to start within 30 seconds" + exit 1 + + # ============================================ + # STEP 4: RUN CORE BENCHMARKS + # ============================================ + + - name: Execute Core benchmark suite + if: env.RUN_CORE + timeout-minutes: 120 + run: | + set -e # Exit on any error + echo "🏃 Running Core benchmark suite..." + + if ! ruby benchmarks/bench.rb; then + echo "❌ ERROR: Benchmark execution failed" + exit 1 + fi + + echo "✅ Benchmark suite completed successfully" + + - name: Validate Core benchmark results + if: env.RUN_CORE + run: | + set -e + echo "🔍 Validating benchmark results..." + + if [ ! -f "bench_results/summary.txt" ]; then + echo "❌ ERROR: benchmark summary file not found" + exit 1 + fi + + echo "✅ Benchmark results found" + echo "" + echo "📊 Summary:" + column -t -s $'\t' bench_results/summary.txt + echo "" + echo "Generated files:" + ls -lh bench_results/ + + - name: Convert Core benchmark results to JSON + if: env.RUN_CORE + run: | + ruby benchmarks/convert_to_benchmark_json.rb "Core: " + + - name: Upload Core benchmark results + uses: actions/upload-artifact@v4 + if: env.RUN_CORE && always() + with: + name: benchmark-core-results-${{ github.run_number }} + path: bench_results/ + retention-days: 30 + if-no-files-found: warn + + - name: Stop Core production server + if: env.RUN_CORE && always() + run: | + echo "🛑 Stopping Core production server..." + # Kill all server-related processes (safe in isolated CI environment) + pkill -9 -f "ruby|node|foreman|overmind|puma" || true + + # Wait for port 3001 to be free + echo "⏳ Waiting for port 3001 to be free..." + for _ in {1..10}; do + if ! 
lsof -ti:3001 > /dev/null 2>&1; then + echo "✅ Port 3001 is now free" + exit 0 + fi + sleep 1 + done + + echo "❌ ERROR: Port 3001 is still in use after 10 seconds" + echo "Processes using port 3001:" + lsof -i:3001 || true + exit 1 + + # ============================================ + # STEP 5: SETUP PRO APPLICATION SERVER + # ============================================ + - name: Cache Pro dummy app node modules + if: env.RUN_PRO + uses: actions/cache@v4 + with: + path: react_on_rails_pro/spec/dummy/node_modules + key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/pnpm-lock.yaml') }} + + - name: yalc publish for react-on-rails-pro + if: env.RUN_PRO + run: cd packages/react-on-rails-pro && yalc publish + + - name: Install Node modules with Pnpm for Pro dummy app + if: env.RUN_PRO + run: | + cd react_on_rails_pro/spec/dummy + yalc add --link react-on-rails-pro + pnpm install + + - name: Cache Pro dummy app Ruby gems + if: env.RUN_PRO + uses: actions/cache@v4 + with: + path: react_on_rails_pro/spec/dummy/vendor/bundle + key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }} + + - name: Install Ruby Gems for Pro dummy app + if: env.RUN_PRO + run: | + cd react_on_rails_pro/spec/dummy + bundle config set path vendor/bundle + bundle config set frozen true + bundle _${BUNDLER_VERSION}_ install --jobs=4 --retry=3 + + - name: Generate file-system based entrypoints for Pro + if: env.RUN_PRO + run: cd react_on_rails_pro/spec/dummy && bundle exec rake react_on_rails:generate_packs + + - name: Prepare Pro production assets + if: env.RUN_PRO + run: | + set -e + echo "🔨 Building Pro production assets..." + cd react_on_rails_pro/spec/dummy + + if ! bin/prod-assets; then + echo "❌ ERROR: Failed to build production assets" + exit 1 + fi + + echo "✅ Production assets built successfully" + + - name: Start Pro production server + if: env.RUN_PRO + run: | + set -e + echo "🚀 Starting Pro production server..." 
+ cd react_on_rails_pro/spec/dummy + + # Start server in background + bin/prod & + echo "Server started in background" + + # Wait for server to be ready (max 30 seconds) + echo "⏳ Waiting for server to be ready..." + for i in {1..30}; do + if curl -fsS http://localhost:3001 > /dev/null; then + echo "✅ Server is ready and responding" + exit 0 + fi + echo " Attempt $i/30: Server not ready yet..." + sleep 1 + done + + echo "❌ ERROR: Server failed to start within 30 seconds" + exit 1 + + # ============================================ + # STEP 6: RUN PRO BENCHMARKS + # ============================================ + + - name: Execute Pro benchmark suite + if: env.RUN_PRO_RAILS + timeout-minutes: 120 + run: | + set -e + echo "🏃 Running Pro benchmark suite..." + + if ! PRO=true ruby benchmarks/bench.rb; then + echo "❌ ERROR: Benchmark execution failed" + exit 1 + fi + + echo "✅ Benchmark suite completed successfully" + + - name: Execute Pro Node Renderer benchmark suite + if: env.RUN_PRO_NODE_RENDERER + timeout-minutes: 30 + run: | + set -e + echo "🏃 Running Pro Node Renderer benchmark suite..." + + if ! ruby benchmarks/bench-node-renderer.rb; then + echo "❌ ERROR: Node Renderer benchmark execution failed" + exit 1 + fi + + echo "✅ Node Renderer benchmark suite completed successfully" + + - name: Validate Pro benchmark results + if: env.RUN_PRO + run: | + set -e + echo "🔍 Validating benchmark results..." + + if [ "$RUN_PRO_RAILS" = "true" ]; then + if [ ! -f "bench_results/summary.txt" ]; then + echo "❌ ERROR: Rails benchmark summary file not found" + exit 1 + fi + echo "📊 Rails Benchmark Summary:" + column -t -s $'\t' bench_results/summary.txt + echo "" + fi + + if [ "$RUN_PRO_NODE_RENDERER" = "true" ]; then + if [ ! 
-f "bench_results/node_renderer_summary.txt" ]; then + echo "❌ ERROR: Node Renderer benchmark summary file not found" + exit 1 + fi + echo "📊 Node Renderer Benchmark Summary:" + column -t -s $'\t' bench_results/node_renderer_summary.txt + echo "" + fi + + echo "✅ Benchmark results validated" + echo "" + echo "Generated files:" + ls -lh bench_results/ + + - name: Convert Pro benchmark results to JSON + if: env.RUN_PRO + run: | + ruby benchmarks/convert_to_benchmark_json.rb "Pro: " --append + + - name: Upload Pro benchmark results + uses: actions/upload-artifact@v4 + if: env.RUN_PRO && always() + with: + name: benchmark-pro-results-${{ github.run_number }} + path: bench_results/ + retention-days: 30 + if-no-files-found: warn + + - name: Stop Pro production server + if: env.RUN_PRO && always() + run: | + echo "🛑 Stopping Pro production server..." + # Kill all server-related processes (safe in isolated CI environment) + pkill -9 -f "ruby|node|foreman|overmind|puma" || true + echo "✅ Server stopped" + + # ============================================ + # STEP 7: STORE BENCHMARK DATA + # ============================================ + - name: Store all benchmark results + uses: benchmark-action/github-action-benchmark@v1 + with: + name: React on Rails Benchmarks + tool: customSmallerIsBetter + output-file-path: bench_results/benchmark.json + gh-pages-branch: benchmark-data + benchmark-data-dir-path: docs/benchmarks + alert-threshold: '150%' + github-token: ${{ secrets.GITHUB_TOKEN }} + comment-on-alert: true + alert-comment-cc-users: '@alexeyr-ci2' + fail-on-alert: true + summary-always: true + # New changes should only be actually recorded on pushes to master + auto-push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }} + + # ============================================ + # STEP 8: WORKFLOW COMPLETION + # ============================================ + - name: Workflow summary + if: always() + run: | + echo "📋 Benchmark Workflow Summary" + echo 
"====================================" + echo "Status: ${{ job.status }}" + echo "Run number: ${{ github.run_number }}" + echo "Triggered by: ${{ github.actor }}" + echo "Branch: ${{ github.ref_name }}" + echo "Run Core: ${{ env.RUN_CORE || 'false' }}" + echo "Run Pro Rails: ${{ env.RUN_PRO_RAILS || 'false' }}" + echo "Run Pro Node Renderer: ${{ env.RUN_PRO_NODE_RENDERER || 'false' }}" + echo "" + if [ "${{ job.status }}" == "success" ]; then + echo "✅ All steps completed successfully" + else + echo "❌ Workflow encountered errors - check logs above" + fi diff --git a/.gitignore b/.gitignore index 2df55b9e3f..8cd05104c3 100644 --- a/.gitignore +++ b/.gitignore @@ -76,6 +76,9 @@ react_on_rails/spec/dummy/**/*.res.js react_on_rails_pro/spec/dummy/.bsb.lock react_on_rails_pro/spec/dummy/**/*.res.js +# Performance test results +/bench_results + # Generated by ROR FS-based Registry generated diff --git a/.prettierignore b/.prettierignore index ac70a7af5b..0a613409a7 100644 --- a/.prettierignore +++ b/.prettierignore @@ -17,6 +17,7 @@ react_on_rails/spec/dummy/public **/.yalc/** **/*generated* *.res.js +**/vendor # Prettier doesn't understand ERB syntax in YAML files and can damage templates *.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index 20ea6ea4a6..894d780db3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,10 @@ After a release, run `/update-changelog` in Claude Code to analyze commits, writ - **React Server Components Security Vulnerabilities (CVE-2025-55183, CVE-2025-55184, CVE-2025-67779)**: Upgraded React to v19.0.3 and react-on-rails-rsc to v19.0.4 to fix three critical security vulnerabilities in React Server Components. CVE-2025-55183 (CVSS 5.3) involved source code exposure when server function references were stringified, potentially leaking hardcoded secrets. CVE-2025-55184 and CVE-2025-67779 (both CVSS 7.5) involved denial of service attacks via cyclic promise references that could cause infinite loops and 100% CPU consumption. 
The fixes implement dual-layer cycle detection with a 1,000-iteration depth limit and override `toString()` methods on server references to return safe placeholders. Addresses [issue 2223](https://github.com/shakacode/react_on_rails/issues/2223). [PR 2233](https://github.com/shakacode/react_on_rails/pull/2233) by [AbanoubGhadban](https://github.com/AbanoubGhadban). +#### Developer (Contributors Only) + +- **Benchmarking in CI**: A benchmark workflow will now run on all pushes to master, as well as PRs with `benchmark` or `full-ci` labels. [PR 1868](https://github.com/shakacode/react_on_rails/pull/1868) by [alexeyr-ci2](https://github.com/alexeyr-ci2) + ### [16.2.0.beta.20] - 2025-12-12 #### Fixed diff --git a/CLAUDE.md b/CLAUDE.md index 1ebbea84ee..0f2460f780 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -157,6 +157,8 @@ Pre-commit hooks automatically run: - All linters: `rake lint` (runs ESLint and RuboCop) - ESLint only: `pnpm run lint` or `rake lint:eslint` - RuboCop only: `rake lint:rubocop` + - GitHub Action files (workflows, reusable actions, etc.): `actionlint` + - YAML files: `yamllint` (or validate the syntax with Ruby if it isn't installed). Do _not_ try to run RuboCop on `.yml` files. - **Code Formatting**: - Format code with Prettier: `rake autofix` - Check formatting without fixing: `pnpm run format.listDifferent` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 418cb6bdf6..2d0209a5ec 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -590,6 +590,75 @@ Removes the `full-ci` label and returns to standard CI behavior: - **Force-pushes:** The `/run-skipped-ci` command adds the `full-ci` label to your PR. If you force-push after commenting, the initial workflow run will test the old commit, but subsequent pushes will automatically run full CI because the label persists. - **Branch operations:** Avoid deleting or force-pushing branches while workflows are running, as this may cause failures. 
+### Benchmarking + +React on Rails includes a performance benchmark workflow that measures RPS (requests per second) and latency for both Core and Pro versions. + +#### When Benchmarks Run + +- **Automatically on master**: Benchmarks run on every push to master +- **On PRs with labels**: Add the `benchmark` or `full-ci` label to your PR to run benchmarks +- **Manual trigger**: Use `gh workflow run` to run benchmarks with custom parameters (see [https://github.com/cli/cli#installation](https://github.com/cli/cli#installation) if you don't have `gh`): + + ```bash + # Run with default parameters + gh workflow run benchmark.yml + + # Run with custom parameters + gh workflow run benchmark.yml \ + -f rate=100 \ + -f duration=60s \ + -f connections=20 \ + -f app_version=core_only + ``` + +#### Regression Detection + +When benchmarks run, the [github-action-benchmark](https://github.com/benchmark-action/github-action-benchmark) action compares results against historical data. If performance regresses by more than 50%, the workflow will: + +1. **Fail the CI check** with `fail-on-alert: true` +2. **Post a comment on the PR** explaining the regression +3. **Tag reviewers** for attention + +This helps catch performance regressions before they reach production. + +#### Running Benchmarks Locally + +**Prerequisites:** Install [k6](https://k6.io/docs/get-started/installation/) and [Vegeta](https://github.com/tsenart/vegeta#install). + +You can also run the server in a separate terminal instead of backgrounding it. + +**Core benchmarks:** + +```bash +cd react_on_rails/spec/dummy +bin/prod-assets # Build production assets +bin/prod & # Start production server on port 3001 +SERVER_PID=$! +cd ../.. +ruby benchmarks/bench.rb +kill $SERVER_PID +``` + +**Pro benchmarks:** + +```bash +cd react_on_rails_pro/spec/dummy +bin/prod-assets +bin/prod & # Starts Rails server and node renderer +SERVER_PID=$! +cd ../.. 
+PRO=true ruby benchmarks/bench.rb # Rails benchmarks +ruby benchmarks/bench-node-renderer.rb # Node renderer benchmarks +kill $SERVER_PID +``` + +**Configuration:** Both scripts support environment variables for customization (rate, duration, connections, etc.). See the script headers in [`benchmarks/bench.rb`](benchmarks/bench.rb) and [`benchmarks/bench-node-renderer.rb`](benchmarks/bench-node-renderer.rb) for available options. For debugging, you may want lower `DURATION` and/or specific `ROUTES`: + +```bash +DURATION=5s ROUTES=/ ruby benchmarks/bench.rb +``` + ### Install Generator In your Rails app add this gem with a path to your fork. diff --git a/benchmarks/bench-node-renderer.rb b/benchmarks/bench-node-renderer.rb new file mode 100755 index 0000000000..fecb8f3205 --- /dev/null +++ b/benchmarks/bench-node-renderer.rb @@ -0,0 +1,338 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# Benchmark script for React on Rails Pro Node Renderer +# Uses Vegeta with HTTP/2 Cleartext (h2c) support + +require "English" +require "open3" +require "socket" +require_relative "lib/benchmark_helpers" + +# Read configuration from source files +def read_protocol_version + package_json_path = File.expand_path( + "../packages/react-on-rails-pro-node-renderer/package.json", + __dir__ + ) + package_json = JSON.parse(File.read(package_json_path)) + package_json["protocolVersion"] || raise("protocolVersion not found in #{package_json_path}") +end + +def read_password_from_config + config_path = File.expand_path( + "../react_on_rails_pro/spec/dummy/client/node-renderer.js", + __dir__ + ) + config_content = File.read(config_path) + match = config_content.match(/password:\s*['"]([^'"]+)['"]/) + match ? 
match[1] : raise("password not found in #{config_path}") +end + +# Benchmark parameters +PASSWORD = read_password_from_config +BASE_URL = env_or_default("BASE_URL", "localhost:3800") +PROTOCOL_VERSION = read_protocol_version + +# Test cases: JavaScript expressions to evaluate +# Format: { name: "test_name", request: "javascript_code", rsc: true/false } +# rsc: true means the test requires an RSC bundle, false means non-RSC bundle +TEST_CASES = [ + { name: "simple_eval", rsc: false, request: "2+2" }, + { + name: "react_ssr", + rsc: false, + request: "ReactOnRails.serverRenderReactComponent(" \ + '{name:"HelloWorld",props:{helloWorldData:{name:"Benchmark"}},domNodeId:"app"})' + } +].freeze + +# Benchmark configuration +RATE = env_or_default("RATE", "max") +CONNECTIONS = env_or_default("CONNECTIONS", 10).to_i +MAX_CONNECTIONS = env_or_default("MAX_CONNECTIONS", CONNECTIONS).to_i +DURATION = env_or_default("DURATION", "30s") +REQUEST_TIMEOUT = env_or_default("REQUEST_TIMEOUT", "60s") + +OUTDIR = "bench_results" +SUMMARY_TXT = "#{OUTDIR}/node_renderer_summary.txt".freeze + +# Local wrapper for add_summary_line to use local constant +def add_to_summary(*parts) + add_summary_line(SUMMARY_TXT, *parts) +end + +# Find all production bundles in the node-renderer bundles directory +def find_all_production_bundles + bundles_dir = File.expand_path( + "../react_on_rails_pro/spec/dummy/.node-renderer-bundles", + __dir__ + ) + + unless Dir.exist?(bundles_dir) + raise "Node renderer bundles directory not found: #{bundles_dir}\n" \ + "Make sure the Pro dummy app has been compiled with NODE_ENV=production" + end + + # Bundle directories have format: - (e.g., 623229694671afc1ac9137f2715bb654-production) + # Filter to only include production bundles with hash-like names + bundles = Dir.children(bundles_dir).select do |entry| + File.directory?(File.join(bundles_dir, entry)) && + entry.match?(/^[a-f0-9]+-production$/) + end + + raise "No production bundles found in #{bundles_dir}" if 
bundles.empty? + + bundles +end + +# Check if a bundle is an RSC bundle by evaluating ReactOnRails.isRSCBundle +# Returns true/false/nil (nil means couldn't determine) +# rubocop:disable Style/ReturnNilInPredicateMethodDefinition +def rsc_bundle?(bundle_timestamp) + url = render_url(bundle_timestamp, "rsc_check") + body = render_body("ReactOnRails.isRSCBundle") + + # Use curl with h2c since Net::HTTP doesn't support HTTP/2 + result, status = Open3.capture2( + "curl", "-s", "--http2-prior-knowledge", "-X", "POST", + "-H", "Content-Type: application/x-www-form-urlencoded", + "-d", body, + url + ) + return nil unless status.success? + + # The response should be "true" or "false" + result.strip == "true" +rescue StandardError => e + puts " Warning: Could not determine RSC status for #{bundle_timestamp}: #{e.message}" + nil +end +# rubocop:enable Style/ReturnNilInPredicateMethodDefinition + +# Categorize bundles into RSC and non-RSC +# Stops early once we find one of each type +def categorize_bundles(bundles) + rsc_bundle = nil + non_rsc_bundle = nil + + bundles.each do |bundle| + # Stop if we already have both types + break if rsc_bundle && non_rsc_bundle + + puts " Checking bundle #{bundle}..." + is_rsc = rsc_bundle?(bundle) + if is_rsc.nil? 
+ puts " Could not determine bundle type, skipping" + elsif is_rsc + puts " RSC bundle" + rsc_bundle ||= bundle + else + puts " Non-RSC bundle" + non_rsc_bundle ||= bundle + end + end + + [rsc_bundle, non_rsc_bundle] +end + +# URL-encode special characters for form body +def url_encode(str) + URI.encode_www_form_component(str) +end + +# Build render URL for a bundle and render name +def render_url(bundle_timestamp, render_name) + "http://#{BASE_URL}/bundles/#{bundle_timestamp}/render/#{render_name}" +end + +# Build request body for a rendering request +def render_body(rendering_request) + [ + "protocolVersion=#{url_encode(PROTOCOL_VERSION)}", + "password=#{url_encode(PASSWORD)}", + "renderingRequest=#{url_encode(rendering_request)}" + ].join("&") +end + +# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity + +# Run Vegeta benchmark for a single test case +def run_vegeta_benchmark(test_case, bundle_timestamp) + name = test_case[:name] + request = test_case[:request] + + puts "\n===> Vegeta h2c: #{name}" + + target_url = render_url(bundle_timestamp, name) + body = render_body(request) + + # Create temp files for Vegeta + targets_file = "#{OUTDIR}/#{name}_vegeta_targets.txt" + body_file = "#{OUTDIR}/#{name}_vegeta_body.txt" + vegeta_bin = "#{OUTDIR}/#{name}_vegeta.bin" + vegeta_json = "#{OUTDIR}/#{name}_vegeta.json" + vegeta_txt = "#{OUTDIR}/#{name}_vegeta.txt" + + # Write body file + File.write(body_file, body) + + # Write targets file (Vegeta format with @body reference) + File.write(targets_file, <<~TARGETS) + POST #{target_url} + Content-Type: application/x-www-form-urlencoded + @#{body_file} + TARGETS + + # Configure Vegeta arguments for max rate + is_max_rate = RATE == "max" + vegeta_args = + if is_max_rate + ["-rate=0", "-workers=#{CONNECTIONS}", "-max-workers=#{CONNECTIONS}"] + else + ["-rate=#{RATE}", "-workers=#{CONNECTIONS}", "-max-workers=#{MAX_CONNECTIONS}"] + end + + # Run Vegeta attack with 
h2c + vegeta_cmd = [ + "vegeta", "attack", + "-targets=#{targets_file}", + *vegeta_args, + "-duration=#{DURATION}", + "-timeout=#{REQUEST_TIMEOUT}", + "-h2c", # HTTP/2 Cleartext (required for node renderer) + "-max-body=0", + "> #{vegeta_bin}" + ].join(" ") + + raise "Vegeta attack failed for #{name}" unless system(vegeta_cmd) + + # Generate text report (display and save) + raise "Vegeta text report failed" unless system("vegeta report #{vegeta_bin} | tee #{vegeta_txt}") + + # Generate JSON report + raise "Vegeta JSON report failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") + + # Delete the large binary file to save disk space + FileUtils.rm_f(vegeta_bin) + + # Parse results + vegeta_data = parse_json_file(vegeta_json, "Vegeta") + vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" + vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_max = vegeta_data.dig("latencies", "max")&./(1_000_000.0)&.round(2) || "missing" + vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" + + [vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_max, vegeta_status] +rescue StandardError => e + puts "Error: #{e.message}" + failure_metrics(e) +end + +# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity + +# Main execution + +# Validate parameters +validate_rate(RATE) +validate_positive_integer(CONNECTIONS, "CONNECTIONS") +validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") +validate_duration(DURATION, "DURATION") +validate_duration(REQUEST_TIMEOUT, "REQUEST_TIMEOUT") + +if RATE == "max" && CONNECTIONS != MAX_CONNECTIONS + raise "For RATE=max, CONNECTIONS must equal MAX_CONNECTIONS (got #{CONNECTIONS} and 
#{MAX_CONNECTIONS})" +end + +# Check required tools +check_required_tools(%w[vegeta curl column tee]) + +# Wait for node renderer to be ready +# Note: Node renderer only speaks HTTP/2, but we can still check with a simple GET +# that will fail - we just check it doesn't refuse connection +puts "\nWaiting for node renderer at #{BASE_URL}..." +start_time = Time.now +timeout_sec = 60 +loop do + # Try a simple TCP connection to check if server is up + + Socket.tcp(BASE_URL.split(":").first, BASE_URL.split(":").last.to_i, connect_timeout: 5, &:close) + puts " Node renderer is accepting connections" + break +rescue StandardError => e + elapsed = Time.now - start_time + puts " Attempt at #{elapsed.round(2)}s: #{e.message}" + raise "Node renderer at #{BASE_URL} not responding within #{timeout_sec}s" if elapsed > timeout_sec + + sleep 1 +end + +# Find and categorize bundles +puts "\nDiscovering and categorizing bundles..." +all_bundles = find_all_production_bundles +puts "Found #{all_bundles.length} production bundle(s)" +rsc_bundle, non_rsc_bundle = categorize_bundles(all_bundles) + +rsc_tests = TEST_CASES.select { |tc| tc[:rsc] } +non_rsc_tests = TEST_CASES.reject { |tc| tc[:rsc] } + +if rsc_tests.any? && rsc_bundle.nil? + puts "Warning: RSC tests requested but no RSC bundle found, skipping: #{rsc_tests.map { |tc| tc[:name] }.join(', ')}" + rsc_tests = [] +end + +if non_rsc_tests.any? && non_rsc_bundle.nil? + skipped = non_rsc_tests.map { |tc| tc[:name] }.join(", ") + puts "Warning: Non-RSC tests requested but no non-RSC bundle found, skipping: #{skipped}" + non_rsc_tests = [] +end + +# Print parameters +print_params( + "BASE_URL" => BASE_URL, + "RSC_BUNDLE" => rsc_bundle || "none", + "NON_RSC_BUNDLE" => non_rsc_bundle || "none", + "RATE" => RATE, + "DURATION" => DURATION, + "REQUEST_TIMEOUT" => REQUEST_TIMEOUT, + "CONNECTIONS" => CONNECTIONS, + "MAX_CONNECTIONS" => MAX_CONNECTIONS, + "RSC_TESTS" => rsc_tests.map { |tc| tc[:name] }.join(", ").then { |s| s.empty? ? 
"none" : s }, + "NON_RSC_TESTS" => non_rsc_tests.map { |tc| tc[:name] }.join(", ").then { |s| s.empty? ? "none" : s } +) + +# Create output directory +FileUtils.mkdir_p(OUTDIR) + +# Initialize summary file +File.write(SUMMARY_TXT, "") +add_to_summary("Test", "Bundle", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "max(ms)", "Status") + +# Run non-RSC benchmarks +non_rsc_tests.each do |test_case| + print_separator + puts "Benchmarking (non-RSC): #{test_case[:name]}" + puts " Bundle: #{non_rsc_bundle}" + puts " Request: #{test_case[:request]}" + print_separator + + metrics = run_vegeta_benchmark(test_case, non_rsc_bundle) + add_to_summary(test_case[:name], "non-RSC", *metrics) +end + +# Run RSC benchmarks +rsc_tests.each do |test_case| + print_separator + puts "Benchmarking (RSC): #{test_case[:name]}" + puts " Bundle: #{rsc_bundle}" + puts " Request: #{test_case[:request]}" + print_separator + + metrics = run_vegeta_benchmark(test_case, rsc_bundle) + add_to_summary(test_case[:name], "RSC", *metrics) +end + +# Display summary +display_summary(SUMMARY_TXT) diff --git a/benchmarks/bench.rb b/benchmarks/bench.rb new file mode 100755 index 0000000000..42a9e26a22 --- /dev/null +++ b/benchmarks/bench.rb @@ -0,0 +1,224 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require "open3" +require "shellwords" +require_relative "lib/benchmark_helpers" + +# Benchmark parameters +PRO = ENV.fetch("PRO", "false") == "true" +APP_DIR = PRO ? 
"react_on_rails_pro/spec/dummy" : "react_on_rails/spec/dummy" +ROUTES = env_or_default("ROUTES", nil) +BASE_URL = env_or_default("BASE_URL", "localhost:3001") +# requests per second; if "max" will get maximum number of queries instead of a fixed rate +RATE = env_or_default("RATE", "50") +# concurrent connections/virtual users +CONNECTIONS = env_or_default("CONNECTIONS", 10).to_i +# maximum connections/virtual users +MAX_CONNECTIONS = env_or_default("MAX_CONNECTIONS", CONNECTIONS).to_i +# benchmark duration (duration string like "30s", "1m", "90s") +DURATION = env_or_default("DURATION", "30s") +# request timeout (duration string as above) +REQUEST_TIMEOUT = env_or_default("REQUEST_TIMEOUT", "60s") + +OUTDIR = "bench_results" +SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze + +# Local wrapper for add_summary_line to use local constant +def add_to_summary(*parts) + add_summary_line(SUMMARY_TXT, *parts) +end + +# Check if a route has required parameters (e.g., /rsc_payload/:component_name) +# Required parameters are :param NOT inside parentheses +# Optional parameters are inside parentheses like (/:optional_param) +def route_has_required_params?(path) + # Remove optional parameter sections (anything in parentheses) + path_without_optional = path.gsub(/\([^)]*\)/, "") + # Check if remaining path contains :param + path_without_optional.include?(":") +end + +# Strip optional parameters from route path for use in URLs +# e.g., "/route(/:optional)(.:format)" -> "/route" +def strip_optional_params(route) + route.gsub(/\([^)]*\)/, "") +end + +# Sanitize route name for use in filenames +# Removes characters that GitHub Actions disallows in artifacts and shell metacharacters +def sanitize_route_name(route) + name = strip_optional_params(route).gsub(%r{^/}, "").tr("/", "_") + name = "root" if name.empty? + # Replace invalid characters: " : < > | * ? \r \n $ ` ; & ( ) [ ] { } ! 
# + name.gsub(/[":.<>|*?\r\n$`;&#!()\[\]{}]+/, "_").squeeze("_").gsub(/^_|_$/, "") +end + +# Get routes from the Rails app filtered by pages# and react_router# controllers +def get_benchmark_routes(app_dir) + routes_output, status = Open3.capture2e("bundle", "exec", "rails", "routes", chdir: app_dir) + raise "Failed to get routes from #{app_dir}" unless status.success? + + routes = [] + routes_output.each_line do |line| + # Parse lines like: "server_side_hello_world GET /server_side_hello_world(.:format) pages#server_side_hello_world" + # We want GET routes only (not POST, etc.) served by pages# or react_router# controllers + # Capture path up to (.:format) part using [^(\s]+ (everything except '(' and whitespace) + next unless (match = line.match(/GET\s+([^(\s]+).*(pages|react_router)#/)) + + path = match[1] + path = "/" if path.empty? # Handle root route + + # Skip routes with required parameters (e.g., /rsc_payload/:component_name) + if route_has_required_params?(path) + puts "Skipping route with required parameters: #{path}" + next + end + + # Skip "_for_testing" routes (test-only endpoints not meant for benchmarking) + if path.include?("_for_testing") + puts "Skipping test-only route: #{path}" + next + end + + routes << path + end + raise "No pages# or react_router# routes found in #{app_dir}" if routes.empty? + + routes +end + +# Get all routes to benchmark +routes = + if ROUTES + ROUTES.split(",").map(&:strip).reject(&:empty?) + else + get_benchmark_routes(APP_DIR) + end + +raise "No routes to benchmark" if routes.empty? 
+ +validate_rate(RATE) +validate_positive_integer(CONNECTIONS, "CONNECTIONS") +validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") +validate_duration(DURATION, "DURATION") +validate_duration(REQUEST_TIMEOUT, "REQUEST_TIMEOUT") + +raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS + +# Check required tools are installed +check_required_tools(%w[k6 column tee]) + +puts <<~PARAMS + Benchmark parameters: + - APP_DIR: #{APP_DIR} + - ROUTES: #{ROUTES || 'auto-detect from Rails'} + - BASE_URL: #{BASE_URL} + - RATE: #{RATE} + - DURATION: #{DURATION} + - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT} + - CONNECTIONS: #{CONNECTIONS} + - MAX_CONNECTIONS: #{MAX_CONNECTIONS} + - WEB_CONCURRENCY: #{ENV['WEB_CONCURRENCY'] || 'unset'} + - RAILS_MAX_THREADS: #{ENV['RAILS_MAX_THREADS'] || 'unset'} + - RAILS_MIN_THREADS: #{ENV['RAILS_MIN_THREADS'] || 'unset'} +PARAMS + +# Wait for the server to be ready +test_uri = URI.parse("http://#{BASE_URL}#{routes.first}") +wait_for_server(test_uri) +puts "Server is ready!" 
+ +FileUtils.mkdir_p(OUTDIR) + +# Validate RATE=max constraint +IS_MAX_RATE = RATE == "max" +if IS_MAX_RATE && CONNECTIONS != MAX_CONNECTIONS + raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" +end + +# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity + +# Benchmark a single route with k6 +def run_k6_benchmark(target, route_name) + puts "\n===> k6: #{route_name}" + + k6_script = File.expand_path("k6.ts", __dir__) + k6_summary_json = "#{OUTDIR}/#{route_name}_k6_summary.json" + k6_txt = "#{OUTDIR}/#{route_name}_k6.txt" + + # Build k6 command with environment variables + k6_env_vars = [ + "-e TARGET_URL=#{Shellwords.escape(target)}", + "-e RATE=#{RATE}", + "-e DURATION=#{DURATION}", + "-e CONNECTIONS=#{CONNECTIONS}", + "-e MAX_CONNECTIONS=#{MAX_CONNECTIONS}", + "-e REQUEST_TIMEOUT=#{REQUEST_TIMEOUT}" + ].join(" ") + + k6_command = "k6 run #{k6_env_vars} --summary-export=#{Shellwords.escape(k6_summary_json)} " \ + "--summary-trend-stats 'med,max,p(90),p(99)' #{k6_script}" + raise "k6 benchmark failed" unless system("#{k6_command} | tee #{Shellwords.escape(k6_txt)}") + + k6_data = parse_json_file(k6_summary_json, "k6") + k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing" + k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing" + k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing" + k6_p99 = k6_data.dig("metrics", "http_req_duration", "p(99)")&.round(2) || "missing" + k6_max = k6_data.dig("metrics", "http_req_duration", "max")&.round(2) || "missing" + + # Status: extract counts from checks (status_200, status_3xx, status_4xx, status_5xx) + k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0 + k6_checks = k6_data.dig("root_group", "checks") || {} + k6_known_count = 0 + k6_status_parts = k6_checks.filter_map do |name, check| + passes = check["passes"] || 0 + 
k6_known_count += passes + next if passes.zero? + + # Convert check names like "status_200" to "200", "status_4xx" to "4xx" + status_label = name.sub(/^status_/, "") + "#{status_label}=#{passes}" + end + k6_other = k6_reqs_total - k6_known_count + k6_status_parts << "other=#{k6_other}" if k6_other.positive? + k6_status = k6_status_parts.empty? ? "missing" : k6_status_parts.join(",") + + [k6_rps, k6_p50, k6_p90, k6_p99, k6_max, k6_status] +rescue StandardError => e + puts "Error: #{e.message}" + failure_metrics(e) +end + +# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity + +# Initialize summary file +File.write(SUMMARY_TXT, "") +add_to_summary("Route", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "max(ms)", "Status") + +# Run benchmarks for each route +routes.each do |route| + separator = "=" * 80 + puts "\n#{separator}" + puts "Benchmarking route: #{route}" + puts separator + + # Strip optional parameters from route for URL (e.g., "(/:locale)" -> "") + target = URI.parse("http://#{BASE_URL}#{strip_optional_params(route)}") + + # Warm up server for this route + puts "Warming up server for #{route} with 10 requests..." 
+ 10.times do + server_responding?(target) + sleep 0.5 + end + puts "Warm-up complete for #{route}" + + route_name = sanitize_route_name(route) + metrics = run_k6_benchmark(target, route_name) + add_to_summary(route, *metrics) +end + +puts "\nSummary saved to #{SUMMARY_TXT}" +system("column", "-t", "-s", "\t", SUMMARY_TXT) diff --git a/benchmarks/convert_to_benchmark_json.rb b/benchmarks/convert_to_benchmark_json.rb new file mode 100755 index 0000000000..7e57e2aa82 --- /dev/null +++ b/benchmarks/convert_to_benchmark_json.rb @@ -0,0 +1,173 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# Converts benchmark summary files to JSON format for github-action-benchmark +# Outputs a single file with all metrics using customSmallerIsBetter: +# - benchmark.json (customSmallerIsBetter) +# - RPS values are negated (so higher RPS = lower negative value = better) +# - Latencies are kept as-is (lower is better) +# - Failed percentage is kept as-is (lower is better) +# +# Usage: ruby convert_to_benchmark_json.rb [prefix] [--append] +# prefix: Optional prefix for benchmark names (e.g., "Core: " or "Pro: ") +# --append: Append to existing benchmark.json instead of overwriting + +require "json" + +BENCH_RESULTS_DIR = "bench_results" +PREFIX = ARGV[0] || "" +APPEND_MODE = ARGV.include?("--append") + +# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity + +# Parse a summary file and return array of hashes with metrics +# Expected format (tab-separated): +# Route RPS p50(ms) p90(ms) p99(ms) max(ms) Status +# or for node renderer: +# Test Bundle RPS p50(ms) p90(ms) p99(ms) max(ms) Status +def parse_summary_file(file_path, prefix: "") + return [] unless File.exist?(file_path) + + lines = File.readlines(file_path).map(&:strip).reject(&:empty?) 
# Calculate failed request percentage from a status breakdown string.
#
# status_str - e.g. "200=7508,302=100,5xx=10", or nil / "missing" when the
#              benchmark produced no status data (both yield 0.0).
#
# Failures are: Vegeta's "0" code (connection errors), 4xx, 5xx, and "other"
# (requests not matched by any k6 status check).
#
# Returns the failure percentage as a Float rounded to 2 decimals.
def calculate_failed_percentage(status_str)
  return 0.0 if status_str.nil? || status_str == "missing"

  total = 0
  failed = 0

  status_str.split(",").each do |part|
    code, count = part.split("=")
    count = count.to_i
    total += count

    # Consider 0 (for Vegeta), 4xx and 5xx as failures, also "other"
    failed += count if code.match?(/^[045]/) || code == "other"
  end

  return 0.0 if total.zero?

  (failed.to_f / total * 100).round(2)
end

# Convert parsed benchmark rows to github-action-benchmark's
# customSmallerIsBetter format:
#   - RPS is negated (higher RPS -> lower negative value -> better)
#   - latencies and the failure percentage are kept as-is (lower is better)
#
# results - array of hashes with :name, :rps, :p50, :p90, :p99, :failed_pct.
#
# Returns a flat array of { name:, unit:, value: } metric hashes (5 per row).
def to_unified_json(results)
  results.flat_map do |r|
    [
      {
        name: "#{r[:name]} - RPS",
        unit: "requests/sec (negated)",
        # BUGFIX: `.to_f` guards against a nil :rps (summary row missing the
        # RPS column), which previously raised NoMethodError on `-nil`.
        value: -(r[:rps].to_f)
      },
      { name: "#{r[:name]} - p50 latency", unit: "ms", value: r[:p50] },
      { name: "#{r[:name]} - p90 latency", unit: "ms", value: r[:p90] },
      { name: "#{r[:name]} - p99 latency", unit: "ms", value: r[:p99] },
      { name: "#{r[:name]} - failed requests", unit: "%", value: r[:failed_pct] }
    ]
  end
end
+ puts "No benchmark results found to convert" + exit 0 +end + +# Convert current results to JSON +new_metrics = to_unified_json(all_results) +output_path = File.join(BENCH_RESULTS_DIR, "benchmark.json") + +# In append mode, merge with existing metrics +if APPEND_MODE && File.exist?(output_path) + existing_metrics = JSON.parse(File.read(output_path)) + unified_json = existing_metrics + new_metrics + puts "Appended #{new_metrics.length} metrics to existing #{existing_metrics.length} metrics" +else + unified_json = new_metrics + puts "Created #{unified_json.length} new metrics" +end + +# Write unified JSON (all metrics using customSmallerIsBetter with negated RPS) +File.write(output_path, JSON.pretty_generate(unified_json)) +puts "Wrote #{unified_json.length} total metrics to benchmark.json (from #{all_results.length} benchmark results)" +puts " - RPS values are negated (higher RPS = lower negative value = better)" +puts " - Latencies and failure rates use original values (lower is better)" diff --git a/benchmarks/k6.ts b/benchmarks/k6.ts new file mode 100644 index 0000000000..1d00617ced --- /dev/null +++ b/benchmarks/k6.ts @@ -0,0 +1,80 @@ +/** + * k6 benchmark script for React on Rails + * + * This script is designed to be reusable across different routes and configurations. + * Configuration is passed via environment variables (using -e flag): + * + * Required: + * TARGET_URL - The full URL to benchmark (e.g., http://localhost:3001/server_side_hello_world) + * + * Optional: + * RATE - Requests per second ("max" for maximum throughput, or a number). Default: "max" + * DURATION - Test duration (e.g., "30s", "1m"). Default: "30s" + * CONNECTIONS - Number of concurrent connections/VUs. Default: 10 + * MAX_CONNECTIONS - Maximum VUs (for constant-arrival-rate). Default: same as CONNECTIONS + * REQUEST_TIMEOUT - Request timeout (e.g., "60s"). 
Default: "60s" + * + * Usage: + * k6 run -e TARGET_URL=http://localhost:3001/my_route benchmarks/k6.ts + * k6 run -e TARGET_URL=http://localhost:3001/my_route -e RATE=100 -e DURATION=1m benchmarks/k6.ts + */ +/* eslint-disable import/no-unresolved -- k6 is installed globally */ +import http from 'k6/http'; +import { Options, Scenario } from 'k6/options'; +import { check } from 'k6'; + +// Read configuration from environment variables +const targetUrl = __ENV.TARGET_URL; +const rate = __ENV.RATE || 'max'; +const duration = __ENV.DURATION || '30s'; +const vus = parseInt(__ENV.CONNECTIONS || '10', 10); +const maxVUs = __ENV.MAX_CONNECTIONS ? parseInt(__ENV.MAX_CONNECTIONS, 10) : vus; +const requestTimeout = __ENV.REQUEST_TIMEOUT || '60s'; + +if (!targetUrl) { + throw new Error('TARGET_URL environment variable is required'); +} + +// Configure scenarios based on rate mode +const scenarios: Record = + rate === 'max' + ? { + max_rate: { + executor: 'constant-vus', + vus, + duration, + }, + } + : { + constant_rate: { + executor: 'constant-arrival-rate', + rate: parseInt(rate, 10) || 50, // same default as in bench.rb + timeUnit: '1s', + duration, + preAllocatedVUs: vus, + maxVUs, + }, + }; + +export const options: Options = { + // "Highly recommended" in https://grafana.com/docs/k6/latest/using-k6/k6-options/reference/#discard-response-bodies + discardResponseBodies: true, + scenarios, + // Disable default thresholds to avoid noise in output + thresholds: {}, +}; + +export default () => { + const response = http.get(targetUrl, { + timeout: requestTimeout, + redirects: 0, + }); + + // Check for various status codes to get accurate reporting + check(response, { + status_200: (r) => r.status === 200, + status_3xx: (r) => r.status >= 300 && r.status < 400, + status_4xx: (r) => r.status >= 400 && r.status < 500, + status_5xx: (r) => r.status >= 500, + }); +}; diff --git a/benchmarks/lib/benchmark_helpers.rb b/benchmarks/lib/benchmark_helpers.rb new file mode 100644 index 
# frozen_string_literal: true

require "json"
require "fileutils"
require "net/http"
require "uri"
require "shellwords"

# Shared utilities for benchmark scripts

# Fetch an environment variable with a fallback.
# Empty string and "0" are treated as unset because they can come from the
# benchmark workflow's optional inputs.
def env_or_default(key, default)
  raw = ENV[key].to_s
  return default if raw.empty? || raw == "0"

  raw
end

# Validate a rate parameter: the literal "max" or a positive number (string).
# Raises with a descriptive message otherwise.
def validate_rate(rate)
  return if rate == "max"
  return if rate.match?(/^\d+(\.\d+)?$/) && rate.to_f.positive?

  raise "RATE must be 'max' or a positive number (got: '#{rate}')"
end

# Validate that +value+ is a positive Integer; +name+ appears in the error.
def validate_positive_integer(value, name)
  return if value.is_a?(Integer) && value.positive?

  raise "#{name} must be a positive integer (got: '#{value}')"
end

# Validate a duration string such as "10s", "1m", "1.5m" (compounds like
# "1m30s" also pass). +name+ appears in the error.
def validate_duration(value, name)
  return if value.match?(/^(\d+(\.\d+)?[smh])+$/)

  raise "#{name} must be a duration like '10s', '1m', '1.5m' (got: '#{value}')"
end

# Parse a JSON results file, raising errors that name the producing tool
# (e.g. "k6", "Vegeta") so failures in CI logs are easy to attribute.
def parse_json_file(file_path, tool_name)
  JSON.parse(File.read(file_path))
rescue Errno::ENOENT
  raise "#{tool_name} results file not found: #{file_path}"
rescue JSON::ParserError => e
  raise "Failed to parse #{tool_name} JSON: #{e.message}"
rescue StandardError => e
  raise "Failed to read #{tool_name} results: #{e.message}"
end

# Metrics row recorded in the summary when a benchmark fails: five "FAILED"
# placeholders (RPS, p50, p90, p99, max) plus the error message as the status.
def failure_metrics(error)
  (["FAILED"] * 5) << error.message
end

# Append one tab-separated line to the summary file.
def add_summary_line(summary_file, *parts)
  File.open(summary_file, "a") { |f| f.puts parts.join("\t") }
end

# Probe an HTTP server once.
# Returns { success:, info: } where success is true for 2xx and 3xx responses
# (a redirect still means "server is responding") and info is a human-readable
# status line for progress output. Connection errors are reported, not raised.
def server_responding?(uri)
  response = Net::HTTP.get_response(uri)
  success = response.is_a?(Net::HTTPSuccess) || response.is_a?(Net::HTTPRedirection)
  info = "HTTP #{response.code} #{response.message}"
  info += " -> #{response['location']}" if response.is_a?(Net::HTTPRedirection) && response["location"]
  { success: success, info: info }
rescue StandardError => e
  { success: false, info: "#{e.class.name}: #{e.message}" }
end

# Poll +uri+ once per second, logging each attempt, until it responds.
# Returns true on success; raises when still unreachable after +timeout_sec+.
def wait_for_server(uri, timeout_sec: 60)
  puts "Checking server availability at #{uri.host}:#{uri.port}..."
  start_time = Time.now
  attempt_count = 0

  loop do
    attempt_count += 1
    attempt_start = Time.now
    result = server_responding?(uri)
    attempt_duration = Time.now - attempt_start
    elapsed = Time.now - start_time

    outcome = result[:success] ? "SUCCESS" : "FAILED"
    puts " Attempt #{attempt_count} at #{elapsed.round(2)}s: #{outcome} - #{result[:info]} " \
         "(took #{attempt_duration.round(3)}s)"
    return true if result[:success]

    raise "Server at #{uri.host}:#{uri.port} not responding within #{timeout_sec}s" if elapsed > timeout_sec

    sleep 1
  end
end

# Verify that every CLI tool in +tools+ is on PATH.
# IMPROVED: collects ALL missing tools into a single error (instead of failing
# on the first) so a CI run surfaces every installation problem at once, and
# shell-escapes tool names before interpolating them into the shell command.
def check_required_tools(tools)
  missing = tools.reject { |cmd| system("command -v #{Shellwords.escape(cmd)} >/dev/null 2>&1") }
  return if missing.empty?

  raise "required tool '#{missing.join("', '")}' is not installed"
end

# Print a horizontal section separator.
def print_separator(char = "=", width = 80)
  puts char * width
end

# Print a labelled list of benchmark parameters.
def print_params(params)
  puts "Benchmark parameters:"
  params.each { |key, value| puts " - #{key}: #{value}" }
end

# Announce where the summary was written and pretty-print it as a table.
# `column` output is best-effort; the raw file remains the source of truth.
def display_summary(summary_file)
  puts "\nSummary saved to #{summary_file}"
  system("column", "-t", "-s", "\t", summary_file)
end
Benchmarking Strategy + +## Current Approach + +We use **max rate benchmarking** - each route is tested at maximum throughput to measure its capacity. + +### Configuration + +- `RATE=max` - Tests maximum throughput +- `CONNECTIONS=10` - Concurrent connections +- `DURATION=30s` - Test duration per route + +## Trade-offs: Max Rate vs Fixed Rate + +### Max Rate (Current) + +**Pros:** + +- Measures actual throughput capacity +- Self-adjusting - no need to maintain per-route rate configs +- Identifies bottlenecks and ceilings + +**Cons:** + +- Results vary with CI runner performance +- Harder to compare across commits when capacity changes significantly +- Noise from shared CI infrastructure + +### Fixed Rate + +**Pros:** + +- Consistent baseline across runs +- Latency comparisons are meaningful +- Detects regressions at a specific load level + +**Cons:** + +- Must be set below the slowest route's capacity +- If route capacity changes, historical data becomes incomparable +- Requires maintaining rate configuration per route + +## Why We Chose Max Rate + +Different routes have vastly different capacities: + +- `/empty` - ~1500 RPS +- SSR routes - ~50-200 RPS depending on component complexity + +A fixed rate low enough for all routes would under-utilize fast routes. A per-route fixed rate config would be painful to maintain and would break comparisons when capacity changes. + +For library benchmarking in CI, we accept some noise and focus on detecting significant regressions (>15-20%). + +## Future Considerations + +Options to improve accuracy if needed: + +1. **Multiple samples** - Run each benchmark 2-3 times, average results, flag high variance +2. **Adaptive rate** - Quick max-rate probe, then benchmark at 70% capacity +3. **Per-route fixed rates** - Maintain target RPS config (high maintenance burden) +4. 
**Dedicated benchmark runners** - Reduce CI noise with consistent hardware diff --git a/knip.ts b/knip.ts index 6cc0239ee3..b97ae9cef2 100644 --- a/knip.ts +++ b/knip.ts @@ -5,7 +5,7 @@ const config: KnipConfig = { workspaces: { // Root workspace - manages the monorepo and global tooling '.': { - entry: ['eslint.config.ts', 'jest.config.base.js'], + entry: ['eslint.config.ts', 'jest.config.base.js', 'benchmarks/k6.ts'], project: ['*.{js,mjs,ts}'], ignoreBinaries: [ // Has to be installed globally @@ -13,6 +13,8 @@ const config: KnipConfig = { // Pro package binaries used in Pro workflows 'playwright', 'e2e-test', + // Local binaries + 'bin/.*', ], ignore: ['react_on_rails_pro/**', 'react_on_rails/vendor/**'], ignoreDependencies: [ diff --git a/package.json b/package.json index 74169fd6fa..390db836a6 100644 --- a/package.json +++ b/package.json @@ -30,6 +30,7 @@ "@testing-library/react": "^16.2.0", "@tsconfig/node14": "^14.1.2", "@types/jest": "^29.5.14", + "@types/k6": "^1.4.0", "@types/node": "^20.17.16", "@types/react": "^19.0.0", "@types/react-dom": "^19.0.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9403953675..dd9fab055a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -63,6 +63,9 @@ importers: '@types/jest': specifier: ^29.5.14 version: 29.5.14 + '@types/k6': + specifier: ^1.4.0 + version: 1.4.0 '@types/node': specifier: ^20.17.16 version: 20.19.25 @@ -1635,6 +1638,9 @@ packages: '@types/jsonwebtoken@9.0.10': resolution: {integrity: sha512-asx5hIG9Qmf/1oStypjanR7iKTv0gXQ1Ov/jfrX6kS/EO0OFni8orbmGCn0672NHR3kXHwpAwR+B368ZGN/2rA==} + '@types/k6@1.4.0': + resolution: {integrity: sha512-2tgKVnzNXZTZT1TDAGLY/3cuvHPZLyOF751N7M8T2dBgWzInzUVZYjGn9zVW01S1yNLqAr1az9gctyJHTW6GRQ==} + '@types/lockfile@1.0.4': resolution: {integrity: sha512-Q8oFIHJHr+htLrTXN2FuZfg+WXVHQRwU/hC2GpUu+Q8e3FUM9EDkS2pE3R2AO1ZGu56f479ybdMCNF1DAu8cAQ==} @@ -6994,6 +7000,8 @@ snapshots: '@types/ms': 2.1.0 '@types/node': 20.19.25 + '@types/k6@1.4.0': {} + '@types/lockfile@1.0.4': {} 
'@types/mime@1.3.5': {} diff --git a/react_on_rails/spec/dummy/Gemfile.lock b/react_on_rails/spec/dummy/Gemfile.lock index 27fe392450..19085e901f 100644 --- a/react_on_rails/spec/dummy/Gemfile.lock +++ b/react_on_rails/spec/dummy/Gemfile.lock @@ -197,6 +197,8 @@ GEM nokogiri (1.18.10) mini_portile2 (~> 2.8.2) racc (~> 1.4) + nokogiri (1.18.10-x86_64-linux-gnu) + racc (~> 1.4) ostruct (0.6.3) package_json (0.1.0) parallel (1.24.0) @@ -423,6 +425,7 @@ GEM PLATFORMS ruby + x86_64-linux DEPENDENCIES amazing_print diff --git a/react_on_rails/spec/dummy/bin/prod b/react_on_rails/spec/dummy/bin/prod new file mode 100755 index 0000000000..81658fa284 --- /dev/null +++ b/react_on_rails/spec/dummy/bin/prod @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Run only after ./prod-assets + +# Check if assets are precompiled +MANIFEST="public/webpack/production/manifest.json" + +if [ ! -d "public/assets" ]; then + echo "ERROR: public/assets not found. Run ./bin/prod-assets first" + exit 1 +fi + +if [ ! -f "$MANIFEST" ]; then + echo "ERROR: $MANIFEST not found. 
Run ./bin/prod-assets first" + exit 1 +fi + +# Simple up-to-date check: warn if source files are newer than manifest.json +if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer "$MANIFEST" 2>/dev/null | grep -q .; then + echo "WARNING: client or config has changes newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +if [ -f "pnpm-lock.yaml" ] && [ "pnpm-lock.yaml" -nt "$MANIFEST" ]; then + echo "WARNING: pnpm-lock.yaml is newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +NODE_ENV=production RAILS_ENV=production bundle exec rails server -p 3001 diff --git a/react_on_rails/spec/dummy/bin/prod-assets b/react_on_rails/spec/dummy/bin/prod-assets new file mode 100755 index 0000000000..d4651b9961 --- /dev/null +++ b/react_on_rails/spec/dummy/bin/prod-assets @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +export NODE_ENV=production +export RAILS_ENV=production +if [ "$CI" = "true" ]; then + bundle exec bootsnap precompile --gemfile app/ lib/ config/ +fi +pnpm run build:rescript +bundle exec rake react_on_rails:generate_packs +bundle exec rails assets:precompile diff --git a/react_on_rails/spec/dummy/config/puma.rb b/react_on_rails/spec/dummy/config/puma.rb index de5feec982..e190c501cb 100644 --- a/react_on_rails/spec/dummy/config/puma.rb +++ b/react_on_rails/spec/dummy/config/puma.rb @@ -10,10 +10,12 @@ min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count } threads min_threads_count, max_threads_count +rails_env = ENV.fetch("RAILS_ENV", "development") + # Specifies the `worker_timeout` threshold that Puma will use to wait before # terminating a worker in development environments. # -worker_timeout 3600 if ENV.fetch("RAILS_ENV", "development") == "development" +worker_timeout 3600 if rails_env == "development" # Specifies the `port` that Puma will listen on to receive requests; default is 3000. 
# @@ -21,25 +23,43 @@ # Specifies the `environment` that Puma will run in. # -environment ENV.fetch("RAILS_ENV", "development") +environment rails_env # Specifies the `pidfile` that Puma will use. pidfile ENV.fetch("PIDFILE", "tmp/pids/server.pid") -# Specifies the number of `workers` to boot in clustered mode. -# Workers are forked web server processes. If using threads and workers together -# the concurrency of the application would be max `threads` * `workers`. -# Workers do not work on JRuby or Windows (both of which do not support -# processes). -# -# workers ENV.fetch("WEB_CONCURRENCY") { 2 } +if rails_env == "production" + # Specifies the number of `workers` to boot in clustered mode. + # Workers are forked web server processes. If using threads and workers together + # the concurrency of the application would be max `threads` * `workers`. + # Workers do not work on JRuby or Windows (both of which do not support + # processes). + # + workers ENV.fetch("WEB_CONCURRENCY", 2) -# Use the `preload_app!` method when specifying a `workers` number. -# This directive tells Puma to first boot the application and load code -# before forking the application. This takes advantage of Copy On Write -# process behavior so workers use less memory. -# -# preload_app! + # Use the `preload_app!` method when specifying a `workers` number. + # This directive tells Puma to first boot the application and load code + # before forking the application. This takes advantage of Copy On Write + # process behavior so workers use less memory. + # + preload_app! + + # The code in the `on_worker_boot` will be called if you are using + # clustered mode by specifying a number of `workers`. After each worker + # process is booted this block will be run, if you are using `preload_app!` + # option you will want to use this block to reconnect to any threads + # or connections that may have been created at application boot, Ruby + # cannot share connections between processes. 
+ # + on_worker_boot do + ActiveRecord::Base.establish_connection if defined?(ActiveRecord) + end + + # Specifies the `worker_shutdown_timeout` threshold that Puma will use to wait before + # terminating a worker. + # + worker_shutdown_timeout 60 +end # Allow puma to be restarted by `bin/rails restart` command. plugin :tmp_restart diff --git a/react_on_rails/spec/dummy/config/shakapacker.yml b/react_on_rails/spec/dummy/config/shakapacker.yml index 342b6cad1e..f4d1e586bd 100644 --- a/react_on_rails/spec/dummy/config/shakapacker.yml +++ b/react_on_rails/spec/dummy/config/shakapacker.yml @@ -20,6 +20,9 @@ default: &default cache_manifest: false nested_entries: true + # Extract and emit a css file + extract_css: true + # Hook to run before webpack compilation (e.g., for generating dynamic entry points) # SECURITY: Only reference trusted scripts within your project. Ensure the hook path # points to a file within the project root that you control. diff --git a/react_on_rails_pro/.prettierignore b/react_on_rails_pro/.prettierignore index 47322297f2..d095c03e8e 100644 --- a/react_on_rails_pro/.prettierignore +++ b/react_on_rails_pro/.prettierignore @@ -3,7 +3,7 @@ node_modules **/tmp **/public **/package.json -vendor/bundle +**/vendor **/.node-renderer-bundles spec/dummy/.yalc/ diff --git a/react_on_rails_pro/Gemfile.development_dependencies b/react_on_rails_pro/Gemfile.development_dependencies index 9c63e492d8..034cf5ea09 100644 --- a/react_on_rails_pro/Gemfile.development_dependencies +++ b/react_on_rails_pro/Gemfile.development_dependencies @@ -23,7 +23,6 @@ gem "pg" # Turbolinks makes following links in your web application faster. 
Read more: https://github.com/rails/turbolinks gem "turbolinks" gem "sqlite3", "~> 1.4" -gem "uglifier" gem "jquery-rails" gem "sprockets" gem "sass-rails" diff --git a/react_on_rails_pro/Gemfile.lock b/react_on_rails_pro/Gemfile.lock index 0872d940a7..4bcb787a23 100644 --- a/react_on_rails_pro/Gemfile.lock +++ b/react_on_rails_pro/Gemfile.lock @@ -458,8 +458,6 @@ GEM turbolinks-source (5.2.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - uglifier (4.2.0) - execjs (>= 0.3.0, < 3) unicode-display_width (2.6.0) uri (1.0.3) useragent (0.16.11) @@ -538,7 +536,6 @@ DEPENDENCIES sprockets sqlite3 (~> 1.4) turbolinks - uglifier web-console webdrivers (= 5.3.0) webmock diff --git a/react_on_rails_pro/spec/dummy/Gemfile.lock b/react_on_rails_pro/spec/dummy/Gemfile.lock index f440bc32d4..a47b8f6147 100644 --- a/react_on_rails_pro/spec/dummy/Gemfile.lock +++ b/react_on_rails_pro/spec/dummy/Gemfile.lock @@ -487,8 +487,6 @@ GEM turbolinks-source (5.2.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - uglifier (4.2.0) - execjs (>= 0.3.0, < 3) unicode-display_width (2.6.0) uri (1.0.3) useragent (0.16.11) @@ -580,7 +578,6 @@ DEPENDENCIES sprockets sqlite3 (~> 1.4) turbolinks - uglifier web-console webdrivers (= 5.3.0) webmock diff --git a/react_on_rails_pro/spec/dummy/Procfile.prod b/react_on_rails_pro/spec/dummy/Procfile.prod new file mode 100644 index 0000000000..d47e98ef15 --- /dev/null +++ b/react_on_rails_pro/spec/dummy/Procfile.prod @@ -0,0 +1,6 @@ +# Procfile for production mode (precompiled assets) + +rails: RAILS_ENV=production NODE_ENV=production bin/rails s -p 3001 + +# Start Node server for server rendering. 
+node-renderer: NODE_ENV=production RENDERER_LOG_LEVEL=error RENDERER_PORT=3800 node client/node-renderer.js diff --git a/react_on_rails_pro/spec/dummy/bin/prod b/react_on_rails_pro/spec/dummy/bin/prod new file mode 100755 index 0000000000..2e38289c15 --- /dev/null +++ b/react_on_rails_pro/spec/dummy/bin/prod @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# Run only after ./prod-assets + +# Check if assets are precompiled +MANIFEST="public/webpack/production/manifest.json" + +if [ ! -d "public/assets" ]; then + echo "ERROR: public/assets not found. Run ./bin/prod-assets first" + exit 1 +fi + +if [ ! -f "$MANIFEST" ]; then + echo "ERROR: $MANIFEST not found. Run ./bin/prod-assets first" + exit 1 +fi + +# Simple up-to-date check: warn if source files are newer than manifest.json +if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer "$MANIFEST" 2>/dev/null | grep -q .; then + echo "WARNING: client or config has changes newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +if [ -f "pnpm-lock.yaml" ] && [ "pnpm-lock.yaml" -nt "$MANIFEST" ]; then + echo "WARNING: pnpm-lock.yaml is newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +export NODE_ENV=production +export RAILS_ENV=production + +if command -v overmind &> /dev/null; then + overmind start -f Procfile.prod +elif command -v foreman &> /dev/null; then + foreman start -f Procfile.prod +else + echo "Installing foreman..." 
+ gem install foreman + foreman start -f Procfile.prod +fi diff --git a/react_on_rails_pro/spec/dummy/bin/prod-assets b/react_on_rails_pro/spec/dummy/bin/prod-assets new file mode 100755 index 0000000000..828b1e6ae8 --- /dev/null +++ b/react_on_rails_pro/spec/dummy/bin/prod-assets @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +export NODE_ENV=production +export RAILS_ENV=production +if [ "$CI" = "true" ]; then + bundle exec bootsnap precompile --gemfile app/ lib/ config/ +fi +bundle exec rails react_on_rails:generate_packs +bundle exec rails assets:precompile diff --git a/react_on_rails_pro/spec/dummy/config/database.yml b/react_on_rails_pro/spec/dummy/config/database.yml index 1c1a37ca8d..0d02f24980 100644 --- a/react_on_rails_pro/spec/dummy/config/database.yml +++ b/react_on_rails_pro/spec/dummy/config/database.yml @@ -6,7 +6,7 @@ # default: &default adapter: sqlite3 - pool: 5 + pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 5 } %> timeout: 5000 development: diff --git a/react_on_rails_pro/spec/dummy/config/environments/production.rb b/react_on_rails_pro/spec/dummy/config/environments/production.rb index 519aa382d6..4c20d38667 100644 --- a/react_on_rails_pro/spec/dummy/config/environments/production.rb +++ b/react_on_rails_pro/spec/dummy/config/environments/production.rb @@ -3,6 +3,9 @@ Rails.application.configure do # Settings specified here will take precedence over those in config/application.rb. + # Use a hardcoded secret for this test/dummy app (not for real production use) + config.secret_key_base = ENV.fetch("SECRET_KEY_BASE", "dummy-secret-key-base-for-testing-only") + # Code is not reloaded between requests. config.cache_classes = true @@ -19,8 +22,9 @@ config.public_file_server.enabled = true # Compress JavaScripts and CSS. 
- config.assets.js_compressor = Uglifier.new(harmony: true) - config.assets.css_compressor = :csso + # JS/CSS compression handled by Webpack/Shakapacker, not needed for Sprockets + # config.assets.js_compressor = Uglifier.new(harmony: true) + # config.assets.css_compressor = :csso # Do not fallback to assets pipeline if a precompiled asset is missed. config.assets.compile = false @@ -42,9 +46,9 @@ # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies. # config.force_ssl = true - # Use the lowest log level to ensure availability of diagnostic information - # when problems arise. - config.log_level = :debug + # Include generic and useful information about system operation, but avoid logging too much + # information to avoid inadvertent exposure of personally identifiable information (PII). + config.log_level = :info # Prepend all log lines with the following tags. config.log_tags = [:request_id] @@ -65,8 +69,8 @@ # the I18n.default_locale when a translation cannot be found). config.i18n.fallbacks = true - # Send deprecation notices to registered listeners. - config.active_support.deprecation = :notify + # Don't log any deprecations. + config.active_support.report_deprecations = false # Use default logging formatter so that PID and timestamp are not suppressed. config.log_formatter = Logger::Formatter.new diff --git a/react_on_rails_pro/spec/dummy/config/puma.rb b/react_on_rails_pro/spec/dummy/config/puma.rb index 035d43a2f6..c02737ef7b 100644 --- a/react_on_rails_pro/spec/dummy/config/puma.rb +++ b/react_on_rails_pro/spec/dummy/config/puma.rb @@ -1,13 +1,14 @@ # frozen_string_literal: true # Puma can serve each request in a thread from an internal thread pool. -# The `threads` method setting takes two numbers a minimum and maximum. +# The `threads` method setting takes two numbers: a minimum and maximum. # Any libraries that use thread pools should be configured to match # the maximum value specified for Puma. 
Default is set to 5 threads for minimum -# and maximum, this matches the default thread size of Active Record. +# and maximum; this matches the default thread size of Active Record. # -threads_count = ENV.fetch("RAILS_MAX_THREADS", 5).to_i -threads threads_count, threads_count +max_threads_count = ENV.fetch("RAILS_MAX_THREADS", 5) +min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count } +threads min_threads_count, max_threads_count # Specifies the `port` that Puma will listen on to receive requests, default is 3000. # @@ -45,5 +46,10 @@ ActiveRecord::Base.establish_connection if defined?(ActiveRecord) end +# Specifies the `worker_shutdown_timeout` threshold that Puma will use to wait before +# terminating a worker. +# +worker_shutdown_timeout 60 + # Allow puma to be restarted by `rails restart` command. plugin :tmp_restart diff --git a/react_on_rails_pro/spec/dummy/config/shakapacker.yml b/react_on_rails_pro/spec/dummy/config/shakapacker.yml index 068bb30df4..672d872bf3 100644 --- a/react_on_rails_pro/spec/dummy/config/shakapacker.yml +++ b/react_on_rails_pro/spec/dummy/config/shakapacker.yml @@ -8,9 +8,10 @@ default: &default nested_entries: true javascript_transpiler: babel - cache_path: tmp/cache/webpacker + cache_path: tmp/cache/shakapacker check_yarn_integrity: false webpack_compile_output: false + ensure_consistent_versioning: true # Additional paths webpack should look up modules # ['app/assets', 'engine/foo/app/assets'] diff --git a/react_on_rails_pro/spec/dummy/lib/tasks/assets.rake b/react_on_rails_pro/spec/dummy/lib/tasks/assets.rake index ea766fa26b..d7837e3200 100644 --- a/react_on_rails_pro/spec/dummy/lib/tasks/assets.rake +++ b/react_on_rails_pro/spec/dummy/lib/tasks/assets.rake @@ -16,10 +16,10 @@ namespace :assets do desc "Compile assets with webpack" task :webpack do - sh "cd client && yarn run build:client" + sh "cd client && pnpm run build:client" # Skip next line if not doing server rendering - sh "cd client && yarn run 
build:server" + sh "cd client && pnpm run build:server" end task :clobber do