mirror of
https://github.com/dunglas/frankenphp.git
synced 2025-12-24 13:38:11 +08:00
feat: Adds automatic thread scaling at runtime and php_ini configuration in Caddyfile (#1266)
Adds option to scale threads at runtime Adds php_ini configuration in Caddyfile
This commit is contained in:
7
testdata/ini.php
vendored
Normal file
7
testdata/ini.php
vendored
Normal file
@@ -0,0 +1,7 @@
<?php

// Test fixture: echoes "<key>:<value>" for the php.ini directive named by
// the `key` query parameter, so tests can assert effective INI settings.
require_once __DIR__ . '/_executor.php';

return function () {
    echo $_GET['key'] . ':' . ini_get($_GET['key']);
};
29
testdata/performance/api.js
vendored
Normal file
29
testdata/performance/api.js
vendored
Normal file
@@ -0,0 +1,29 @@
import http from 'k6/http'

/**
 * Simulates an application that communicates with external APIs or
 * microservices. Latencies tend to be much higher than with databases in
 * these cases, so we consider the 10ms-150ms range.
 */
export const options = {
  stages: [
    { duration: '20s', target: 150 },
    { duration: '20s', target: 1000 },
    { duration: '10s', target: 0 },
  ],
  thresholds: { http_req_failed: ['rate<0.01'] },
}

/* global __ENV */
export default function () {
  // simulated API latency: 10-150ms
  const latency = 10 + Math.floor(Math.random() * 141)
  // simulated CPU work: up to 30000 units
  const work = Math.ceil(Math.random() * 30000)
  // simulated response size: up to 40 output units
  const output = Math.ceil(Math.random() * 40)

  http.get(http.url`${__ENV.CADDY_HOSTNAME}/sleep.php?sleep=${latency}&work=${work}&output=${output}`)
}
27
testdata/performance/computation.js
vendored
Normal file
27
testdata/performance/computation.js
vendored
Normal file
@@ -0,0 +1,27 @@
import http from 'k6/http'

/**
 * Simulates an application that does very little IO but a lot of computation.
 */
export const options = {
  stages: [
    { duration: '20s', target: 80 },
    { duration: '20s', target: 150 },
    { duration: '5s', target: 0 },
  ],
  thresholds: { http_req_failed: ['rate<0.01'] },
}

/* global __ENV */
export default function () {
  // heavy CPU work: up to 1,000,000 units
  const work = Math.ceil(Math.random() * 1_000_000)
  // response size: up to 500 output units
  const output = Math.ceil(Math.random() * 500)
  // almost no IO: 0-2ms of simulated latency
  const latency = Math.floor(Math.random() * 3)

  http.get(http.url`${__ENV.CADDY_HOSTNAME}/sleep.php?sleep=${latency}&work=${work}&output=${output}`)
}
30
testdata/performance/database.js
vendored
Normal file
30
testdata/performance/database.js
vendored
Normal file
@@ -0,0 +1,30 @@
import http from 'k6/http'

/**
 * Modern databases tend to have latencies in the single-digit milliseconds.
 * We simulate 1-10ms latencies and 1-2 queries per request.
 */
export const options = {
  stages: [
    { duration: '20s', target: 100 },
    { duration: '30s', target: 200 },
    { duration: '10s', target: 0 },
  ],
  thresholds: { http_req_failed: ['rate<0.01'] },
}

/* global __ENV */
export default function () {
  // per-query latency: 1-10ms
  const latency = Math.floor(Math.random() * 10) + 1
  // simulated queries per request: 1-2
  const iterations = Math.floor(Math.random() * 2) + 1
  // CPU work per query: up to 30000 units
  const work = Math.ceil(Math.random() * 30000)
  // response size: up to 40 output units
  const output = Math.ceil(Math.random() * 40)

  http.get(http.url`${__ENV.CADDY_HOSTNAME}/sleep.php?sleep=${latency}&work=${work}&output=${output}&iterations=${iterations}`)
}
16
testdata/performance/flamegraph.sh
vendored
Executable file
16
testdata/performance/flamegraph.sh
vendored
Executable file
@@ -0,0 +1,16 @@
#!/bin/bash

# Fetch Brendan Gregg's FlameGraph scripts on first use
if [ ! -d "/usr/local/src/flamegraph" ]; then
	mkdir /usr/local/src/flamegraph &&
		cd /usr/local/src/flamegraph &&
		git clone https://github.com/brendangregg/FlameGraph.git
fi

# give the load test time to warm up
sleep 10

# capture a 30 second CPU profile from the Caddy admin port and render it as an SVG
cd /usr/local/src/flamegraph/FlameGraph &&
	go tool pprof -raw -output=cpu.txt 'http://localhost:2019/debug/pprof/profile?seconds=30' &&
	./stackcollapse-go.pl cpu.txt | ./flamegraph.pl >/go/src/app/testdata/performance/flamegraph.svg
28
testdata/performance/hanging-requests.js
vendored
Normal file
28
testdata/performance/hanging-requests.js
vendored
Normal file
@@ -0,0 +1,28 @@
import http from 'k6/http'

/**
 * It is not uncommon for external services to hang for a long time.
 * Make sure the server is resilient in such cases and doesn't hang as well.
 */
export const options = {
  stages: [
    { duration: '20s', target: 100 },
    { duration: '20s', target: 500 },
    { duration: '20s', target: 0 },
  ],
  thresholds: { http_req_failed: ['rate<0.01'] },
}

/* global __ENV */
export default function () {
  // 2% of requests hit an endpoint that hangs for 15s
  if (Math.random() < 0.02) {
    http.get(`${__ENV.CADDY_HOSTNAME}/sleep.php?sleep=15000&work=10000&output=100`)
    return
  }

  // everything else is a regular request
  http.get(`${__ENV.CADDY_HOSTNAME}/sleep.php?sleep=5&work=10000&output=100`)
}
20
testdata/performance/hello-world.js
vendored
Normal file
20
testdata/performance/hello-world.js
vendored
Normal file
@@ -0,0 +1,20 @@
import http from 'k6/http'

/**
 * 'Hello world' scenario: no query parameters, tests the raw server performance.
 */
export const options = {
  stages: [
    { duration: '5s', target: 100 },
    { duration: '20s', target: 400 },
    { duration: '5s', target: 0 },
  ],
  thresholds: { http_req_failed: ['rate<0.01'] },
}

/* global __ENV */
export default function () {
  http.get(`${__ENV.CADDY_HOSTNAME}/sleep.php`)
}
20
testdata/performance/k6.Caddyfile
vendored
Normal file
20
testdata/performance/k6.Caddyfile
vendored
Normal file
@@ -0,0 +1,20 @@
# Caddyfile used by the k6 load-test containers; thread counts and the worker
# script are injected through environment variables by perf-test.sh.
{
	frankenphp {
		max_threads {$MAX_THREADS}
		num_threads {$NUM_THREADS}
		worker {
			# defaults to the sleep.php busy-work script
			file /go/src/app/testdata/{$WORKER_FILE:sleep.php}
			num {$WORKER_THREADS}
		}
	}
}

:80 {
	route {
		root /go/src/app/testdata
		php {
			root /go/src/app/testdata
			enable_root_symlink false
		}
	}
}
39
testdata/performance/perf-test.sh
vendored
Executable file
39
testdata/performance/perf-test.sh
vendored
Executable file
@@ -0,0 +1,39 @@
#!/bin/bash

# Build the dev image, start a FrankenPHP server container and run the
# selected k6 scenario against it; also records a flamegraph while testing.

docker build -t frankenphp-dev -f dev.Dockerfile .

export "CADDY_HOSTNAME=http://host.docker.internal"

select scenario in ./testdata/performance/*.js; do
	read -r -p "How many worker threads? " worker_threads
	read -r -p "How many max threads? " max_threads

	# one regular thread on top of the worker threads
	num_threads=$((worker_threads + 1))

	# start the server container in the background
	docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
		-p 8125:80 \
		-v "$PWD:/go/src/app" \
		--name load-test-container \
		-e "MAX_THREADS=$max_threads" \
		-e "WORKER_THREADS=$worker_threads" \
		-e "NUM_THREADS=$num_threads" \
		-itd \
		frankenphp-dev \
		sh /go/src/app/testdata/performance/start-server.sh

	# record a CPU flamegraph while the test runs
	docker exec -d load-test-container sh /go/src/app/testdata/performance/flamegraph.sh

	sleep 10

	# run the chosen k6 scenario against the server container
	docker run --entrypoint "" -it -v .:/app -w /app \
		--add-host "host.docker.internal:host-gateway" \
		grafana/k6:latest \
		k6 run -e "CADDY_HOSTNAME=$CADDY_HOSTNAME:8125" "./$scenario"

	# print the server's thread metrics before tearing it down
	docker exec load-test-container curl "http://localhost:2019/frankenphp/threads"

	docker stop load-test-container
	docker rm load-test-container
done
19
testdata/performance/performance-testing.md
vendored
Normal file
19
testdata/performance/performance-testing.md
vendored
Normal file
@@ -0,0 +1,19 @@
# Running Load tests

To run load tests with k6 you need to have Docker and Bash installed.
Go to the root of this repository and run:

```sh
bash testdata/performance/perf-test.sh
```

This will build the `frankenphp-dev` docker image and run it under the name 'load-test-container'
in the background. Additionally, it will run the `grafana/k6` container and you'll be able to choose
the load test you want to run. A `flamegraph.svg` will be created in the `testdata/performance` directory.

If the load test has stopped prematurely, you might have to remove the container manually:

```sh
docker stop load-test-container
docker rm load-test-container
```
7
testdata/performance/start-server.sh
vendored
Executable file
7
testdata/performance/start-server.sh
vendored
Executable file
@@ -0,0 +1,7 @@
#!/bin/bash

# Build the FrankenPHP Caddy module and start it with the k6 test Caddyfile.
cd /go/src/app/caddy/frankenphp &&
	go build --buildvcs=false &&
	cd ../../testdata/performance &&
	/go/src/app/caddy/frankenphp/frankenphp run -c k6.Caddyfile
32
testdata/performance/timeouts.js
vendored
Normal file
32
testdata/performance/timeouts.js
vendored
Normal file
@@ -0,0 +1,32 @@
import http from 'k6/http'

/**
 * Databases or external resources can sometimes become unavailable for short
 * periods of time. Make sure the server can recover quickly from such periods.
 * This simulation swaps between a hanging and a working server every 10 seconds.
 */
export const options = {
  stages: [
    { duration: '20s', target: 100 },
    { duration: '20s', target: 500 },
    { duration: '20s', target: 0 },
  ],
  thresholds: { http_req_failed: ['rate<0.01'] },
}

/* global __ENV */
export default function () {
  // index of the current 10-second window within the minute
  const windowIndex = Math.floor(new Date().getSeconds() / 10)
  const backendHangs = windowIndex % 2 === 0

  // in even windows, every request runs into a max_execution timeout
  if (backendHangs) {
    http.get(`${__ENV.CADDY_HOSTNAME}/sleep.php?sleep=50000`)
    return
  }

  // in odd windows, the resource is back
  http.get(`${__ENV.CADDY_HOSTNAME}/sleep.php?sleep=5&work=30000&output=100`)
}
29
testdata/sleep.php
vendored
Normal file
29
testdata/sleep.php
vendored
Normal file
@@ -0,0 +1,29 @@
<?php

// Busy-work endpoint driven by the k6 performance scenarios.
// Query parameters (all optional):
//   sleep      - simulated IO latency in milliseconds per iteration
//   work       - busy-loop units simulating CPU work per iteration
//   output     - how many times the response line is echoed per iteration
//   iterations - how many work/sleep/output cycles to run (e.g. DB queries)
require_once __DIR__ . '/_executor.php';

return function () {
    $sleep = (int)($_GET['sleep'] ?? 0);
    $work = (int)($_GET['work'] ?? 0);
    $output = (int)($_GET['output'] ?? 1);
    $iterations = (int)($_GET['iterations'] ?? 1);

    for ($i = 0; $i < $iterations; $i++) {
        // simulate work
        // with 30_000 iterations we're in the range of a simple Laravel request
        // (without JIT and with debug symbols enabled)
        // NOTE(review): was `$a = +$j;` — a no-op unary-plus assignment;
        // accumulate instead so the loop body matches its "simulate work" intent
        $a = 0;
        for ($j = 0; $j < $work; $j++) {
            $a += $j;
        }

        // simulate IO, sleep x milliseconds
        if ($sleep > 0) {
            usleep($sleep * 1000);
        }

        // simulate output
        for ($k = 0; $k < $output; $k++) {
            echo "slept for $sleep ms and worked for $work iterations";
        }
    }
};
Reference in New Issue
Block a user