From 11841b320265bcd66af8756c16b5c1d29e5b2e84 Mon Sep 17 00:00:00 2001 From: Pablo Pozo Date: Fri, 14 Jul 2023 13:55:40 +0300 Subject: [PATCH 01/24] Backport engineer-man#519 parallel requests fix --- api/src/api/v2.js | 29 ++++--- api/src/job.js | 195 +++++++++++++++++++++++++--------------------- 2 files changed, 119 insertions(+), 105 deletions(-) diff --git a/api/src/api/v2.js b/api/src/api/v2.js index 38b7c859..3dfa3ee4 100644 --- a/api/src/api/v2.js +++ b/api/src/api/v2.js @@ -174,9 +174,9 @@ router.use((req, res, next) => { router.ws('/connect', async (ws, req) => { let job = null; - let eventBus = new events.EventEmitter(); + let event_bus = new events.EventEmitter(); - eventBus.on('stdout', data => + event_bus.on('stdout', data => ws.send( JSON.stringify({ type: 'data', @@ -185,7 +185,7 @@ router.ws('/connect', async (ws, req) => { }) ) ); - eventBus.on('stderr', data => + event_bus.on('stderr', data => ws.send( JSON.stringify({ type: 'data', @@ -194,10 +194,10 @@ router.ws('/connect', async (ws, req) => { }) ) ); - eventBus.on('stage', stage => + event_bus.on('stage', stage => ws.send(JSON.stringify({ type: 'stage', stage })) ); - eventBus.on('exit', (stage, status) => + event_bus.on('exit', (stage, status) => ws.send(JSON.stringify({ type: 'exit', stage, ...status })) ); @@ -220,7 +220,8 @@ router.ws('/connect', async (ws, req) => { }) ); - await job.execute_interactive(eventBus); + await job.execute(event_bus); + await job.cleanup(); ws.close(4999, 'Job Completed'); } else { @@ -230,7 +231,7 @@ router.ws('/connect', async (ws, req) => { case 'data': if (job !== null) { if (msg.stream === 'stdin') { - eventBus.emit('stdin', msg.data); + event_bus.emit('stdin', msg.data); } else { ws.close(4004, 'Can only write to stdin'); } @@ -241,7 +242,7 @@ router.ws('/connect', async (ws, req) => { case 'signal': if (job !== null) { if (SIGNALS.includes(msg.signal)) { - eventBus.emit('signal', msg.signal); + event_bus.emit('signal', msg.signal); } else { ws.close(4005, 'Invalid signal'); } @@ -257,12 +258,6 @@ router.ws('/connect', async (ws, req) => { } }); - ws.on('close', async () => { - if (job !== null) { - await job.cleanup(); - } - }); - setTimeout(() => { //Terminate the socket after 1 second, if not initialized. 
if (job === null) ws.close(4001, 'Initialization Timeout'); @@ -275,7 +270,11 @@ router.post('/execute', async (req, res) => { await job.prime(); - const result = await job.execute(); + let result = await job.execute(); + // Backward compatibility when the run stage is not started + if (result.run === undefined) { + result.run = result.compile; + } await job.cleanup(); diff --git a/api/src/job.js b/api/src/job.js index ecc19e51..f4a8c055 100644 --- a/api/src/job.js +++ b/api/src/job.js @@ -19,16 +19,12 @@ let uid = 0; let gid = 0; let remaining_job_spaces = config.max_concurrent_jobs; -let jobQueue = []; - -setInterval(() => { - // Every 10ms try resolve a new job, if there is an available slot - if (jobQueue.length > 0 && remaining_job_spaces > 0) { - jobQueue.shift()(); - } -}, 10); +let job_queue = []; class Job { + #active_timeouts; + #active_parent_processes; + constructor({ runtime, files, args, stdin, timeouts, memory_limits }) { this.uuid = uuidv4(); @@ -45,6 +41,13 @@ class Job { this.args = args; this.stdin = stdin; + // Add a trailing newline if it doesn't exist + if (this.stdin.slice(-1) !== '\n') { + this.stdin += '\n'; + } + + this.#active_timeouts = []; + this.#active_parent_processes = []; this.timeouts = timeouts; this.memory_limits = memory_limits; @@ -72,10 +75,9 @@ class Job { if (remaining_job_spaces < 1) { this.logger.info(`Awaiting job slot`); await new Promise(resolve => { - jobQueue.push(resolve); + job_queue.push(resolve); }); } - this.logger.info(`Priming job`); remaining_job_spaces--; this.logger.debug('Writing files to job cache'); @@ -110,7 +112,31 @@ class Job { this.logger.debug('Primed job'); } - async safe_call(file, args, timeout, memory_limit, eventBus = null) { + exit_cleanup() { + for (const timeout of this.#active_timeouts) { + clear_timeout(timeout); + } + this.#active_timeouts = []; + this.logger.debug('Cleared the active timeouts'); + + this.cleanup_processes(); + this.logger.debug(`Finished exit cleanup`); + } + + close_cleanup() { + for (const proc of this.#active_parent_processes) { + proc.stderr.destroy(); + if (!proc.stdin.destroyed) { + proc.stdin.end(); + proc.stdin.destroy(); + } + proc.stdout.destroy(); + } + this.#active_parent_processes = []; + this.logger.debug('Destroyed processes writables'); + } + + async safe_call(file, args, timeout, memory_limit, event_bus = null) { return new Promise((resolve, reject) => { const nonetwork = config.disable_networking ? 
['nosocket'] : []; @@ -122,7 +148,10 @@ class Job { ]; const timeout_call = [ - 'timeout', '-s', '9', Math.ceil(timeout / 1000), + 'timeout', + '-s', + '9', + Math.ceil(timeout / 1000), ]; if (memory_limit >= 0) { @@ -155,16 +184,18 @@ class Job { detached: true, //give this process its own process group }); - if (eventBus === null) { + this.#active_parent_processes.push(proc); + + if (event_bus === null) { proc.stdin.write(this.stdin); proc.stdin.end(); proc.stdin.destroy(); } else { - eventBus.on('stdin', data => { + event_bus.on('stdin', data => { proc.stdin.write(data); }); - eventBus.on('kill', signal => { + event_bus.on('kill', signal => { proc.kill(signal); }); } @@ -176,10 +207,11 @@ class Job { process.kill(proc.pid, 'SIGKILL'); }, timeout)) || null; + this.#active_timeouts.push(kill_timeout); proc.stderr.on('data', async data => { - if (eventBus !== null) { - eventBus.emit('stderr', data); + if (event_bus !== null) { + event_bus.emit('stderr', data); } else if (stderr.length > this.runtime.output_max_size) { this.logger.info(`stderr length exceeded`); process.kill(proc.pid, 'SIGKILL'); @@ -190,8 +222,8 @@ class Job { }); proc.stdout.on('data', async data => { - if (eventBus !== null) { - eventBus.emit('stdout', data); + if (event_bus !== null) { + event_bus.emit('stdout', data); } else if (stdout.length > this.runtime.output_max_size) { this.logger.info(`stdout length exceeded`); process.kill(proc.pid, 'SIGKILL'); @@ -201,31 +233,24 @@ class Job { } }); - const exit_cleanup = () => { - clear_timeout(kill_timeout); - - proc.stderr.destroy(); - proc.stdout.destroy(); + proc.on('exit', () => this.exit_cleanup()); - this.cleanup_processes(); - this.logger.debug(`Finished exit cleanup`); - }; - - proc.on('exit', (code, signal) => { - exit_cleanup(); + proc.on('close', (code, signal) => { + this.close_cleanup(); resolve({ stdout, stderr, code, signal, output }); }); proc.on('error', err => { - exit_cleanup(); + this.exit_cleanup(); + this.close_cleanup(); reject({ error: err, stdout, stderr, output }); }); }); } - async execute() { + async execute(event_bus = null) { if (this.state !== job_states.PRIMED) { throw new Error( 'Job must be in primed state, current state: ' + @@ -242,24 +267,54 @@ class Job { this.logger.debug('Compiling'); let compile; + let compile_errored = false; + const { emit_event_bus_result, emit_event_bus_stage } = + event_bus === null + ? 
{ + emit_event_bus_result: () => {}, + emit_event_bus_stage: () => {}, + } + : { + emit_event_bus_result: (stage, result, event_bus) => { + const { error, code, signal } = result; + event_bus.emit('exit', stage, { + error, + code, + signal, + }); + }, + emit_event_bus_stage: (stage, event_bus) => { + event_bus.emit('stage', stage); + }, + }; if (this.runtime.compiled) { + this.logger.debug('Compiling'); + emit_event_bus_stage('compile', event_bus); compile = await this.safe_call( path.join(this.runtime.pkgdir, 'compile'), code_files.map(x => x.name), this.timeouts.compile, - this.memory_limits.compile + this.memory_limits.compile, + event_bus ); + emit_event_bus_result('compile', compile, event_bus); + compile_errored = compile.code !== 0; } - this.logger.debug('Running'); - - const run = await this.safe_call( - path.join(this.runtime.pkgdir, 'run'), - [code_files[0].name, ...this.args], - this.timeouts.run, - this.memory_limits.run - ); + let run; + if (!compile_errored) { + this.logger.debug('Running'); + emit_event_bus_stage('run', event_bus); + run = await this.safe_call( + path.join(this.runtime.pkgdir, 'run'), + [code_files[0].name, ...this.args], + this.timeouts.run, + this.memory_limits.run, + event_bus + ); + emit_event_bus_result('run', run, event_bus); + } this.state = job_states.EXECUTED; @@ -271,50 +326,6 @@ class Job { }; } - async execute_interactive(eventBus) { - if (this.state !== job_states.PRIMED) { - throw new Error( - 'Job must be in primed state, current state: ' + - this.state.toString() - ); - } - - this.logger.info( - `Interactively executing job runtime=${this.runtime.toString()}` - ); - - const code_files = - (this.runtime.language === 'file' && this.files) || - this.files.filter(file => file.encoding == 'utf8'); - - if (this.runtime.compiled) { - eventBus.emit('stage', 'compile'); - const { error, code, signal } = await this.safe_call( - path.join(this.runtime.pkgdir, 'compile'), - code_files.map(x => x.name), - this.timeouts.compile, - this.memory_limits.compile, - eventBus - ); - - eventBus.emit('exit', 'compile', { error, code, signal }); - } - - this.logger.debug('Running'); - eventBus.emit('stage', 'run'); - const { error, code, signal } = await this.safe_call( - path.join(this.runtime.pkgdir, 'run'), - [code_files[0].name, ...this.args], - this.timeouts.run, - this.memory_limits.run, - eventBus - ); - - eventBus.emit('exit', 'run', { error, code, signal }); - - this.state = job_states.EXECUTED; - } - cleanup_processes(dont_wait = []) { let processes = [1]; const to_wait = []; @@ -345,11 +356,11 @@ class Job { const proc_id_int = parse_int(proc_id); // Skip over any processes that aren't ours. 
- if(ruid != this.uid && euid != this.uid) return -1; + if (ruid != this.uid && euid != this.uid) return -1; - if (state == 'Z'){ + if (state == 'Z') { // Zombie process, just needs to be waited, regardless of the user id - if(!to_wait.includes(proc_id_int)) + if (!to_wait.includes(proc_id_int)) to_wait.push(proc_id_int); return -1; @@ -386,7 +397,7 @@ class Job { // Then clear them out of the process tree try { process.kill(proc, 'SIGKILL'); - } catch(e) { + } catch { // Could already be dead and just needs to be waited on this.logger.debug( `Got error while SIGKILLing process ${proc}:`, @@ -440,10 +451,14 @@ class Job { async cleanup() { this.logger.info(`Cleaning up job`); - this.cleanup_processes(); // Run process janitor, just incase there are any residual processes somehow + this.exit_cleanup(); // Run process janitor, just incase there are any residual processes somehow + this.close_cleanup(); await this.cleanup_filesystem(); remaining_job_spaces++; + if (job_queue.length > 0) { + job_queue.shift()(); + } } } From ce852aa20d16e7f66ff6ba7f4de38f8768486ace Mon Sep 17 00:00:00 2001 From: Cory Date: Sat, 5 Aug 2023 21:53:50 +1000 Subject: [PATCH 02/24] add some additional py packages --- packages/python/3.10.0/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/python/3.10.0/build.sh b/packages/python/3.10.0/build.sh index 00c839a0..62fef924 100755 --- a/packages/python/3.10.0/build.sh +++ b/packages/python/3.10.0/build.sh @@ -18,4 +18,4 @@ cd .. rm -rf build -bin/pip3 install numpy scipy pandas pycrypto whoosh bcrypt passlib sympy +bin/pip3 install numpy scipy pandas pycrypto whoosh bcrypt passlib sympy xxhash base58 cryptography PyNaCl From f70ecdd8b4f93497bbd8c0d6397326e256db7ad2 Mon Sep 17 00:00:00 2001 From: devnote-dev Date: Sun, 3 Sep 2023 20:29:22 +0100 Subject: [PATCH 03/24] feat(packages): replace old crystal version --- packages/crystal/{0.36.1 => 1.9.2}/build.sh | 2 +- packages/crystal/{0.36.1 => 1.9.2}/compile | 0 packages/crystal/{0.36.1 => 1.9.2}/environment | 0 packages/crystal/{0.36.1 => 1.9.2}/metadata.json | 2 +- packages/crystal/{0.36.1 => 1.9.2}/run | 0 packages/crystal/{0.36.1 => 1.9.2}/test.cr | 0 6 files changed, 2 insertions(+), 2 deletions(-) rename packages/crystal/{0.36.1 => 1.9.2}/build.sh (72%) mode change 100755 => 100644 rename packages/crystal/{0.36.1 => 1.9.2}/compile (100%) rename packages/crystal/{0.36.1 => 1.9.2}/environment (100%) rename packages/crystal/{0.36.1 => 1.9.2}/metadata.json (71%) rename packages/crystal/{0.36.1 => 1.9.2}/run (100%) rename packages/crystal/{0.36.1 => 1.9.2}/test.cr (100%) diff --git a/packages/crystal/0.36.1/build.sh b/packages/crystal/1.9.2/build.sh old mode 100755 new mode 100644 similarity index 72% rename from packages/crystal/0.36.1/build.sh rename to packages/crystal/1.9.2/build.sh index ba10f3f3..e95779ce --- a/packages/crystal/0.36.1/build.sh +++ b/packages/crystal/1.9.2/build.sh @@ -2,6 +2,6 @@ PREFIX=$(realpath $(dirname $0)) -curl -L "https://github.com/crystal-lang/crystal/releases/download/0.36.1/crystal-0.36.1-1-linux-x86_64.tar.gz" -o crystal.tar.gz +curl -L "https://github.com/crystal-lang/crystal/releases/download/1.9.2/crystal-1.9.2-1-linux-x86_64.tar.gz" -o crystal.tar.gz tar xzf crystal.tar.gz --strip-components=1 rm crystal.tar.gz diff --git a/packages/crystal/0.36.1/compile b/packages/crystal/1.9.2/compile similarity index 100% rename from packages/crystal/0.36.1/compile rename to packages/crystal/1.9.2/compile diff --git 
a/packages/crystal/0.36.1/environment b/packages/crystal/1.9.2/environment similarity index 100% rename from packages/crystal/0.36.1/environment rename to packages/crystal/1.9.2/environment diff --git a/packages/crystal/0.36.1/metadata.json b/packages/crystal/1.9.2/metadata.json similarity index 71% rename from packages/crystal/0.36.1/metadata.json rename to packages/crystal/1.9.2/metadata.json index ee995eba..09a23ad5 100644 --- a/packages/crystal/0.36.1/metadata.json +++ b/packages/crystal/1.9.2/metadata.json @@ -1,5 +1,5 @@ { "language": "crystal", - "version": "0.36.1", + "version": "1.9.2", "aliases": ["crystal", "cr"] } diff --git a/packages/crystal/0.36.1/run b/packages/crystal/1.9.2/run similarity index 100% rename from packages/crystal/0.36.1/run rename to packages/crystal/1.9.2/run diff --git a/packages/crystal/0.36.1/test.cr b/packages/crystal/1.9.2/test.cr similarity index 100% rename from packages/crystal/0.36.1/test.cr rename to packages/crystal/1.9.2/test.cr From fe2fc374aa0b4f1a6631047651511af98445da13 Mon Sep 17 00:00:00 2001 From: Omar Brikaa Date: Fri, 15 Sep 2023 16:48:35 +0300 Subject: [PATCH 04/24] Improve normal execution error handling - Properly differentiate between bad requests and internal server errors - Avoid clean up evasion by putting the cleanup in the finally block --- api/src/api/v2.js | 20 +++++++++++++++----- packages/bash/5.2.0/build.sh | 0 2 files changed, 15 insertions(+), 5 deletions(-) mode change 100644 => 100755 packages/bash/5.2.0/build.sh diff --git a/api/src/api/v2.js b/api/src/api/v2.js index 3dfa3ee4..ad074948 100644 --- a/api/src/api/v2.js +++ b/api/src/api/v2.js @@ -265,9 +265,13 @@ router.ws('/connect', async (ws, req) => { }); router.post('/execute', async (req, res) => { + let job; + try { + job = await get_job(req.body); + } catch (error) { + return res.status(400).json(error); + } try { - const job = await get_job(req.body); - await job.prime(); let result = await job.execute(); @@ -276,11 +280,17 @@ router.post('/execute', async (req, res) => { result.run = result.compile; } - await job.cleanup(); - return res.status(200).send(result); } catch (error) { - return res.status(400).json(error); + logger.error(`Error executing job: ${job.uuid}:\n${error}`); + return res.status(500).send(); + } finally { + try { + await job.cleanup(); + } catch (error) { + logger.error(`Error cleaning up job: ${job.uuid}:\n${error}`); + return res.status(500).send(); + } } }); diff --git a/packages/bash/5.2.0/build.sh b/packages/bash/5.2.0/build.sh old mode 100644 new mode 100755 From 040e19fdc2e03e241519e81d5bd7b4c866cf8b6b Mon Sep 17 00:00:00 2001 From: Omar Brikaa Date: Fri, 15 Sep 2023 20:39:15 +0300 Subject: [PATCH 05/24] Interactive execution: run job cleanup regardless of errors --- api/src/api/v2.js | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/api/src/api/v2.js b/api/src/api/v2.js index ad074948..1b015b56 100644 --- a/api/src/api/v2.js +++ b/api/src/api/v2.js @@ -210,19 +210,26 @@ router.ws('/connect', async (ws, req) => { if (job === null) { job = await get_job(msg); - await job.prime(); - - ws.send( - JSON.stringify({ - type: 'runtime', - language: job.runtime.language, - version: job.runtime.version.raw, - }) - ); - - await job.execute(event_bus); - await job.cleanup(); - + try { + await job.prime(); + + ws.send( + JSON.stringify({ + type: 'runtime', + language: job.runtime.language, + version: job.runtime.version.raw, + }) + ); + + await job.execute(event_bus); + } catch (error) { + 
logger.error( + `Error cleaning up job: ${job.uuid}:\n${error}` + ); + throw error; + } finally { + await job.cleanup(); + } ws.close(4999, 'Job Completed'); } else { ws.close(4000, 'Already Initialized'); From 6a47869578b89212c0e7f2a37d395e804c924d81 Mon Sep 17 00:00:00 2001 From: Omar Brikaa Date: Sat, 16 Sep 2023 21:37:09 +0300 Subject: [PATCH 06/24] Comments explaining the try-catch flow --- api/src/api/v2.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/src/api/v2.js b/api/src/api/v2.js index 1b015b56..032fd51d 100644 --- a/api/src/api/v2.js +++ b/api/src/api/v2.js @@ -230,7 +230,7 @@ router.ws('/connect', async (ws, req) => { } finally { await job.cleanup(); } - ws.close(4999, 'Job Completed'); + ws.close(4999, 'Job Completed'); // Will not execute if an error is thrown above } else { ws.close(4000, 'Already Initialized'); } @@ -293,10 +293,10 @@ router.post('/execute', async (req, res) => { return res.status(500).send(); } finally { try { - await job.cleanup(); + await job.cleanup(); // This gets executed before the returns in try/catch } catch (error) { logger.error(`Error cleaning up job: ${job.uuid}:\n${error}`); - return res.status(500).send(); + return res.status(500).send(); // On error, this replaces the return in the outer try-catch } } }); From fef00b96f169c1b1508d871ae901359d2ced82d2 Mon Sep 17 00:00:00 2001 From: Omar Brikaa Date: Tue, 3 Oct 2023 13:59:23 +0300 Subject: [PATCH 07/24] Improve containers stopping performance by handling SIGTERM --- api/src/index.js | 12 ++++++++++-- repo/Dockerfile | 2 +- repo/entrypoint.sh | 6 +++--- repo/serve.py | 18 ++++++++++++++++++ 4 files changed, 32 insertions(+), 6 deletions(-) create mode 100644 repo/serve.py diff --git a/api/src/index.js b/api/src/index.js index 8a21e570..6ad93907 100644 --- a/api/src/index.js +++ b/api/src/index.js @@ -35,7 +35,10 @@ expressWs(app); } } }); - fss.chmodSync(path.join(config.data_directory, globals.data_directories.jobs), 0o711) + fss.chmodSync( + path.join(config.data_directory, globals.data_directories.jobs), + 0o711 + ); logger.info('Loading packages'); const pkgdir = path.join( @@ -92,7 +95,12 @@ expressWs(app); logger.debug('Calling app.listen'); const [address, port] = config.bind_address.split(':'); - app.listen(port, address, () => { + const server = app.listen(port, address, () => { logger.info('API server started on', config.bind_address); }); + + process.on('SIGTERM', () => { + server.close(); + process.exit(0) + }); })(); diff --git a/repo/Dockerfile b/repo/Dockerfile index fe61a6f7..86be49a7 100644 --- a/repo/Dockerfile +++ b/repo/Dockerfile @@ -14,7 +14,7 @@ RUN apt-get update && apt-get install -y unzip autoconf build-essential libssl-d rm -rf /var/lib/apt/lists/* && \ update-alternatives --install /usr/bin/python python /usr/bin/python3.7 2 -ADD entrypoint.sh mkindex.sh / +ADD entrypoint.sh mkindex.sh serve.py / ENTRYPOINT ["bash","/entrypoint.sh"] CMD ["--no-build"] diff --git a/repo/entrypoint.sh b/repo/entrypoint.sh index 6c47e37c..15651439 100755 --- a/repo/entrypoint.sh +++ b/repo/entrypoint.sh @@ -27,7 +27,7 @@ do echo "Done with package $pkg" elif [[ $CI -eq 1 ]]; then echo "Commit SHA: $pkg" - + cd .. echo "Changed files:" git diff --name-only $pkg^1 $pkg @@ -52,8 +52,8 @@ echo "Index created" if [[ $SERVER -eq 1 ]]; then echo "Starting index server.." 
- python3 -m http.server + exec python3 /serve.py else echo "Skipping starting index server" fi -exit 0 \ No newline at end of file +exit 0 diff --git a/repo/serve.py b/repo/serve.py new file mode 100644 index 00000000..a821219c --- /dev/null +++ b/repo/serve.py @@ -0,0 +1,18 @@ +import signal +import sys +import http.server +import socketserver + +PORT = 8000 + +Handler = http.server.SimpleHTTPRequestHandler + + +def signal_handler(sig, frame): + sys.exit(0) + +signal.signal(signal.SIGTERM, signal_handler) + +with socketserver.TCPServer(("", PORT), Handler) as httpd: + print("serving at port", PORT) + httpd.serve_forever() From 016a8c086f7c9cc9d785f8ccc2ffb6975cb4a4da Mon Sep 17 00:00:00 2001 From: Omar Brikaa Date: Tue, 3 Oct 2023 15:21:48 +0300 Subject: [PATCH 08/24] exec comment --- repo/entrypoint.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/repo/entrypoint.sh b/repo/entrypoint.sh index 15651439..c167463f 100755 --- a/repo/entrypoint.sh +++ b/repo/entrypoint.sh @@ -52,6 +52,7 @@ echo "Index created" if [[ $SERVER -eq 1 ]]; then echo "Starting index server.." + # We want the child process to replace the shell to handle signals exec python3 /serve.py else echo "Skipping starting index server" From 18743a33690a3849d2b12263e62027950b86bf0c Mon Sep 17 00:00:00 2001 From: Aetheridon Date: Wed, 18 Oct 2023 19:43:32 +0100 Subject: [PATCH 09/24] Added files for Python 3.11.0 --- packages/python/3.11.0/build.sh | 21 +++++++++++++++++++++ packages/python/3.11.0/environment | 1 + packages/python/3.11.0/metadata.json | 5 +++++ packages/python/3.11.0/run | 3 +++ packages/python/3.11.0/test.py | 7 +++++++ 5 files changed, 37 insertions(+) create mode 100644 packages/python/3.11.0/build.sh create mode 100644 packages/python/3.11.0/environment create mode 100644 packages/python/3.11.0/metadata.json create mode 100644 packages/python/3.11.0/run create mode 100644 packages/python/3.11.0/test.py diff --git a/packages/python/3.11.0/build.sh b/packages/python/3.11.0/build.sh new file mode 100644 index 00000000..8db680c2 --- /dev/null +++ b/packages/python/3.11.0/build.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +PREFIX=$(realpath $(dirname $0)) + +mkdir -p build + +cd build + +curl "https://www.python.org/ftp/python/3.11.0/Python-3.11.0.tgz" -o python.tar.gz +tar xzf python.tar.gz --strip-components=1 +rm python.tar.gz + +./configure --prefix "$PREFIX" --with-ensurepip=install +make -j$(nproc) +make install -j$(nproc) + +cd .. 
+ +rm -rf build + +bin/pip3 install numpy scipy pandas pycryptodome whoosh bcrypt passlib sympy xxhash base58 cryptography PyNaCl diff --git a/packages/python/3.11.0/environment b/packages/python/3.11.0/environment new file mode 100644 index 00000000..977a5e8f --- /dev/null +++ b/packages/python/3.11.0/environment @@ -0,0 +1 @@ +export PATH=$PWD/bin:$PATH diff --git a/packages/python/3.11.0/metadata.json b/packages/python/3.11.0/metadata.json new file mode 100644 index 00000000..c169a604 --- /dev/null +++ b/packages/python/3.11.0/metadata.json @@ -0,0 +1,5 @@ +{ + "language": "python", + "version": "3.11.0", + "aliases": ["py", "py3", "python3", "python3.11"] +} diff --git a/packages/python/3.11.0/run b/packages/python/3.11.0/run new file mode 100644 index 00000000..cf380892 --- /dev/null +++ b/packages/python/3.11.0/run @@ -0,0 +1,3 @@ +#!/bin/bash + +python3.11 "$@" diff --git a/packages/python/3.11.0/test.py b/packages/python/3.11.0/test.py new file mode 100644 index 00000000..1784bfa8 --- /dev/null +++ b/packages/python/3.11.0/test.py @@ -0,0 +1,7 @@ +working = True + +match working: + case True: + print("OK") + case False: + print() From dc4bb294b613e9c4e5e92ebb8fc855f88083f4b7 Mon Sep 17 00:00:00 2001 From: Shubham Sahai Date: Thu, 26 Oct 2023 02:42:28 +0800 Subject: [PATCH 10/24] bugfix: catch error - "e is not defined" --- api/src/job.js | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/api/src/job.js b/api/src/job.js index f4a8c055..00ee14fd 100644 --- a/api/src/job.js +++ b/api/src/job.js @@ -146,7 +146,7 @@ class Job { '--nofile=' + this.runtime.max_open_files, '--fsize=' + this.runtime.max_file_size, ]; - + const timeout_call = [ 'timeout', '-s', @@ -158,7 +158,7 @@ class Job { prlimit.push('--as=' + memory_limit); } - const proc_call = [ + const proc_call = [ 'nice', ...timeout_call, ...prlimit, @@ -254,7 +254,7 @@ class Job { if (this.state !== job_states.PRIMED) { throw new Error( 'Job must be in primed state, current state: ' + - this.state.toString() + this.state.toString() ); } @@ -271,22 +271,22 @@ class Job { const { emit_event_bus_result, emit_event_bus_stage } = event_bus === null ? { - emit_event_bus_result: () => {}, - emit_event_bus_stage: () => {}, - } + emit_event_bus_result: () => { }, + emit_event_bus_stage: () => { }, + } : { - emit_event_bus_result: (stage, result, event_bus) => { - const { error, code, signal } = result; - event_bus.emit('exit', stage, { - error, - code, - signal, - }); - }, - emit_event_bus_stage: (stage, event_bus) => { - event_bus.emit('stage', stage); - }, - }; + emit_event_bus_result: (stage, result, event_bus) => { + const { error, code, signal } = result; + event_bus.emit('exit', stage, { + error, + code, + signal, + }); + }, + emit_event_bus_stage: (stage, event_bus) => { + event_bus.emit('stage', stage); + }, + }; if (this.runtime.compiled) { this.logger.debug('Compiling'); @@ -352,9 +352,9 @@ class Job { const [_, ruid, euid, suid, fuid] = uid_line.split(/\s+/); const [_1, state, user_friendly] = state_line.split(/\s+/); - + const proc_id_int = parse_int(proc_id); - + // Skip over any processes that aren't ours. 
if (ruid != this.uid && euid != this.uid) return -1; @@ -362,7 +362,7 @@ class Job { // Zombie process, just needs to be waited, regardless of the user id if (!to_wait.includes(proc_id_int)) to_wait.push(proc_id_int); - + return -1; } // We should kill in all other state (Sleep, Stopped & Running) @@ -397,7 +397,7 @@ class Job { // Then clear them out of the process tree try { process.kill(proc, 'SIGKILL'); - } catch { + } catch (e) { // Could already be dead and just needs to be waited on this.logger.debug( `Got error while SIGKILLing process ${proc}:`, From d8af1ee301abf1eff0a008f7625600b6b743b5b8 Mon Sep 17 00:00:00 2001 From: Shubham Sahai Date: Mon, 30 Oct 2023 20:09:01 +0800 Subject: [PATCH 11/24] Try-Catch process kills to handle dead processes --- api/src/job.js | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/api/src/job.js b/api/src/job.js index 00ee14fd..46efe27f 100644 --- a/api/src/job.js +++ b/api/src/job.js @@ -204,7 +204,16 @@ class Job { (timeout >= 0 && set_timeout(async _ => { this.logger.info(`Timeout exceeded timeout=${timeout}`); - process.kill(proc.pid, 'SIGKILL'); + try { + process.kill(proc.pid, 'SIGKILL'); + } + catch (e) { + // Could already be dead and just needs to be waited on + this.logger.debug( + `Got error while SIGKILLing process ${proc}:`, + e + ); + } }, timeout)) || null; this.#active_timeouts.push(kill_timeout); @@ -214,7 +223,16 @@ class Job { event_bus.emit('stderr', data); } else if (stderr.length > this.runtime.output_max_size) { this.logger.info(`stderr length exceeded`); - process.kill(proc.pid, 'SIGKILL'); + try { + process.kill(proc.pid, 'SIGKILL'); + } + catch (e) { + // Could already be dead and just needs to be waited on + this.logger.debug( + `Got error while SIGKILLing process ${proc}:`, + e + ); + } } else { stderr += data; output += data; @@ -226,7 +244,16 @@ class Job { event_bus.emit('stdout', data); } else if (stdout.length > this.runtime.output_max_size) { this.logger.info(`stdout length exceeded`); - process.kill(proc.pid, 'SIGKILL'); + try { + process.kill(proc.pid, 'SIGKILL'); + } + catch (e) { + // Could already be dead and just needs to be waited on + this.logger.debug( + `Got error while SIGKILLing process ${proc}:`, + e + ); + } } else { stdout += data; output += data; From c97324beb34570b99e95aef40cf2a68d62acf41a Mon Sep 17 00:00:00 2001 From: Aetheridon Date: Wed, 1 Nov 2023 21:29:36 +0000 Subject: [PATCH 12/24] Python 3.12.0 support --- packages/python/3.12.0/build.sh | 21 +++++++++++++++++++++ packages/python/3.12.0/environment | 1 + packages/python/3.12.0/metadata.json | 5 +++++ packages/python/3.12.0/run | 3 +++ packages/python/3.12.0/test.py | 7 +++++++ 5 files changed, 37 insertions(+) create mode 100644 packages/python/3.12.0/build.sh create mode 100644 packages/python/3.12.0/environment create mode 100644 packages/python/3.12.0/metadata.json create mode 100644 packages/python/3.12.0/run create mode 100644 packages/python/3.12.0/test.py diff --git a/packages/python/3.12.0/build.sh b/packages/python/3.12.0/build.sh new file mode 100644 index 00000000..4de24a79 --- /dev/null +++ b/packages/python/3.12.0/build.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +PREFIX=$(realpath $(dirname $0)) + +mkdir -p build + +cd build + +curl "https://www.python.org/ftp/python/3.12.0/Python-3.12.0.tgz" -o python.tar.gz +tar xzf python.tar.gz --strip-components=1 +rm python.tar.gz + +./configure --prefix "$PREFIX" --with-ensurepip=install +make -j$(nproc) +make install -j$(nproc) + +cd .. 
+ +rm -rf build + +bin/pip3 install numpy scipy pandas pycryptodome whoosh bcrypt passlib sympy xxhash base58 cryptography PyNaCl diff --git a/packages/python/3.12.0/environment b/packages/python/3.12.0/environment new file mode 100644 index 00000000..977a5e8f --- /dev/null +++ b/packages/python/3.12.0/environment @@ -0,0 +1 @@ +export PATH=$PWD/bin:$PATH diff --git a/packages/python/3.12.0/metadata.json b/packages/python/3.12.0/metadata.json new file mode 100644 index 00000000..59d257ac --- /dev/null +++ b/packages/python/3.12.0/metadata.json @@ -0,0 +1,5 @@ +{ + "language": "python", + "version": "3.12.0", + "aliases": ["py", "py3", "python3", "python3.12"] +} diff --git a/packages/python/3.12.0/run b/packages/python/3.12.0/run new file mode 100644 index 00000000..6f779907 --- /dev/null +++ b/packages/python/3.12.0/run @@ -0,0 +1,3 @@ +#!/bin/bash + +python3.12 "$@" diff --git a/packages/python/3.12.0/test.py b/packages/python/3.12.0/test.py new file mode 100644 index 00000000..1784bfa8 --- /dev/null +++ b/packages/python/3.12.0/test.py @@ -0,0 +1,7 @@ +working = True + +match working: + case True: + print("OK") + case False: + print() From 647bc3a7c735a02ecd97d2b849340dc022dcee61 Mon Sep 17 00:00:00 2001 From: Ahmed Wael Date: Fri, 26 Jan 2024 08:41:28 +0200 Subject: [PATCH 13/24] handle stdout and stderr limits properly (#643) * handle stdout and stderr limits proberly Co-authored-by: Omar Brikaa * added environment to docker compose --------- Co-authored-by: Omar Brikaa --- api/src/job.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/src/job.js b/api/src/job.js index 46efe27f..a2641f93 100644 --- a/api/src/job.js +++ b/api/src/job.js @@ -221,7 +221,7 @@ class Job { proc.stderr.on('data', async data => { if (event_bus !== null) { event_bus.emit('stderr', data); - } else if (stderr.length > this.runtime.output_max_size) { + } else if ((stderr.length + data.length) > this.runtime.output_max_size) { this.logger.info(`stderr length exceeded`); try { process.kill(proc.pid, 'SIGKILL'); @@ -242,7 +242,7 @@ class Job { proc.stdout.on('data', async data => { if (event_bus !== null) { event_bus.emit('stdout', data); - } else if (stdout.length > this.runtime.output_max_size) { + } else if ((stdout.length + data.length) > this.runtime.output_max_size) { this.logger.info(`stdout length exceeded`); try { process.kill(proc.pid, 'SIGKILL'); From 684b47d2a2e3c7aa9c1c3af0e5482888ba5bee76 Mon Sep 17 00:00:00 2001 From: Kodie Date: Thu, 18 Apr 2024 23:38:20 +1200 Subject: [PATCH 14/24] pkg(node-20.11.0) Added Node 20.11.0 (#646) --- packages/node/20.11.1/build.sh | 4 ++++ packages/node/20.11.1/environment | 1 + packages/node/20.11.1/metadata.json | 10 ++++++++++ packages/node/20.11.1/run | 3 +++ packages/node/20.11.1/test.js | 1 + 5 files changed, 19 insertions(+) create mode 100644 packages/node/20.11.1/build.sh create mode 100644 packages/node/20.11.1/environment create mode 100644 packages/node/20.11.1/metadata.json create mode 100644 packages/node/20.11.1/run create mode 100644 packages/node/20.11.1/test.js diff --git a/packages/node/20.11.1/build.sh b/packages/node/20.11.1/build.sh new file mode 100644 index 00000000..4c7a3af8 --- /dev/null +++ b/packages/node/20.11.1/build.sh @@ -0,0 +1,4 @@ +#!/bin/bash +curl "https://nodejs.org/dist/v20.11.1/node-v20.11.1-linux-x64.tar.xz" -o node.tar.xz +tar xf node.tar.xz --strip-components=1 +rm node.tar.xz \ No newline at end of file diff --git a/packages/node/20.11.1/environment b/packages/node/20.11.1/environment new file mode 
100644 index 00000000..bd0ff98f --- /dev/null +++ b/packages/node/20.11.1/environment @@ -0,0 +1 @@ +export PATH=$PWD/bin:$PATH \ No newline at end of file diff --git a/packages/node/20.11.1/metadata.json b/packages/node/20.11.1/metadata.json new file mode 100644 index 00000000..d401ad50 --- /dev/null +++ b/packages/node/20.11.1/metadata.json @@ -0,0 +1,10 @@ +{ + "language": "node", + "version": "20.11.1", + "provides": [ + { + "language": "javascript", + "aliases": ["node-javascript", "node-js", "javascript", "js"] + } + ] +} \ No newline at end of file diff --git a/packages/node/20.11.1/run b/packages/node/20.11.1/run new file mode 100644 index 00000000..6d1fdee5 --- /dev/null +++ b/packages/node/20.11.1/run @@ -0,0 +1,3 @@ +#!/bin/bash + +node "$@" \ No newline at end of file diff --git a/packages/node/20.11.1/test.js b/packages/node/20.11.1/test.js new file mode 100644 index 00000000..c9638433 --- /dev/null +++ b/packages/node/20.11.1/test.js @@ -0,0 +1 @@ +console.log('OK'); \ No newline at end of file From c4cf018be2e73a378de9bfd64f36b7804804f164 Mon Sep 17 00:00:00 2001 From: Damodar Lohani Date: Thu, 18 Apr 2024 17:25:14 +0545 Subject: [PATCH 15/24] add Dart 3.0.1 package (#602) --- packages/dart/3.0.1/build.sh | 11 +++++++++++ packages/dart/3.0.1/environment | 4 ++++ packages/dart/3.0.1/metadata.json | 5 +++++ packages/dart/3.0.1/run | 4 ++++ packages/dart/3.0.1/test.dart | 3 +++ 5 files changed, 27 insertions(+) create mode 100755 packages/dart/3.0.1/build.sh create mode 100644 packages/dart/3.0.1/environment create mode 100644 packages/dart/3.0.1/metadata.json create mode 100644 packages/dart/3.0.1/run create mode 100644 packages/dart/3.0.1/test.dart diff --git a/packages/dart/3.0.1/build.sh b/packages/dart/3.0.1/build.sh new file mode 100755 index 00000000..02e9f7e4 --- /dev/null +++ b/packages/dart/3.0.1/build.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +curl -L "https://storage.googleapis.com/dart-archive/channels/stable/release/3.0.1/sdk/dartsdk-linux-x64-release.zip" -o dart.zip + +unzip dart.zip +rm dart.zip + +cp -r dart-sdk/* . 
+rm -rf dart-sdk + +chmod -R +rx bin \ No newline at end of file diff --git a/packages/dart/3.0.1/environment b/packages/dart/3.0.1/environment new file mode 100644 index 00000000..c6ab0893 --- /dev/null +++ b/packages/dart/3.0.1/environment @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# Put 'export' statements here for environment variables +export PATH=$PWD/bin:$PATH \ No newline at end of file diff --git a/packages/dart/3.0.1/metadata.json b/packages/dart/3.0.1/metadata.json new file mode 100644 index 00000000..afc3ae29 --- /dev/null +++ b/packages/dart/3.0.1/metadata.json @@ -0,0 +1,5 @@ +{ + "language": "dart", + "version": "3.0.1", + "aliases": [] +} \ No newline at end of file diff --git a/packages/dart/3.0.1/run b/packages/dart/3.0.1/run new file mode 100644 index 00000000..062d7c6a --- /dev/null +++ b/packages/dart/3.0.1/run @@ -0,0 +1,4 @@ +#!/usr/bin/env bash + +# Put instructions to run the runtime +dart run "$@" \ No newline at end of file diff --git a/packages/dart/3.0.1/test.dart b/packages/dart/3.0.1/test.dart new file mode 100644 index 00000000..27e87b23 --- /dev/null +++ b/packages/dart/3.0.1/test.dart @@ -0,0 +1,3 @@ +void main() { + print('OK'); +} \ No newline at end of file From 59338eee333606af6b7e5aa459ce63b6a3da2757 Mon Sep 17 00:00:00 2001 From: Brian Seymour Date: Mon, 24 Jun 2024 18:59:19 -0500 Subject: [PATCH 16/24] Update readme.md (#660) --- readme.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/readme.md b/readme.md index 7ef99b56..040e74d4 100644 --- a/readme.md +++ b/readme.md @@ -89,11 +89,10 @@ GET https://emkc.org/api/v2/piston/runtimes POST https://emkc.org/api/v2/piston/execute ``` -> Important Note: The Piston API is rate limited to 5 requests per second. If you have a need for more requests than that -> and it's for a good cause, please reach out to me (EngineerMan#0001) on [Discord](https://discord.gg/engineerman) -> so we can discuss potentially getting you an unlimited key. What is and isn't a good cause is up to me, but, in general -> if your project is a) open source, b) helping people at no cost to them, and c) not likely to use tons of resources -> thereby impairing another's ability to enjoy Piston, you'll likely be granted a key. +> Important Note: The Piston API is rate limited to 5 requests per second. Effective May 7, 2024, no additional +> unlimited keys will be granted and existing keys will be revoked on Jan 1, 2025. The public instance is at +> capacity and the public limit is already very generous. For usage beyond 5 requests/second, you should +> consider self hosting.
From bd42fe335707768e6adfbd5a95afa082c80c9378 Mon Sep 17 00:00:00 2001 From: Omar Brikaa Date: Sun, 8 Sep 2024 04:58:40 +0300 Subject: [PATCH 17/24] Improve isolation, execution limits and execution metrics by using Isolate (#683) * Initial: use Isolate for isolation * Continue: use Isolate for isolation * Bug fixes * timeout is wall-time for backward compatibility * Documentation, signal names, reported time in ms * Report memory usage in bytes * Add privileged flags where needed * Remove tmpfs * Remove tmpfs * Fix package installation * Fix path, fix Zig: CRLF -> LF --- .github/workflows/package-pr.yaml | 2 +- api/Dockerfile | 25 +- api/src/api/v2.js | 79 +--- api/src/config.js | 16 +- api/src/docker-entrypoint.sh | 13 + api/src/globals.js | 66 +++- api/src/index.js | 4 - api/src/job.js | 614 ++++++++++++++---------------- api/src/package.js | 6 +- api/src/runtime.js | 15 + builder/build.sh | 8 +- docker-compose.dev.yaml | 5 +- docker-compose.yaml | 2 +- docs/configuration.md | 19 +- packages/zig/0.10.1/compile | 12 +- packages/zig/0.10.1/environment | 8 +- packages/zig/0.10.1/metadata.json | 17 +- packages/zig/0.10.1/run | 8 +- packages/zig/0.10.1/test.zig | 12 +- packages/zig/0.8.0/metadata.json | 3 +- packages/zig/0.9.1/metadata.json | 3 +- readme.md | 27 +- 22 files changed, 509 insertions(+), 455 deletions(-) create mode 100755 api/src/docker-entrypoint.sh diff --git a/.github/workflows/package-pr.yaml b/.github/workflows/package-pr.yaml index 7a550f22..e0c5e121 100644 --- a/.github/workflows/package-pr.yaml +++ b/.github/workflows/package-pr.yaml @@ -92,7 +92,7 @@ jobs: docker run -v $(pwd)'/repo:/piston/repo' -v $(pwd)'/packages:/piston/packages' -d --name repo docker.pkg.github.com/engineer-man/piston/repo-builder --no-build docker pull docker.pkg.github.com/engineer-man/piston/api docker build -t piston-api api - docker run --network container:repo -v $(pwd)'/data:/piston' -e PISTON_LOG_LEVEL=DEBUG -e 'PISTON_REPO_URL=http://localhost:8000/index' -d --name api piston-api + docker run --privileged --network container:repo -v $(pwd)'/data:/piston' -e PISTON_LOG_LEVEL=DEBUG -e 'PISTON_REPO_URL=http://localhost:8000/index' -d --name api piston-api echo Waiting for API to start.. 
docker run --network container:api appropriate/curl -s --retry 10 --retry-connrefused http://localhost:2000/api/v2/runtimes diff --git a/api/Dockerfile b/api/Dockerfile index ec0d2a8d..51367f01 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -1,20 +1,29 @@ -FROM node:15.10.0-buster-slim +FROM buildpack-deps:bookworm AS isolate +RUN apt-get update && \ + apt-get install -y --no-install-recommends git libcap-dev && \ + rm -rf /var/lib/apt/lists/* && \ + git clone https://github.com/envicutor/isolate.git /tmp/isolate/ && \ + cd /tmp/isolate && \ + git checkout af6db68042c3aa0ded80787fbb78bc0846ea2114 && \ + make -j$(nproc) install && \ + rm -rf /tmp/* + +FROM node:20-bookworm-slim ENV DEBIAN_FRONTEND=noninteractive RUN dpkg-reconfigure -p critical dash -RUN for i in $(seq 1001 1500); do \ - groupadd -g $i runner$i && \ - useradd -M runner$i -g $i -u $i ; \ - done RUN apt-get update && \ apt-get install -y libxml2 gnupg tar coreutils util-linux libc6-dev \ binutils build-essential locales libpcre3-dev libevent-dev libgmp3-dev \ libncurses6 libncurses5 libedit-dev libseccomp-dev rename procps python3 \ libreadline-dev libblas-dev liblapack-dev libpcre3-dev libarpack2-dev \ libfftw3-dev libglpk-dev libqhull-dev libqrupdate-dev libsuitesparse-dev \ - libsundials-dev libpcre2-dev && \ + libsundials-dev libpcre2-dev libcap-dev && \ rm -rf /var/lib/apt/lists/* +RUN useradd -M piston +COPY --from=isolate /usr/local/bin/isolate /usr/local/bin +COPY --from=isolate /usr/local/etc/isolate /usr/local/etc/isolate RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen @@ -23,7 +32,5 @@ COPY ["package.json", "package-lock.json", "./"] RUN npm install COPY ./src ./src -RUN make -C ./src/nosocket/ all && make -C ./src/nosocket/ install - -CMD [ "node", "src"] +CMD ["/piston_api/src/docker-entrypoint.sh"] EXPOSE 2000/tcp diff --git a/api/src/api/v2.js b/api/src/api/v2.js index 032fd51d..f8e2b575 100644 --- a/api/src/api/v2.js +++ b/api/src/api/v2.js @@ -6,50 +6,9 @@ const events = require('events'); const runtime = require('../runtime'); const { Job } = require('../job'); const package = require('../package'); +const globals = require('../globals'); const logger = require('logplease').create('api/v2'); -const SIGNALS = [ - 'SIGABRT', - 'SIGALRM', - 'SIGBUS', - 'SIGCHLD', - 'SIGCLD', - 'SIGCONT', - 'SIGEMT', - 'SIGFPE', - 'SIGHUP', - 'SIGILL', - 'SIGINFO', - 'SIGINT', - 'SIGIO', - 'SIGIOT', - 'SIGKILL', - 'SIGLOST', - 'SIGPIPE', - 'SIGPOLL', - 'SIGPROF', - 'SIGPWR', - 'SIGQUIT', - 'SIGSEGV', - 'SIGSTKFLT', - 'SIGSTOP', - 'SIGTSTP', - 'SIGSYS', - 'SIGTERM', - 'SIGTRAP', - 'SIGTTIN', - 'SIGTTOU', - 'SIGUNUSED', - 'SIGURG', - 'SIGUSR1', - 'SIGUSR2', - 'SIGVTALRM', - 'SIGXCPU', - 'SIGXFSZ', - 'SIGWINCH', -]; -// ref: https://man7.org/linux/man-pages/man7/signal.7.html - function get_job(body) { let { language, @@ -61,6 +20,8 @@ function get_job(body) { run_memory_limit, run_timeout, compile_timeout, + run_cpu_time, + compile_cpu_time, } = body; return new Promise((resolve, reject) => { @@ -106,7 +67,7 @@ function get_job(body) { }); } - for (const constraint of ['memory_limit', 'timeout']) { + for (const constraint of ['memory_limit', 'timeout', 'cpu_time']) { for (const type of ['compile', 'run']) { const constraint_name = `${type}_${constraint}`; const constraint_value = body[constraint_name]; @@ -135,23 +96,23 @@ function get_job(body) { } } - compile_timeout = compile_timeout || rt.timeouts.compile; - run_timeout = run_timeout || rt.timeouts.run; - compile_memory_limit = 
compile_memory_limit || rt.memory_limits.compile; - run_memory_limit = run_memory_limit || rt.memory_limits.run; resolve( new Job({ runtime: rt, - args: args || [], - stdin: stdin || '', + args: args ?? [], + stdin: stdin ?? '', files, timeouts: { - run: run_timeout, - compile: compile_timeout, + run: run_timeout ?? rt.timeouts.run, + compile: compile_timeout ?? rt.timeouts.compile, + }, + cpu_times: { + run: run_cpu_time ?? rt.cpu_times.run, + compile: compile_cpu_time ?? rt.cpu_times.compile, }, memory_limits: { - run: run_memory_limit, - compile: compile_memory_limit, + run: run_memory_limit ?? rt.memory_limits.run, + compile: compile_memory_limit ?? rt.memory_limits.compile, }, }) ); @@ -211,7 +172,7 @@ router.ws('/connect', async (ws, req) => { job = await get_job(msg); try { - await job.prime(); + const box = await job.prime(); ws.send( JSON.stringify({ @@ -221,7 +182,7 @@ router.ws('/connect', async (ws, req) => { }) ); - await job.execute(event_bus); + await job.execute(box, event_bus); } catch (error) { logger.error( `Error cleaning up job: ${job.uuid}:\n${error}` @@ -248,7 +209,9 @@ router.ws('/connect', async (ws, req) => { break; case 'signal': if (job !== null) { - if (SIGNALS.includes(msg.signal)) { + if ( + Object.values(globals.SIGNALS).includes(msg.signal) + ) { event_bus.emit('signal', msg.signal); } else { ws.close(4005, 'Invalid signal'); @@ -279,9 +242,9 @@ router.post('/execute', async (req, res) => { return res.status(400).json(error); } try { - await job.prime(); + const box = await job.prime(); - let result = await job.execute(); + let result = await job.execute(box); // Backward compatibility when the run stage is not started if (result.run === undefined) { result.run = result.compile; diff --git a/api/src/config.js b/api/src/config.js index b8fa97d5..034e3b6a 100644 --- a/api/src/config.js +++ b/api/src/config.js @@ -90,6 +90,18 @@ const options = { parser: parse_int, validators: [(x, raw) => !is_nan(x) || `${raw} is not a number`], }, + compile_cpu_time: { + desc: 'Max CPU time allowed for compile stage in milliseconds', + default: 10000, // 10 seconds + parser: parse_int, + validators: [(x, raw) => !is_nan(x) || `${raw} is not a number`], + }, + run_cpu_time: { + desc: 'Max CPU time allowed for run stage in milliseconds', + default: 3000, // 3 seconds + parser: parse_int, + validators: [(x, raw) => !is_nan(x) || `${raw} is not a number`], + }, compile_memory_limit: { desc: 'Max memory usage for compile stage in bytes (set to -1 for no limit)', default: -1, // no limit @@ -117,7 +129,7 @@ const options = { limit_overrides: { desc: 'Per-language exceptions in JSON format for each of:\ max_process_count, max_open_files, max_file_size, compile_memory_limit,\ - run_memory_limit, compile_timeout, run_timeout, output_max_size', + run_memory_limit, compile_timeout, run_timeout, compile_cpu_time, run_cpu_time, output_max_size', default: {}, parser: parse_overrides, validators: [ @@ -165,6 +177,8 @@ function parse_overrides(overrides_string) { 'run_memory_limit', 'compile_timeout', 'run_timeout', + 'compile_cpu_time', + 'run_cpu_time', 'output_max_size', ].includes(key) ) { diff --git a/api/src/docker-entrypoint.sh b/api/src/docker-entrypoint.sh new file mode 100755 index 00000000..7cf37e33 --- /dev/null +++ b/api/src/docker-entrypoint.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +cd /sys/fs/cgroup && \ +mkdir isolate/ && \ +echo 1 > isolate/cgroup.procs && \ +echo '+cpuset +cpu +io +memory +pids' > cgroup.subtree_control && \ +cd isolate && \ +mkdir init && \ +echo 1 > 
init/cgroup.procs && \ +echo '+cpuset +memory' > cgroup.subtree_control && \ +echo "Initialized cgroup" && \ +chown -R piston:piston /piston && \ +exec su -- piston -c 'ulimit -n 65536 && node /piston_api/src' diff --git a/api/src/globals.js b/api/src/globals.js index 933d2ca9..c2fef423 100644 --- a/api/src/globals.js +++ b/api/src/globals.js @@ -7,14 +7,78 @@ const platform = `${is_docker() ? 'docker' : 'baremetal'}-${fs .split('\n') .find(x => x.startsWith('ID')) .replace('ID=', '')}`; +const SIGNALS = { + 1: 'SIGHUP', + 2: 'SIGINT', + 3: 'SIGQUIT', + 4: 'SIGILL', + 5: 'SIGTRAP', + 6: 'SIGABRT', + 7: 'SIGBUS', + 8: 'SIGFPE', + 9: 'SIGKILL', + 10: 'SIGUSR1', + 11: 'SIGSEGV', + 12: 'SIGUSR2', + 13: 'SIGPIPE', + 14: 'SIGALRM', + 15: 'SIGTERM', + 16: 'SIGSTKFLT', + 17: 'SIGCHLD', + 18: 'SIGCONT', + 19: 'SIGSTOP', + 20: 'SIGTSTP', + 21: 'SIGTTIN', + 22: 'SIGTTOU', + 23: 'SIGURG', + 24: 'SIGXCPU', + 25: 'SIGXFSZ', + 26: 'SIGVTALRM', + 27: 'SIGPROF', + 28: 'SIGWINCH', + 29: 'SIGIO', + 30: 'SIGPWR', + 31: 'SIGSYS', + 34: 'SIGRTMIN', + 35: 'SIGRTMIN+1', + 36: 'SIGRTMIN+2', + 37: 'SIGRTMIN+3', + 38: 'SIGRTMIN+4', + 39: 'SIGRTMIN+5', + 40: 'SIGRTMIN+6', + 41: 'SIGRTMIN+7', + 42: 'SIGRTMIN+8', + 43: 'SIGRTMIN+9', + 44: 'SIGRTMIN+10', + 45: 'SIGRTMIN+11', + 46: 'SIGRTMIN+12', + 47: 'SIGRTMIN+13', + 48: 'SIGRTMIN+14', + 49: 'SIGRTMIN+15', + 50: 'SIGRTMAX-14', + 51: 'SIGRTMAX-13', + 52: 'SIGRTMAX-12', + 53: 'SIGRTMAX-11', + 54: 'SIGRTMAX-10', + 55: 'SIGRTMAX-9', + 56: 'SIGRTMAX-8', + 57: 'SIGRTMAX-7', + 58: 'SIGRTMAX-6', + 59: 'SIGRTMAX-5', + 60: 'SIGRTMAX-4', + 61: 'SIGRTMAX-3', + 62: 'SIGRTMAX-2', + 63: 'SIGRTMAX-1', + 64: 'SIGRTMAX', +}; module.exports = { data_directories: { packages: 'packages', - jobs: 'jobs', }, version: require('../package.json').version, platform, pkg_installed_file: '.ppman-installed', //Used as indication for if a package was installed clean_directories: ['/dev/shm', '/run/lock', '/tmp', '/var/tmp'], + SIGNALS, }; diff --git a/api/src/index.js b/api/src/index.js index 6ad93907..4a6dd422 100644 --- a/api/src/index.js +++ b/api/src/index.js @@ -35,10 +35,6 @@ expressWs(app); } } }); - fss.chmodSync( - path.join(config.data_directory, globals.data_directories.jobs), - 0o711 - ); logger.info('Loading packages'); const pkgdir = path.join( diff --git a/api/src/job.js b/api/src/job.js index a2641f93..e5738435 100644 --- a/api/src/job.js +++ b/api/src/job.js @@ -1,13 +1,10 @@ const logplease = require('logplease'); -const logger = logplease.create('job'); const { v4: uuidv4 } = require('uuid'); const cp = require('child_process'); const path = require('path'); const config = require('./config'); -const globals = require('./globals'); const fs = require('fs/promises'); -const fss = require('fs'); -const wait_pid = require('waitpid'); +const globals = require('./globals'); const job_states = { READY: Symbol('Ready to be primed'), @@ -15,17 +12,26 @@ const job_states = { EXECUTED: Symbol('Executed and ready for cleanup'), }; -let uid = 0; -let gid = 0; +const MAX_BOX_ID = 999; +const ISOLATE_PATH = '/usr/local/bin/isolate'; +let box_id = 0; let remaining_job_spaces = config.max_concurrent_jobs; let job_queue = []; -class Job { - #active_timeouts; - #active_parent_processes; +const get_next_box_id = () => ++box_id % MAX_BOX_ID; - constructor({ runtime, files, args, stdin, timeouts, memory_limits }) { +class Job { + #dirty_boxes; + constructor({ + runtime, + files, + args, + stdin, + timeouts, + cpu_times, + memory_limits, + }) { this.uuid = uuidv4(); this.logger = 
logplease.create(`job/${this.uuid}`); @@ -46,29 +52,39 @@ class Job { this.stdin += '\n'; } - this.#active_timeouts = []; - this.#active_parent_processes = []; - this.timeouts = timeouts; + this.cpu_times = cpu_times; this.memory_limits = memory_limits; - this.uid = config.runner_uid_min + uid; - this.gid = config.runner_gid_min + gid; - - uid++; - gid++; - - uid %= config.runner_uid_max - config.runner_uid_min + 1; - gid %= config.runner_gid_max - config.runner_gid_min + 1; - - this.logger.debug(`Assigned uid=${this.uid} gid=${this.gid}`); - this.state = job_states.READY; - this.dir = path.join( - config.data_directory, - globals.data_directories.jobs, - this.uuid - ); + this.#dirty_boxes = []; + } + + async #create_isolate_box() { + const box_id = get_next_box_id(); + const metadata_file_path = `/tmp/${box_id}-metadata.txt`; + return new Promise((res, rej) => { + cp.exec( + `isolate --init --cg -b${box_id}`, + (error, stdout, stderr) => { + if (error) { + rej( + `Failed to run isolate --init: ${error.message}\nstdout: ${stdout}\nstderr: ${stderr}` + ); + } + if (stdout === '') { + rej('Received empty stdout from isolate --init'); + } + const box = { + id: box_id, + metadata_file_path, + dir: `${stdout.trim()}/box`, + }; + this.#dirty_boxes.push(box); + res(box); + } + ); + }); } async prime() { @@ -80,208 +96,235 @@ class Job { } this.logger.info(`Priming job`); remaining_job_spaces--; - this.logger.debug('Writing files to job cache'); - - this.logger.debug(`Transfering ownership`); - - await fs.mkdir(this.dir, { mode: 0o700 }); - await fs.chown(this.dir, this.uid, this.gid); + this.logger.debug('Running isolate --init'); + const box = await this.#create_isolate_box(); + this.logger.debug(`Creating submission files in Isolate box`); + const submission_dir = path.join(box.dir, 'submission'); + await fs.mkdir(submission_dir); for (const file of this.files) { - const file_path = path.join(this.dir, file.name); - const rel = path.relative(this.dir, file_path); - const file_content = Buffer.from(file.content, file.encoding); + const file_path = path.join(submission_dir, file.name); + const rel = path.relative(submission_dir, file_path); if (rel.startsWith('..')) throw Error( `File path "${file.name}" tries to escape parent directory: ${rel}` ); + const file_content = Buffer.from(file.content, file.encoding); + await fs.mkdir(path.dirname(file_path), { recursive: true, mode: 0o700, }); - await fs.chown(path.dirname(file_path), this.uid, this.gid); - await fs.write_file(file_path, file_content); - await fs.chown(file_path, this.uid, this.gid); } this.state = job_states.PRIMED; this.logger.debug('Primed job'); + return box; } - exit_cleanup() { - for (const timeout of this.#active_timeouts) { - clear_timeout(timeout); - } - this.#active_timeouts = []; - this.logger.debug('Cleared the active timeouts'); - - this.cleanup_processes(); - this.logger.debug(`Finished exit cleanup`); - } - - close_cleanup() { - for (const proc of this.#active_parent_processes) { - proc.stderr.destroy(); - if (!proc.stdin.destroyed) { - proc.stdin.end(); - proc.stdin.destroy(); - } - proc.stdout.destroy(); - } - this.#active_parent_processes = []; - this.logger.debug('Destroyed processes writables'); - } - - async safe_call(file, args, timeout, memory_limit, event_bus = null) { - return new Promise((resolve, reject) => { - const nonetwork = config.disable_networking ? 
['nosocket'] : []; - - const prlimit = [ - 'prlimit', - '--nproc=' + this.runtime.max_process_count, - '--nofile=' + this.runtime.max_open_files, - '--fsize=' + this.runtime.max_file_size, - ]; - - const timeout_call = [ - 'timeout', + async safe_call( + box, + file, + args, + timeout, + cpu_time, + memory_limit, + event_bus = null + ) { + let stdout = ''; + let stderr = ''; + let output = ''; + let memory = null; + let code = null; + let signal = null; + let message = null; + let status = null; + let cpu_time_stat = null; + let wall_time_stat = null; + + const proc = cp.spawn( + ISOLATE_PATH, + [ + '--run', + `-b${box.id}`, + `--meta=${box.metadata_file_path}`, + '--cg', '-s', - '9', - Math.ceil(timeout / 1000), - ]; - - if (memory_limit >= 0) { - prlimit.push('--as=' + memory_limit); - } - - const proc_call = [ - 'nice', - ...timeout_call, - ...prlimit, - ...nonetwork, - 'bash', + '-c', + '/box/submission', + '-e', + `--dir=/runtime=${this.runtime.pkgdir}`, + `--dir=/etc:noexec`, + `--processes=${this.runtime.max_process_count}`, + `--open-files=${this.runtime.max_open_files}`, + `--fsize=${Math.floor(this.runtime.max_file_size / 1000)}`, + `--wall-time=${timeout / 1000}`, + `--time=${cpu_time / 1000}`, + `--extra-time=0`, + ...(memory_limit >= 0 + ? [`--cg-mem=${Math.floor(memory_limit / 1000)}`] + : []), + ...(config.disable_networking ? [] : ['--share-net']), + '--', + '/bin/bash', file, ...args, - ]; - - var stdout = ''; - var stderr = ''; - var output = ''; - - const proc = cp.spawn(proc_call[0], proc_call.splice(1), { + ], + { env: { ...this.runtime.env_vars, PISTON_LANGUAGE: this.runtime.language, }, stdio: 'pipe', - cwd: this.dir, - uid: this.uid, - gid: this.gid, - detached: true, //give this process its own process group - }); - - this.#active_parent_processes.push(proc); - - if (event_bus === null) { - proc.stdin.write(this.stdin); - proc.stdin.end(); - proc.stdin.destroy(); - } else { - event_bus.on('stdin', data => { - proc.stdin.write(data); - }); - - event_bus.on('kill', signal => { - proc.kill(signal); - }); } + ); - const kill_timeout = - (timeout >= 0 && - set_timeout(async _ => { - this.logger.info(`Timeout exceeded timeout=${timeout}`); - try { - process.kill(proc.pid, 'SIGKILL'); - } - catch (e) { - // Could already be dead and just needs to be waited on - this.logger.debug( - `Got error while SIGKILLing process ${proc}:`, - e - ); - } - }, timeout)) || - null; - this.#active_timeouts.push(kill_timeout); - - proc.stderr.on('data', async data => { - if (event_bus !== null) { - event_bus.emit('stderr', data); - } else if ((stderr.length + data.length) > this.runtime.output_max_size) { - this.logger.info(`stderr length exceeded`); - try { - process.kill(proc.pid, 'SIGKILL'); - } - catch (e) { - // Could already be dead and just needs to be waited on - this.logger.debug( - `Got error while SIGKILLing process ${proc}:`, - e - ); - } - } else { - stderr += data; - output += data; - } + if (event_bus === null) { + proc.stdin.write(this.stdin); + proc.stdin.end(); + proc.stdin.destroy(); + } else { + event_bus.on('stdin', data => { + proc.stdin.write(data); }); - proc.stdout.on('data', async data => { - if (event_bus !== null) { - event_bus.emit('stdout', data); - } else if ((stdout.length + data.length) > this.runtime.output_max_size) { - this.logger.info(`stdout length exceeded`); - try { - process.kill(proc.pid, 'SIGKILL'); - } - catch (e) { - // Could already be dead and just needs to be waited on - this.logger.debug( - `Got error while SIGKILLing process ${proc}:`, - e 
- ); - } - } else { - stdout += data; - output += data; - } + event_bus.on('kill', signal => { + proc.kill(signal); }); + } - proc.on('exit', () => this.exit_cleanup()); + proc.stderr.on('data', async data => { + if (event_bus !== null) { + event_bus.emit('stderr', data); + } else if ( + stderr.length + data.length > + this.runtime.output_max_size + ) { + message = 'stderr length exceeded'; + this.logger.info(message); + try { + process.kill(proc.pid, 'SIGABRT'); + } catch (e) { + // Could already be dead and just needs to be waited on + this.logger.debug( + `Got error while SIGABRTing process ${proc}:`, + e + ); + } + } else { + stderr += data; + output += data; + } + }); - proc.on('close', (code, signal) => { - this.close_cleanup(); + proc.stdout.on('data', async data => { + if (event_bus !== null) { + event_bus.emit('stdout', data); + } else if ( + stdout.length + data.length > + this.runtime.output_max_size + ) { + message = 'stdout length exceeded'; + this.logger.info(message); + try { + process.kill(proc.pid, 'SIGABRT'); + } catch (e) { + // Could already be dead and just needs to be waited on + this.logger.debug( + `Got error while SIGABRTing process ${proc}:`, + e + ); + } + } else { + stdout += data; + output += data; + } + }); - resolve({ stdout, stderr, code, signal, output }); + const data = await new Promise((res, rej) => { + proc.on('exit', (_, signal) => { + res({ + signal, + }); }); proc.on('error', err => { - this.exit_cleanup(); - this.close_cleanup(); - - reject({ error: err, stdout, stderr, output }); + rej({ + error: err, + }); }); }); + + try { + const metadata_str = ( + await fs.read_file(box.metadata_file_path) + ).toString(); + const metadata_lines = metadata_str.split('\n'); + for (const line of metadata_lines) { + if (!line) continue; + + const [key, value] = line.split(':'); + if (key === undefined || value === undefined) { + throw new Error( + `Failed to parse metadata file, received: ${line}` + ); + } + switch (key) { + case 'cg-mem': + memory = parse_int(value) * 1000; + break; + case 'exitcode': + code = parse_int(value); + break; + case 'exitsig': + signal = globals.SIGNALS[parse_int(value)] ?? null; + break; + case 'message': + message = message || value; + break; + case 'status': + status = value; + break; + case 'time': + cpu_time_stat = parse_float(value) * 1000; + break; + case 'time-wall': + wall_time_stat = parse_float(value) * 1000; + break; + default: + break; + } + } + } catch (e) { + throw new Error( + `Error reading metadata file: ${box.metadata_file_path}\nError: ${e.message}\nIsolate run stdout: ${stdout}\nIsolate run stderr: ${stderr}` + ); + } + + return { + ...data, + stdout, + stderr, + code, + signal, + output, + memory, + message, + status, + cpu_time: cpu_time_stat, + wall_time: wall_time_stat, + }; } - async execute(event_bus = null) { + async execute(box, event_bus = null) { if (this.state !== job_states.PRIMED) { throw new Error( 'Job must be in primed state, current state: ' + - this.state.toString() + this.state.toString() ); } @@ -298,49 +341,61 @@ class Job { const { emit_event_bus_result, emit_event_bus_stage } = event_bus === null ? 
{ - emit_event_bus_result: () => { }, - emit_event_bus_stage: () => { }, - } + emit_event_bus_result: () => {}, + emit_event_bus_stage: () => {}, + } : { - emit_event_bus_result: (stage, result, event_bus) => { - const { error, code, signal } = result; - event_bus.emit('exit', stage, { - error, - code, - signal, - }); - }, - emit_event_bus_stage: (stage, event_bus) => { - event_bus.emit('stage', stage); - }, - }; + emit_event_bus_result: (stage, result) => { + const { error, code, signal } = result; + event_bus.emit('exit', stage, { + error, + code, + signal, + }); + }, + emit_event_bus_stage: stage => { + event_bus.emit('stage', stage); + }, + }; if (this.runtime.compiled) { this.logger.debug('Compiling'); - emit_event_bus_stage('compile', event_bus); + emit_event_bus_stage('compile'); compile = await this.safe_call( - path.join(this.runtime.pkgdir, 'compile'), + box, + '/runtime/compile', code_files.map(x => x.name), this.timeouts.compile, + this.cpu_times.compile, this.memory_limits.compile, event_bus ); - emit_event_bus_result('compile', compile, event_bus); + emit_event_bus_result('compile', compile); compile_errored = compile.code !== 0; + if (!compile_errored) { + const old_box_dir = box.dir; + box = await this.#create_isolate_box(); + await fs.rename( + path.join(old_box_dir, 'submission'), + path.join(box.dir, 'submission') + ); + } } let run; if (!compile_errored) { this.logger.debug('Running'); - emit_event_bus_stage('run', event_bus); + emit_event_bus_stage('run'); run = await this.safe_call( - path.join(this.runtime.pkgdir, 'run'), + box, + '/runtime/run', [code_files[0].name, ...this.args], this.timeouts.run, + this.cpu_times.run, this.memory_limits.run, event_bus ); - emit_event_bus_result('run', run, event_bus); + emit_event_bus_result('run', run); } this.state = job_states.EXECUTED; @@ -353,139 +408,34 @@ class Job { }; } - cleanup_processes(dont_wait = []) { - let processes = [1]; - const to_wait = []; - this.logger.debug(`Cleaning up processes`); - - while (processes.length > 0) { - processes = []; - - const proc_ids = fss.readdir_sync('/proc'); - - processes = proc_ids.map(proc_id => { - if (isNaN(proc_id)) return -1; - try { - const proc_status = fss.read_file_sync( - path.join('/proc', proc_id, 'status') - ); - const proc_lines = proc_status.to_string().split('\n'); - const state_line = proc_lines.find(line => - line.starts_with('State:') - ); - const uid_line = proc_lines.find(line => - line.starts_with('Uid:') - ); - const [_, ruid, euid, suid, fuid] = uid_line.split(/\s+/); - - const [_1, state, user_friendly] = state_line.split(/\s+/); - - const proc_id_int = parse_int(proc_id); - - // Skip over any processes that aren't ours. 
- if (ruid != this.uid && euid != this.uid) return -1; - - if (state == 'Z') { - // Zombie process, just needs to be waited, regardless of the user id - if (!to_wait.includes(proc_id_int)) - to_wait.push(proc_id_int); - - return -1; - } - // We should kill in all other state (Sleep, Stopped & Running) - - return proc_id_int; - } catch { - return -1; - } - - return -1; - }); - - processes = processes.filter(p => p > 0); - - if (processes.length > 0) - this.logger.debug(`Got processes to kill: ${processes}`); - - for (const proc of processes) { - // First stop the processes, but keep their resources allocated so they cant re-fork - try { - process.kill(proc, 'SIGSTOP'); - } catch (e) { - // Could already be dead - this.logger.debug( - `Got error while SIGSTOPping process ${proc}:`, - e - ); - } - } - - for (const proc of processes) { - // Then clear them out of the process tree - try { - process.kill(proc, 'SIGKILL'); - } catch (e) { - // Could already be dead and just needs to be waited on - this.logger.debug( - `Got error while SIGKILLing process ${proc}:`, - e - ); - } - - to_wait.push(proc); - } - } - - this.logger.debug( - `Finished kill-loop, calling wait_pid to end any zombie processes` - ); - - for (const proc of to_wait) { - if (dont_wait.includes(proc)) continue; - - wait_pid(proc); - } - - this.logger.debug(`Cleaned up processes`); - } - - async cleanup_filesystem() { - for (const clean_path of globals.clean_directories) { - const contents = await fs.readdir(clean_path); - - for (const file of contents) { - const file_path = path.join(clean_path, file); - - try { - const stat = await fs.stat(file_path); - - if (stat.uid === this.uid) { - await fs.rm(file_path, { - recursive: true, - force: true, - }); - } - } catch (e) { - // File was somehow deleted in the time that we read the dir to when we checked the file - this.logger.warn(`Error removing file ${file_path}: ${e}`); - } - } - } - - await fs.rm(this.dir, { recursive: true, force: true }); - } - async cleanup() { this.logger.info(`Cleaning up job`); - this.exit_cleanup(); // Run process janitor, just incase there are any residual processes somehow - this.close_cleanup(); - await this.cleanup_filesystem(); - remaining_job_spaces++; if (job_queue.length > 0) { job_queue.shift()(); } + await Promise.all( + this.#dirty_boxes.map(async box => { + cp.exec( + `isolate --cleanup --cg -b${box.id}`, + (error, stdout, stderr) => { + if (error) { + this.logger.error( + `Failed to run isolate --cleanup: ${error.message} on box #${box.id}\nstdout: ${stdout}\nstderr: ${stderr}` + ); + } + } + ); + try { + await fs.rm(box.metadata_file_path); + } catch (e) { + this.logger.error( + `Failed to remove the metadata directory of box #${box.id}. 
Error: ${e.message}` + ); + } + }) + ); } } diff --git a/api/src/package.js b/api/src/package.js index 11e4f342..8edb0086 100644 --- a/api/src/package.js +++ b/api/src/package.js @@ -145,7 +145,11 @@ class Package { await fs.write_file(path.join(this.install_path, '.env'), filtered_env); logger.debug('Changing Ownership of package directory'); - await util.promisify(chownr)(this.install_path, 0, 0); + await util.promisify(chownr)( + this.install_path, + process.getuid(), + process.getgid() + ); logger.debug('Writing installed state to disk'); await fs.write_file( diff --git a/api/src/runtime.js b/api/src/runtime.js index 6c6f10ed..1d4a8fc9 100644 --- a/api/src/runtime.js +++ b/api/src/runtime.js @@ -15,6 +15,7 @@ class Runtime { pkgdir, runtime, timeouts, + cpu_times, memory_limits, max_process_count, max_open_files, @@ -27,6 +28,7 @@ class Runtime { this.pkgdir = pkgdir; this.runtime = runtime; this.timeouts = timeouts; + this.cpu_times = cpu_times; this.memory_limits = memory_limits; this.max_process_count = max_process_count; this.max_open_files = max_open_files; @@ -62,6 +64,18 @@ class Runtime { language_limit_overrides ), }, + cpu_times: { + compile: this.compute_single_limit( + language_name, + 'compile_cpu_time', + language_limit_overrides + ), + run: this.compute_single_limit( + language_name, + 'run_cpu_time', + language_limit_overrides + ), + }, memory_limits: { compile: this.compute_single_limit( language_name, @@ -171,6 +185,7 @@ class Runtime { .split('\n') .map(line => line.split('=', 2)) .forEach(([key, val]) => { + val = val.replace_all(this.pkgdir, '/runtime'); this._env_vars[key.trim()] = val.trim(); }); } diff --git a/builder/build.sh b/builder/build.sh index eaae21d8..8087e9fa 100755 --- a/builder/build.sh +++ b/builder/build.sh @@ -23,20 +23,20 @@ fetch_packages(){ mkdir build # Start a piston container docker run \ + --privileged \ -v "$PWD/build":'/piston/packages' \ - --tmpfs /piston/jobs \ -dit \ -p $port:2000 \ --name builder_piston_instance \ ghcr.io/engineer-man/piston - + # Ensure the CLI is installed cd ../cli npm i cd - # Evalulate the specfile - ../cli/index.js -u "http://127.0.0.1:$port" ppman spec $1 + ../cli/index.js -u "http://127.0.0.1:$port" ppman spec $1 } build_container(){ @@ -61,4 +61,4 @@ fetch_packages $SPEC_FILE build_container $TAG echo "Start your custom piston container with" -echo "$ docker run --tmpfs /piston/jobs -dit -p 2000:2000 $TAG" +echo "$ docker run --privileged -dit -p 2000:2000 $TAG" diff --git a/docker-compose.dev.yaml b/docker-compose.dev.yaml index 8a0d3856..33f615d4 100644 --- a/docker-compose.dev.yaml +++ b/docker-compose.dev.yaml @@ -4,8 +4,7 @@ services: api: build: api container_name: piston_api - cap_add: - - CAP_SYS_ADMIN + privileged: true restart: always ports: - 2000:2000 @@ -13,8 +12,6 @@ services: - ./data/piston/packages:/piston/packages environment: - PISTON_REPO_URL=http://repo:8000/index - tmpfs: - - /piston/jobs:exec,uid=1000,gid=1000,mode=711 repo: # Local testing of packages build: repo diff --git a/docker-compose.yaml b/docker-compose.yaml index 839b3408..ea62b06e 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -5,10 +5,10 @@ services: image: ghcr.io/engineer-man/piston container_name: piston_api restart: always + privileged: true ports: - 2000:2000 volumes: - ./data/piston/packages:/piston/packages tmpfs: - - /piston/jobs:exec,uid=1000,gid=1000,mode=711 - /tmp:exec diff --git a/docs/configuration.md b/docs/configuration.md index 1a6f5bd2..163cd08f 100644 --- a/docs/configuration.md +++ 
b/docs/configuration.md
@@ -135,8 +135,21 @@ key:
 default: 3000
 ```

-The maximum time that is allowed to be taken by a stage in milliseconds.
-Use -1 for unlimited time.
+The maximum time that is allowed to be taken by a stage, in milliseconds. This is the wall-time of the stage: time that the CPU does not spend working on the stage (e.g., due to context switches or IO) is still counted.
+
+## Compile/Run CPU-Time
+
+```yaml
+key:
+    - PISTON_COMPILE_CPU_TIME
+default: 10000
+
+key:
+    - PISTON_RUN_CPU_TIME
+default: 3000
+```
+
+The maximum CPU-time that is allowed to be consumed by a stage, in milliseconds. Time that the CPU does not spend working on the stage (e.g., IO and context switches) is not counted. This option is typically used in algorithm contests.

 ## Compile/Run memory limits

@@ -178,7 +191,7 @@ default: {}
 ```

 Per-language overrides/exceptions for each of `max_process_count`, `max_open_files`, `max_file_size`,
-`compile_memory_limit`, `run_memory_limit`, `compile_timeout`, `run_timeout`, `output_max_size`. Defined as follows:
+`compile_memory_limit`, `run_memory_limit`, `compile_timeout`, `run_timeout`, `compile_cpu_time`, `run_cpu_time`, `output_max_size`. Defined as follows:

 ```
 PISTON_LIMIT_OVERRIDES={"c++":{"max_process_count":128}}

diff --git a/packages/zig/0.10.1/compile b/packages/zig/0.10.1/compile
index c0b03d15..75ee6ba4 100644
--- a/packages/zig/0.10.1/compile
+++ b/packages/zig/0.10.1/compile
@@ -1,6 +1,6 @@
-#!/usr/bin/env bash
-
-# optimizing for small programs
-rename 's/$/\.zig/' "$@" # Add .zig extension
-
-zig build-exe -O ReleaseSafe --color off --cache-dir . --global-cache-dir . --name out *.zig
\ No newline at end of file
+#!/usr/bin/env bash
+
+# optimizing for small programs
+rename 's/$/\.zig/' "$@" # Add .zig extension
+
+zig build-exe -O ReleaseSafe --color off --cache-dir . --global-cache-dir .
--name out *.zig diff --git a/packages/zig/0.10.1/environment b/packages/zig/0.10.1/environment index 8752bdbc..a85000c8 100644 --- a/packages/zig/0.10.1/environment +++ b/packages/zig/0.10.1/environment @@ -1,4 +1,4 @@ -#!/usr/bin/env bash - -# compiler path -export PATH=$PWD/bin:$PATH \ No newline at end of file +#!/usr/bin/env bash + +# compiler path +export PATH=$PWD/bin:$PATH diff --git a/packages/zig/0.10.1/metadata.json b/packages/zig/0.10.1/metadata.json index 9ecb9551..c7bed08d 100644 --- a/packages/zig/0.10.1/metadata.json +++ b/packages/zig/0.10.1/metadata.json @@ -1,8 +1,9 @@ -{ - "language": "zig", - "version": "0.10.1", - "aliases": [], - "limit_overrides": { - "compile_timeout": 15000 - } -} \ No newline at end of file +{ + "language": "zig", + "version": "0.10.1", + "aliases": [], + "limit_overrides": { + "compile_timeout": 15000, + "compile_cpu_time": 15000 + } +} diff --git a/packages/zig/0.10.1/run b/packages/zig/0.10.1/run index 70376eb3..d96e06fa 100644 --- a/packages/zig/0.10.1/run +++ b/packages/zig/0.10.1/run @@ -1,4 +1,4 @@ -#!/usr/bin/env bash - -shift # Filename is only used in compile step, so we can take it out here -./out "$@" \ No newline at end of file +#!/usr/bin/env bash + +shift # Filename is only used in compile step, so we can take it out here +./out "$@" diff --git a/packages/zig/0.10.1/test.zig b/packages/zig/0.10.1/test.zig index f0e287e1..7cb5eb6c 100644 --- a/packages/zig/0.10.1/test.zig +++ b/packages/zig/0.10.1/test.zig @@ -1,6 +1,6 @@ -const std = @import("std"); - -pub fn main() !void { - const stdout = std.io.getStdOut().writer(); - try stdout.print("OK\n", .{}); -} \ No newline at end of file +const std = @import("std"); + +pub fn main() !void { + const stdout = std.io.getStdOut().writer(); + try stdout.print("OK\n", .{}); +} diff --git a/packages/zig/0.8.0/metadata.json b/packages/zig/0.8.0/metadata.json index 8c02d33f..38bc1fca 100644 --- a/packages/zig/0.8.0/metadata.json +++ b/packages/zig/0.8.0/metadata.json @@ -3,6 +3,7 @@ "version": "0.8.0", "aliases": ["zig"], "limit_overrides": { - "compile_timeout": 15000 + "compile_timeout": 15000, + "compile_cpu_time": 15000 } } diff --git a/packages/zig/0.9.1/metadata.json b/packages/zig/0.9.1/metadata.json index e7061cd8..1ad7a701 100644 --- a/packages/zig/0.9.1/metadata.json +++ b/packages/zig/0.9.1/metadata.json @@ -3,6 +3,7 @@ "version": "0.9.1", "aliases": ["zig"], "limit_overrides": { - "compile_timeout": 15000 + "compile_timeout": 15000, + "compile_cpu_time": 15000 } } diff --git a/readme.md b/readme.md index 040e74d4..18e9e5a1 100644 --- a/readme.md +++ b/readme.md @@ -104,7 +104,8 @@ POST https://emkc.org/api/v2/piston/execute - Docker - Docker Compose -- Node JS (>= 13, preferably >= 15) +- Node JS (>= 15) +- cgroup v2 enabled, and cgroup v1 disabled ### After system dependencies are installed, clone this repository: @@ -135,8 +136,8 @@ The API will now be online with no language runtimes installed. To install runti ```sh docker run \ + --privileged \ -v $PWD:'/piston' \ - --tmpfs /piston/jobs \ -dit \ -p 2000:2000 \ --name piston_api \ @@ -245,8 +246,10 @@ This endpoint requests execution of some arbitrary code. - `files[].encoding` (_optional_) The encoding scheme used for the file content. One of `base64`, `hex` or `utf8`. Defaults to `utf8`. - `stdin` (_optional_) The text to pass as stdin to the program. Must be a string or left out. Defaults to blank string. - `args` (_optional_) The arguments to pass to the program. Must be an array or left out. Defaults to `[]`. 
-- `compile_timeout` (_optional_) The maximum time allowed for the compile stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `10000` (10 seconds).
-- `run_timeout` (_optional_) The maximum time allowed for the run stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `3000` (3 seconds).
+- `compile_timeout` (_optional_) The maximum wall-time allowed for the compile stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `10000` (10 seconds).
+- `run_timeout` (_optional_) The maximum wall-time allowed for the run stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `3000` (3 seconds).
+- `compile_cpu_time` (_optional_) The maximum CPU-time allowed for the compile stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `10000` (10 seconds).
+- `run_cpu_time` (_optional_) The maximum CPU-time allowed for the run stage to finish before bailing out in milliseconds. Must be a number or left out. Defaults to `3000` (3 seconds).
 - `compile_memory_limit` (_optional_) The maximum amount of memory the compile stage is allowed to use in bytes. Must be a number or left out. Defaults to `-1` (no limit)
 - `run_memory_limit` (_optional_) The maximum amount of memory the run stage is allowed to use in bytes. Must be a number or left out. Defaults to `-1` (no limit)

@@ -264,6 +267,8 @@ This endpoint requests execution of some arbitrary code.
     "args": ["1", "2", "3"],
     "compile_timeout": 10000,
     "run_timeout": 3000,
+    "compile_cpu_time": 10000,
+    "run_cpu_time": 3000,
     "compile_memory_limit": -1,
     "run_memory_limit": -1
 }

@@ -273,7 +278,12 @@

 A typical response upon successful execution will contain 1 or 2 keys `run` and `compile`.
 `compile` will only be present if the language requested requires a compile stage.

 Each of these keys has an identical structure, containing both a `stdout` and `stderr` key, which is a string containing the text outputted during the stage into each buffer.

-It also contains the `code` and `signal` which was returned from each process.
+It also contains the `code` and `signal` which were returned from each process. It also includes a nullable human-readable `message` which is a description of why a stage has failed, and a two-letter `status` that is one of:
+
+- `RE` for runtime error
+- `SG` for dying on a signal
+- `TO` for timeout (either via `timeout` or `cpu_time`)
+- `XX` for internal error

 ```json
 HTTP/1.1 200 OK

@@ -287,7 +297,12 @@ Content-Type: application/json
     "stderr": "",
     "output": "[\n  '/piston/packages/node/15.10.0/bin/node',\n  '/piston/jobs/9501b09d-0105-496b-b61a-e5148cf66384/my_cool_code.js',\n  '1',\n  '2',\n  '3'\n]\n",
     "code": 0,
-    "signal": null
+    "signal": null,
+    "message": null,
+    "status": null,
+    "cpu_time": 8,
+    "wall_time": 154,
+    "memory": 1160000
   }
 }
 ```

From a99ce9ae4763a6a7a014c5cf43f6fd53ab3cffe8 Mon Sep 17 00:00:00 2001
From: Omar Brikaa
Date: Fri, 13 Sep 2024 15:14:16 +0300
Subject: [PATCH 18/24] Remove nosocket, update security principles in docs

---
 api/src/nosocket/Makefile   | 19 ------------
 api/src/nosocket/nosocket.c | 62 -------------------------------------
 readme.md                   | 20 ++++++------
 3 files changed, 10 insertions(+), 91 deletions(-)
 delete mode 100644 api/src/nosocket/Makefile
 delete mode 100644 api/src/nosocket/nosocket.c

diff --git a/api/src/nosocket/Makefile b/api/src/nosocket/Makefile
deleted file mode 100644
index a86a8f50..00000000
--- a/api/src/nosocket/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-CC = gcc
-CFLAGS = -O2 -Wall -lseccomp
-TARGET = nosocket
-BUILD_PATH = ./
-INSTALL_PATH = /usr/local/bin/
-SOURCE = nosocket.c
-
-all: $(TARGET)
-
-$(TARGET): $(SOURCE)
-	$(CC) $(BUILD_PATH)$(SOURCE) $(CFLAGS) -o $(TARGET)
-
-install:
-	mv $(TARGET) $(INSTALL_PATH)
-
-clean:
-	$(RM) $(TARGET)
-	$(RM) $(INSTALL_PATH)$(TARGET)
-

diff --git a/api/src/nosocket/nosocket.c b/api/src/nosocket/nosocket.c
deleted file mode 100644
index 4efab88f..00000000
--- a/api/src/nosocket/nosocket.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
-nosocket.c
-
-Disables access to the `socket` syscall and runs a program provided as the first
-commandline argument.
-*/
-#include <stdio.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/prctl.h>
-#include <seccomp.h>
-
-int main(int argc, char *argv[])
-{
-    // Disallow any new capabilities from being added
-    prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
-
-    // SCMP_ACT_ALLOW lets the filter have no effect on syscalls not matching a
-    // configured filter rule (allow all by default)
-    scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
-    if (!ctx)
-    {
-        fprintf(stderr, "Unable to initialize seccomp filter context\n");
-        return 1;
-    }
-
-    // Add 32 bit and 64 bit architectures to seccomp filter
-    int rc;
-    uint32_t arch[] = {SCMP_ARCH_X86_64, SCMP_ARCH_X86, SCMP_ARCH_X32};
-    // We first remove the existing arch, otherwise our subsequent call to add
-    // it will fail
-    seccomp_arch_remove(ctx, seccomp_arch_native());
-    for (int i = 0; i < sizeof(arch) / sizeof(arch[0]); i++)
-    {
-        rc = seccomp_arch_add(ctx, arch[i]);
-        if (rc != 0)
-        {
-            fprintf(stderr, "Unable to add arch: %d\n", arch[i]);
-            return 1;
-        }
-    }
-
-    // Add a seccomp rule to the syscall blacklist - blacklist the socket syscall
-    if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EACCES), SCMP_SYS(socket), 0) < 0)
-    {
-        fprintf(stderr, "Unable to add seccomp rule to context\n");
-        return 1;
-    }
-
-#ifdef DEBUG
-    seccomp_export_pfc(ctx, 0);
-#endif
-
-    if (argc < 2)
-    {
-        fprintf(stderr, "Usage %s: %s <program>\n", argv[0], argv[0]);
-        return 1;
-    }
-    seccomp_load(ctx);
-    execvp(argv[1], argv + 1);
-    return 1;
-}

diff --git a/readme.md b/readme.md
index 18e9e5a1..2c19b5e3 100644
--- a/readme.md
+++ b/readme.md
@@ -411,26 +411,26 @@ Content-Type: application/json

 # Principle of Operation

-Piston uses Docker as the primary mechanism for sandboxing. There is an API within the container written in Node
-which takes in execution requests and executees them within the container safely.
-High level, the API writes any source code to a temporary directory in `/piston/jobs`.
+Piston uses [Isolate](https://www.ucw.cz/moe/isolate.1.html) inside Docker as the primary mechanism for sandboxing. There is an API within the container written in Node
+which takes in execution requests and executes them within the container safely.
+At a high level, the API writes any source code and executes it inside an Isolate sandbox.
 The source file is either ran or compiled and ran (in the case of languages like c, c++, c#, go, etc.).
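For readers unfamiliar with Isolate, the lifecycle the API now drives is: initialize a box, run the submission inside it, then clean the box up. The sketch below condenses that flow and is illustrative only; the box id, limits, and command are placeholder assumptions, while the real logic lives in `safe_call` in `api/src/job.js` earlier in this series.

```js
// Illustrative sketch of the init/run/cleanup cycle described above.
// Limits here are placeholders; safe_call() derives them from the runtime.
const { execFile } = require('child_process');
const { promisify } = require('util');
const execFileAsync = promisify(execFile);

async function runSandboxed(boxId, command) {
    // 1. Create the box: sets up the sandbox directory and its cgroup
    await execFileAsync('isolate', ['--init', '--cg', `-b${boxId}`]);
    try {
        // 2. Run the submission under the box's resource limits
        return await execFileAsync('isolate', [
            '--run',
            '--cg',
            `-b${boxId}`,
            `--meta=/tmp/metadata-${boxId}`,
            '--processes=256',
            '--wall-time=3',
            '--',
            '/bin/bash',
            '-c',
            command,
        ]);
    } finally {
        // 3. Tear the box down so its files and cgroup are reclaimed
        await execFileAsync('isolate', ['--cleanup', '--cg', `-b${boxId}`]);
    }
}
```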
# Security

-Docker provides a great deal of security out of the box in that it's separate from the system.
-Piston takes additional steps to make it resistant to
-various privilege escalation, denial-of-service, and resource saturation threats. These steps include:
+Piston uses Isolate, which makes use of Linux namespaces, chroot, multiple unprivileged users, and cgroups for sandboxing and resource limiting. Code execution submissions on Piston shall not be aware of each other, shall not affect each other, and shall not affect the underlying host system. This is ensured through multiple steps, including:

-- Disabling outgoing network interaction
+- Disabling outgoing network interaction by default
 - Capping max processes at 256 by default (resists `:(){ :|: &}:;`, `while True: os.fork()`, etc.)
 - Capping max files at 2048 (resists various file based attacks)
 - Cleaning up all temp space after each execution (resists out of drive space attacks)
-- Running as a variety of unprivileged users
-- Capping runtime execution at 3 seconds
-- Capping stdout to 65536 characters (resists yes/no bombs and runaway output)
+- Running each submission as a different unprivileged user
+- Running each submission with its own isolated Linux namespaces
+- Capping runtime execution at 3 seconds by default (CPU-time and wall-time)
+- Capping the peak memory that all the submission's processes can use
+- Capping stdout to 1024 characters by default (resists yes/no bombs and runaway output)
 - SIGKILLing misbehaving code
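The stdout/stderr cap in this list is enforced in `safe_call`'s stream handlers via the `output_max_size` checks shown earlier in the series. The following is a stripped-down sketch of the idea, assuming a generic spawned child process; the real handlers also buffer output, emit events, and report a status.

```js
// Minimal sketch of the output cap: once a stream exceeds the limit,
// the process is killed rather than allowed to emit unbounded output.
const OUTPUT_MAX_SIZE = 1024; // bytes; the default mentioned above

function guardStream(proc, stream) {
    let seen = 0;
    stream.on('data', chunk => {
        seen += chunk.length;
        if (seen > OUTPUT_MAX_SIZE) {
            proc.kill('SIGKILL'); // resists yes/no bombs and runaway output
        }
    });
}

// Usage with a spawned child process:
// guardStream(proc, proc.stdout);
// guardStream(proc, proc.stderr);
```

The next patch in the series refines this behavior, adding dedicated `OL`/`EL` statuses and normalizing the reported signal to `SIGKILL`.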
From ecdced9ee7104074cb05bd7aac156cafeaf31a69 Mon Sep 17 00:00:00 2001
From: Omar Brikaa
Date: Fri, 13 Sep 2024 16:19:09 +0300
Subject: [PATCH 19/24] Add SIGKILL signal for output limits and timeout, add
 status for output limits

---
 api/src/job.js | 6 ++++--
 readme.md      | 2 ++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/api/src/job.js b/api/src/job.js
index e5738435..881aa9f0 100644
--- a/api/src/job.js
+++ b/api/src/job.js
@@ -205,6 +205,7 @@ class Job {
                     this.runtime.output_max_size
                 ) {
                     message = 'stderr length exceeded';
+                    status = 'EL';
                     this.logger.info(message);
                     try {
                         process.kill(proc.pid, 'SIGABRT');
@@ -229,6 +230,7 @@ class Job {
                     this.runtime.output_max_size
                 ) {
                     message = 'stdout length exceeded';
+                    status = 'OL';
                     this.logger.info(message);
                     try {
                         process.kill(proc.pid, 'SIGABRT');
@@ -287,7 +289,7 @@ class Job {
                     message = message || value;
                     break;
                 case 'status':
-                    status = value;
+                    status = status || value;
                     break;
                 case 'time':
                     cpu_time_stat = parse_float(value) * 1000;
@@ -310,7 +312,7 @@ class Job {
             stdout,
             stderr,
             code,
-            signal,
+            signal: ['TO', 'OL', 'EL'].includes(status) ? 'SIGKILL' : signal,
             output,
             memory,
             message,

diff --git a/readme.md b/readme.md
index 2c19b5e3..f1172d33 100644
--- a/readme.md
+++ b/readme.md
@@ -283,6 +283,8 @@ It also contains the `code` and `signal` which were returned from each process. I
 - `RE` for runtime error
 - `SG` for dying on a signal
 - `TO` for timeout (either via `timeout` or `cpu_time`)
+- `OL` for stdout length exceeded
+- `EL` for stderr length exceeded
 - `XX` for internal error

 ```json

From c4afd97a38013f7062a51a13f32802c6266fb089 Mon Sep 17 00:00:00 2001
From: Omar Brikaa
Date: Sun, 15 Sep 2024 20:48:45 +0300
Subject: [PATCH 20/24] Use pkgdir inside isolate sandbox to account for
 packages that have been built with a custom PREFIX

closes #686
---
 api/src/job.js     | 8 ++++----
 api/src/runtime.js | 1 -
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/api/src/job.js b/api/src/job.js
index 881aa9f0..d46120bd 100644
--- a/api/src/job.js
+++ b/api/src/job.js
@@ -157,7 +157,7 @@ class Job {
                 '-c',
                 '/box/submission',
                 '-e',
-                `--dir=/runtime=${this.runtime.pkgdir}`,
+                `--dir=${this.runtime.pkgdir}`,
                 `--dir=/etc:noexec`,
                 `--processes=${this.runtime.max_process_count}`,
                 `--open-files=${this.runtime.max_open_files}`,
                 `--fsize=${Math.floor(this.runtime.max_file_size / 1000)}`,
                 `--wall-time=${timeout / 1000}`,
                 `--time=${cpu_time / 1000}`,
                 `--extra-time=0`,
                 ...(memory_limit >= 0
                     ? [`--cg-mem=${Math.floor(memory_limit / 1000)}`]
                     : []),
                 ...(config.disable_networking ?
[] : ['--share-net']), '--', '/bin/bash', - file, + path.join(this.runtime.pkgdir, file), ...args, ], { @@ -365,7 +365,7 @@ class Job { emit_event_bus_stage('compile'); compile = await this.safe_call( box, - '/runtime/compile', + 'compile', code_files.map(x => x.name), this.timeouts.compile, this.cpu_times.compile, @@ -390,7 +390,7 @@ class Job { emit_event_bus_stage('run'); run = await this.safe_call( box, - '/runtime/run', + 'run', [code_files[0].name, ...this.args], this.timeouts.run, this.cpu_times.run, diff --git a/api/src/runtime.js b/api/src/runtime.js index 1d4a8fc9..9a2adf4a 100644 --- a/api/src/runtime.js +++ b/api/src/runtime.js @@ -185,7 +185,6 @@ class Runtime { .split('\n') .map(line => line.split('=', 2)) .forEach(([key, val]) => { - val = val.replace_all(this.pkgdir, '/runtime'); this._env_vars[key.trim()] = val.trim(); }); } From 47661343dae343f06a34268997a28025720e6c60 Mon Sep 17 00:00:00 2001 From: Omar Brikaa Date: Tue, 17 Sep 2024 22:32:23 +0300 Subject: [PATCH 21/24] Downgrade base docker images because some packages were built on the previous image version (#687) --- api/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/Dockerfile b/api/Dockerfile index 51367f01..7ab81bfc 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -1,4 +1,4 @@ -FROM buildpack-deps:bookworm AS isolate +FROM buildpack-deps:buster AS isolate RUN apt-get update && \ apt-get install -y --no-install-recommends git libcap-dev && \ rm -rf /var/lib/apt/lists/* && \ @@ -8,7 +8,7 @@ RUN apt-get update && \ make -j$(nproc) install && \ rm -rf /tmp/* -FROM node:20-bookworm-slim +FROM node:15.10.0-buster-slim ENV DEBIAN_FRONTEND=noninteractive From 24c5c05308abaffe59f84a403b0d7c27c772700b Mon Sep 17 00:00:00 2001 From: Omar Brikaa Date: Fri, 4 Oct 2024 19:30:57 +0300 Subject: [PATCH 22/24] Give friendlier messages when cgroup v2 is not enabled --- api/src/docker-entrypoint.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/api/src/docker-entrypoint.sh b/api/src/docker-entrypoint.sh index 7cf37e33..2a4e49c3 100755 --- a/api/src/docker-entrypoint.sh +++ b/api/src/docker-entrypoint.sh @@ -1,5 +1,21 @@ #!/bin/bash +CGROUP_FS="/sys/fs/cgroup" +if [ ! -e "$CGROUP_FS" ]; then + echo "Cannot find $CGROUP_FS. Please make sure your system is using cgroup v2" + exit 1 +fi + +if [ -e "$CGROUP_FS/unified" ]; then + echo "Combined cgroup v1+v2 mode is not supported. Please make sure your system is using pure cgroup v2" + exit 1 +fi + +if [ ! -e "$CGROUP_FS/cgroup.subtree_control" ]; then + echo "Cgroup v2 not found. Please make sure cgroup v2 is enabled on your system" + exit 1 +fi + cd /sys/fs/cgroup && \ mkdir isolate/ && \ echo 1 > isolate/cgroup.procs && \ From 512b63d2b50902cb4a79ad85dfa1e726ec0f4041 Mon Sep 17 00:00:00 2001 From: Omar Brikaa Date: Fri, 4 Oct 2024 19:53:33 +0300 Subject: [PATCH 23/24] Document interactive execution --- readme.md | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/readme.md b/readme.md index f1172d33..9034264e 100644 --- a/readme.md +++ b/readme.md @@ -320,6 +320,40 @@ Content-Type: application/json } ``` +#### Interactive execution endpoint (not available through the public API) + +To interact with running processes in real time, you can establish a WebSocket connection at `/api/v2/connect`. This allows you to both receive output and send input to active processes. + +Each message is structured as a JSON object with a `type` key, which indicates the action to perform. 
Below is a list of message types, their directions, and descriptions:

- **init** (client -> server): Initializes a job with the same parameters as the `/execute` endpoint, except that stdin is discarded.
- **runtime** (server -> client): Provides details on the runtime environment, including the version and language.
- **stage** (server -> client): Indicates the current execution stage, either "compile" or "run".
- **data** (server <-> client): Exchanges data between the client and server, such as stdin, stdout, or stderr streams.
- **signal** (client -> server): Sends a signal (e.g., for termination) to the running process, whether it's in the "compile" or "run" stage.
- **exit** (server -> client): Signals the end of a stage, along with the exit code or signal.
- **error** (server -> client): Reports an error, typically right before the WebSocket is closed.

An example of this endpoint in use is depicted below (**<** = client to server, **>** = server to client):

1. Client establishes WebSocket connection to `/api/v2/connect`
2. **<** `{"type":"init", "language":"bash", "version":"*", "files":[{"content": "cat"}]}`
3. **>** `{"type":"runtime","language": "bash", "version": "5.1.0"}`
4. **>** `{"type":"stage", "stage":"run"}`
5. **<** `{"type":"data", "stream":"stdin", "data":"Hello World!"}`
6. **>** `{"type":"data", "stream":"stdout", "data":"Hello World!"}`
7. _time passes_
8. **>** `{"type":"exit", "stage":"run", "code":null, "signal": "SIGKILL"}`

Errors may return status codes as follows:

- **4000: Already Initialized**: Sent when a second `init` command is issued.
- **4001: Initialization Timeout**: No `init` command was sent within 1 second of connection.
- **4002: Notified Error**: A fatal error occurred, and an `error` packet was transmitted.
- **4003: Not yet Initialized**: A non-`init` command was sent without a job context.
- **4004: Can only write to stdin**: The client attempted to write to a stream other than stdin.
- **4005: Invalid Signal**: An invalid signal was sent in a `signal` packet.
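To illustrate the protocol, a minimal Node client might look like the sketch below. It assumes the `ws` npm package and a self-hosted instance on port 2000; as noted above, the public API does not expose this endpoint.

```js
const WebSocket = require('ws');

const socket = new WebSocket('ws://127.0.0.1:2000/api/v2/connect');

socket.on('open', () => {
    // The init message must arrive within 1 second of connecting,
    // or the server closes the connection with code 4001
    socket.send(
        JSON.stringify({
            type: 'init',
            language: 'bash',
            version: '*',
            files: [{ content: 'cat' }],
        })
    );
});

socket.on('message', raw => {
    const msg = JSON.parse(raw);
    switch (msg.type) {
        case 'stage':
            // Once the run stage starts, stream some stdin to the process
            if (msg.stage === 'run') {
                socket.send(
                    JSON.stringify({
                        type: 'data',
                        stream: 'stdin',
                        data: 'Hello World!\n',
                    })
                );
            }
            break;
        case 'data':
            process.stdout.write(`[${msg.stream}] ${msg.data}`);
            break;
        case 'exit':
            socket.close();
            break;
    }
});
```

Since `init` takes the same parameters as `/execute`, the non-interactive endpoint can be exercised with the same request shape, including the CPU-time limits introduced in this series. A sketch, assuming a local instance with a Node runtime installed and Node 18+ on the client for the global `fetch`:

```js
async function smokeTest() {
    const res = await fetch('http://127.0.0.1:2000/api/v2/execute', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            language: 'js',
            version: '*',
            files: [{ content: "console.log('hello')" }],
            run_timeout: 3000, // wall-time cap, in milliseconds
            run_cpu_time: 3000, // CPU-time cap, in milliseconds
        }),
    });
    const { run } = await res.json();
    // The response now also carries status, cpu_time, wall_time and memory
    console.log(run.stdout, run.status, run.cpu_time, run.wall_time, run.memory);
}

smokeTest().catch(console.error);
```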
# Supported Languages From 4e361dcf924b4594f038511ff69752ee1a3f49a9 Mon Sep 17 00:00:00 2001 From: Omar Brikaa Date: Fri, 11 Oct 2024 21:42:58 +0300 Subject: [PATCH 24/24] Add note to ensure the repository is cloned with LF line endings --- readme.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/readme.md b/readme.md index 9034264e..7724fdb9 100644 --- a/readme.md +++ b/readme.md @@ -114,6 +114,10 @@ POST https://emkc.org/api/v2/piston/execute git clone https://github.com/engineer-man/piston ``` +> [!NOTE] +> +> Ensure the repository is cloned with LF line endings + ### Installation ```sh