Compare commits
61 Commits
fc193afe92
...
dev
Author | SHA1 | Date | |
---|---|---|---|
fb536f5292
|
|||
82f186fe71
|
|||
8999a65f4e
|
|||
426bd3864a
|
|||
e6ae8a9c2f
|
|||
82e03b79fc
|
|||
081b534bd2
|
|||
eb04c74c1b
|
|||
8f34ec0be0
|
|||
2daa111196
|
|||
48854bf40d
|
|||
8d0941b406
|
|||
bd0160aa85
|
|||
1a0578c541
|
|||
2df0d81782
|
|||
77547cedce
|
|||
24ec99bacd
|
|||
354b1cbedd
|
|||
e96cfac95c
|
|||
9bb15afa21
|
|||
27abdc3687
|
|||
c53e0cc99b
|
|||
ef4a9364a1
|
|||
0bc66ec025
|
|||
1814fe1144
|
|||
77442a31b1
|
|||
4e3313b6db
|
|||
287ef19530
|
|||
5bb4e3ff45
|
|||
fee3b44c6e
|
|||
a852e2d9e9
|
|||
7cb5877fd0
|
|||
b32c097446
|
|||
77aecc7548
|
|||
20cd32243d
|
|||
a7ecc354a9
|
|||
cba1abc7f4
|
|||
3f0c28014c
|
|||
c3c97c3828
|
|||
cdfa480479
|
|||
ad5254ded3
|
|||
c3e5a138b4
|
|||
5fcc874985
|
|||
b626012591
|
|||
51158fae47
|
|||
e76b63f392
|
|||
7b3c927313
|
|||
01b9c2e9f2
|
|||
f33ff25708
|
|||
9b379eded8
|
|||
d3a575e5e3
|
|||
75dd8f4a51
|
|||
8437d61df7
|
|||
12e269cfc1
|
|||
61e3e6d5e8
|
|||
79706099ec
|
|||
793ac0071d
|
|||
a79ea1905a
|
|||
331876127a
|
|||
ee1262e983
|
|||
e51a528838
|
13
.github/workflows/e2e.yml
vendored
13
.github/workflows/e2e.yml
vendored
@ -2,13 +2,19 @@ name: E2E
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 0 * * 0'
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- dev
|
||||
paths-ignore:
|
||||
- ".github/workflows/images-*"
|
||||
- ".github/workflows/ubuntu.yml"
|
||||
- ".github/workflows/macos.yml"
|
||||
- ".github/workflows/debian-cache.yml"
|
||||
- "docs/**"
|
||||
- 'tools/**'
|
||||
- ".readthedocs.yaml"
|
||||
- "README.md"
|
||||
|
||||
@ -31,6 +37,7 @@ jobs:
|
||||
{ distro: Ubuntu, release: 18.04 },
|
||||
{ distro: Ubuntu, release: 20.04 },
|
||||
{ distro: Ubuntu, release: 22.04 },
|
||||
{ distro: Ubuntu, release: 24.04 },
|
||||
{ distro: CentOS, release: 6 },
|
||||
{ distro: CentOS, release: 7 },
|
||||
{ distro: CentOS, release: 8 },
|
||||
@ -119,7 +126,7 @@ jobs:
|
||||
echo 'distro = { id = "${{ matrix.os.distro }}", release = "${{ matrix.os.release }}" }' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo 'kernel = { regex = ".*" }' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo '[qemu]' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo 'timeout = "10m"' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo 'timeout = "5m"' >> examples/kernel-module/.out-of-tree.toml
|
||||
echo 'after_start_timeout = "10s"' >> examples/kernel-module/.out-of-tree.toml
|
||||
|
||||
echo 'modprobe uio || modprobe 9p || modprobe xfs' >> examples/kernel-module/test.sh
|
||||
@ -135,8 +142,8 @@ jobs:
|
||||
echo 'Type=oneshot' >> test.service
|
||||
echo 'WorkingDirectory=/root/test' >> test.service
|
||||
echo 'TimeoutStopSec=1' >> test.service
|
||||
echo 'ExecStart=/usr/local/bin/out-of-tree kernel --no-container-cache autogen --threads=8 --max=128 --shuffle' >> test.service
|
||||
echo 'ExecStart=/usr/local/bin/out-of-tree pew --qemu-timeout=10m --threads=4 --include-internal-errors' >> test.service
|
||||
echo 'ExecStart=/usr/local/bin/out-of-tree kernel --no-prebuilt-containers autogen --threads=8 --max=64 --shuffle' >> test.service
|
||||
echo 'ExecStart=/usr/local/bin/out-of-tree pew --threads=4 --include-internal-errors' >> test.service
|
||||
|
||||
scp test.service root@$IP:/etc/systemd/system/test.service
|
||||
|
||||
|
87
.github/workflows/images-centos.yml
vendored
Normal file
87
.github/workflows/images-centos.yml
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
name: CentOS images
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'tools/qemu-centos-img/**'
|
||||
- '.github/workflows/images-centos.yml'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow_ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
images-centos:
|
||||
name: Qemu Images
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
|
||||
|
||||
- uses: webfactory/ssh-agent@v0.8.0
|
||||
with:
|
||||
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
|
||||
|
||||
- name: create droplet
|
||||
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c' --tag-name=github-actions ga-out-of-tree-images-centos-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
|
||||
|
||||
# TODO Move to common script
|
||||
- name: generate images
|
||||
shell: bash
|
||||
run: |
|
||||
sleep 1m
|
||||
|
||||
IP=$(doctl compute droplet list --tag-name=github-actions --format "Name,Public IPv4" | grep -v ID | grep ga-out-of-tree-images-centos-$GITHUB_SHA | awk '{print $2}')
|
||||
|
||||
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
|
||||
do
|
||||
sleep 1s
|
||||
done
|
||||
|
||||
ssh root@$IP "cloud-init status --wait | grep done"
|
||||
|
||||
ssh root@$IP apt-get update
|
||||
ssh root@$IP apt-get install -y git podman s3cmd
|
||||
ssh root@$IP git clone https://github.com/out-of-tree/out-of-tree
|
||||
ssh root@$IP "cd out-of-tree && git checkout $GITHUB_SHA"
|
||||
|
||||
ssh root@$IP "echo -e '[Unit]\nDescription=CentOS image generator and uploader\n[Service]\nRemainAfterExit=yes\nStandardError=append:/var/log/images-centos.log\nStandardOutput=append:/var/log/images-centos.log\nType=oneshot' >> /etc/systemd/system/images-centos.service"
|
||||
|
||||
ssh root@$IP "echo Environment=HOST=fra1.digitaloceanspaces.com >> /etc/systemd/system/images-centos.service"
|
||||
ssh root@$IP "echo Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com >> /etc/systemd/system/images-centos.service"
|
||||
ssh root@$IP "echo Environment=ACCESS_KEY=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} >> /etc/systemd/system/images-centos.service"
|
||||
ssh root@$IP "echo Environment=SECRET_KEY=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} >> /etc/systemd/system/images-centos.service"
|
||||
|
||||
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-centos-img/6/generate.sh' >> /etc/systemd/system/images-centos.service"
|
||||
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-centos-img/7/generate.sh' >> /etc/systemd/system/images-centos.service"
|
||||
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-centos-img/8/generate.sh' >> /etc/systemd/system/images-centos.service"
|
||||
|
||||
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-centos-img/*/*.tar.gz s3://out-of-tree/3.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-centos.service'
|
||||
|
||||
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-centos.service"
|
||||
|
||||
ssh root@$IP systemctl daemon-reload
|
||||
|
||||
ssh root@$IP systemctl start images-centos --no-block
|
||||
|
||||
while ! ssh root@$IP systemctl show images-centos -p SubState --value | grep -E '(failed|exited)'
|
||||
do
|
||||
sleep 3m
|
||||
done
|
||||
|
||||
scp root@$IP:/var/log/images-centos.log .
|
||||
|
||||
ssh root@$IP systemctl is-active images-centos
|
||||
|
||||
- name: Archive logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: images-centos-log
|
||||
path: images-centos.log
|
||||
|
||||
- name: delete droplet
|
||||
if: always()
|
||||
run: doctl compute droplet delete -f ga-out-of-tree-images-centos-$GITHUB_SHA
|
9
.github/workflows/images-debian.yml
vendored
9
.github/workflows/images-debian.yml
vendored
@ -1,4 +1,4 @@
|
||||
name: Debian
|
||||
name: Debian images
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
@ -25,7 +25,7 @@ jobs:
|
||||
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
|
||||
|
||||
- name: create droplet
|
||||
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94' --tag-name=github-actions ga-out-of-tree-images-debian-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
|
||||
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c' --tag-name=github-actions ga-out-of-tree-images-debian-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
|
||||
|
||||
# TODO Move to common script
|
||||
- name: generate images
|
||||
@ -40,8 +40,7 @@ jobs:
|
||||
sleep 1s
|
||||
done
|
||||
|
||||
sleep 5m
|
||||
ssh root@$IP pkill apt-get || true
|
||||
ssh root@$IP "cloud-init status --wait | grep done"
|
||||
|
||||
ssh root@$IP apt-get update
|
||||
ssh root@$IP apt-get install -y git podman s3cmd
|
||||
@ -57,7 +56,7 @@ jobs:
|
||||
|
||||
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-debian-img/generate-images.sh' >> /etc/systemd/system/images-debian.service"
|
||||
|
||||
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-debian-img/*.tar.gz s3://out-of-tree/1.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-debian.service'
|
||||
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-debian-img/*.tar.gz s3://out-of-tree/3.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-debian.service'
|
||||
|
||||
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-debian.service"
|
||||
|
||||
|
16
.github/workflows/images-oraclelinux.yml
vendored
16
.github/workflows/images-oraclelinux.yml
vendored
@ -1,4 +1,4 @@
|
||||
name: Oracle Linux
|
||||
name: Oracle Linux images
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
@ -25,7 +25,7 @@ jobs:
|
||||
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
|
||||
|
||||
- name: create droplet
|
||||
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94' --tag-name=github-actions ga-out-of-tree-images-oraclelinux-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
|
||||
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c' --tag-name=github-actions ga-out-of-tree-images-oraclelinux-$GITHUB_SHA --size s-1vcpu-2gb --image ubuntu-22-04-x64 --wait
|
||||
|
||||
# TODO Move to common script
|
||||
- name: generate images
|
||||
@ -40,8 +40,7 @@ jobs:
|
||||
sleep 1s
|
||||
done
|
||||
|
||||
sleep 5m
|
||||
ssh root@$IP pkill apt-get || true
|
||||
ssh root@$IP "cloud-init status --wait | grep done"
|
||||
|
||||
ssh root@$IP apt-get update
|
||||
ssh root@$IP apt-get install -y git podman s3cmd
|
||||
@ -57,7 +56,7 @@ jobs:
|
||||
|
||||
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-oraclelinux-img/generate-images.sh' >> /etc/systemd/system/images-oraclelinux.service"
|
||||
|
||||
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-oraclelinux-img/*.tar.gz s3://out-of-tree/1.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-oraclelinux.service'
|
||||
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-oraclelinux-img/*.tar.gz s3://out-of-tree/3.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-oraclelinux.service'
|
||||
|
||||
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-oraclelinux.service"
|
||||
|
||||
@ -74,6 +73,13 @@ jobs:
|
||||
|
||||
ssh root@$IP systemctl is-active images-oraclelinux
|
||||
|
||||
- name: Archive logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: images-oraclelinux-log
|
||||
path: images-oraclelinux.log
|
||||
|
||||
- name: delete droplet
|
||||
if: always()
|
||||
run: doctl compute droplet delete -f ga-out-of-tree-images-oraclelinux-$GITHUB_SHA
|
||||
|
89
.github/workflows/images-ubuntu.yml
vendored
Normal file
89
.github/workflows/images-ubuntu.yml
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
name: Ubuntu images
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
paths:
|
||||
- 'tools/qemu-ubuntu-img/**'
|
||||
- '.github/workflows/images-ubuntu.yml'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow_ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
images:
|
||||
name: Qemu Images
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
|
||||
|
||||
- uses: webfactory/ssh-agent@v0.8.0
|
||||
with:
|
||||
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
|
||||
|
||||
- name: create droplet
|
||||
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c' --tag-name=github-actions ga-out-of-tree-images-ubuntu-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
|
||||
|
||||
# TODO Move to common script
|
||||
- name: generate images
|
||||
shell: bash
|
||||
run: |
|
||||
set -x
|
||||
|
||||
sleep 1m
|
||||
|
||||
IP=$(doctl compute droplet list --tag-name=github-actions --format "Name,Public IPv4" | grep -v ID | grep ga-out-of-tree-images-ubuntu-$GITHUB_SHA | awk '{print $2}')
|
||||
|
||||
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
|
||||
do
|
||||
sleep 1s
|
||||
done
|
||||
|
||||
ssh root@$IP "cloud-init status --wait | grep done"
|
||||
|
||||
ssh root@$IP apt-get update
|
||||
ssh root@$IP apt-get install -y git podman s3cmd
|
||||
ssh root@$IP git clone https://github.com/out-of-tree/out-of-tree
|
||||
ssh root@$IP "cd out-of-tree && git checkout $GITHUB_SHA"
|
||||
|
||||
ssh root@$IP "echo -e '[Unit]\nDescription=Ubuntu image generator and uploader\n[Service]\nRemainAfterExit=yes\nStandardError=append:/var/log/images-ubuntu.log\nStandardOutput=append:/var/log/images-ubuntu.log\nType=oneshot' >> /etc/systemd/system/images-ubuntu.service"
|
||||
|
||||
ssh root@$IP "echo Environment=HOST=fra1.digitaloceanspaces.com >> /etc/systemd/system/images-ubuntu.service"
|
||||
ssh root@$IP "echo Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com >> /etc/systemd/system/images-ubuntu.service"
|
||||
ssh root@$IP "echo Environment=ACCESS_KEY=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} >> /etc/systemd/system/images-ubuntu.service"
|
||||
ssh root@$IP "echo Environment=SECRET_KEY=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} >> /etc/systemd/system/images-ubuntu.service"
|
||||
|
||||
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-ubuntu-img/generate-images.py' >> /etc/systemd/system/images-ubuntu.service"
|
||||
|
||||
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-ubuntu-img/*.tar.gz s3://out-of-tree/3.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-ubuntu.service'
|
||||
|
||||
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-ubuntu.service"
|
||||
|
||||
ssh root@$IP systemctl daemon-reload
|
||||
|
||||
ssh root@$IP systemctl start images-ubuntu --no-block
|
||||
|
||||
while ! ssh root@$IP systemctl show images-ubuntu -p SubState --value | grep -E '(failed|exited)'
|
||||
do
|
||||
sleep 1m
|
||||
done
|
||||
|
||||
scp root@$IP:/var/log/images-ubuntu.log .
|
||||
|
||||
cat images-ubuntu.log
|
||||
|
||||
ssh root@$IP systemctl is-active images-ubuntu
|
||||
|
||||
- name: Archive logs
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: images-ubuntu-log
|
||||
path: images-ubuntu.log
|
||||
|
||||
- name: delete droplet
|
||||
if: always()
|
||||
run: doctl compute droplet delete -f ga-out-of-tree-images-ubuntu-$GITHUB_SHA
|
2
.github/workflows/macos.yml
vendored
2
.github/workflows/macos.yml
vendored
@ -18,7 +18,7 @@ concurrency:
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: macOS-latest
|
||||
runs-on: macOS-12
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
|
||||
|
5
.github/workflows/ubuntu.yml
vendored
5
.github/workflows/ubuntu.yml
vendored
@ -2,6 +2,8 @@ name: Ubuntu
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '0 0 * * 0'
|
||||
push:
|
||||
paths-ignore:
|
||||
- ".github/workflows/images-*"
|
||||
@ -113,6 +115,7 @@ jobs:
|
||||
{ distro: Ubuntu, release: 18.04 },
|
||||
{ distro: Ubuntu, release: 20.04 },
|
||||
{ distro: Ubuntu, release: 22.04 },
|
||||
{ distro: Ubuntu, release: 24.04 },
|
||||
{ distro: CentOS, release: 6 },
|
||||
{ distro: CentOS, release: 7 },
|
||||
{ distro: CentOS, release: 8 },
|
||||
@ -200,7 +203,7 @@ jobs:
|
||||
|
||||
cp ../examples/kernel-module/{module.c,Makefile,test.sh} .
|
||||
|
||||
../out-of-tree --log-level=debug kernel list-remote --distro=${{ matrix.os.distro }} --ver=${{ matrix.os.release }}
|
||||
../out-of-tree --log-level=debug kernel list-remote --distro-id=${{ matrix.os.distro }} --distro-release=${{ matrix.os.release }}
|
||||
../out-of-tree --log-level=debug kernel autogen --max=1 --shuffle
|
||||
../out-of-tree --log-level=debug pew --qemu-timeout=20m --include-internal-errors
|
||||
|
||||
|
10
README.md
10
README.md
@ -8,8 +8,6 @@
|
||||
|
||||
*out-of-tree* was created to reduce the complexity of the environment for developing, testing and debugging Linux kernel exploits and out-of-tree kernel modules (hence the name "out-of-tree").
|
||||
|
||||

|
||||
|
||||
## Installation
|
||||
|
||||
### GNU/Linux (with [Nix](https://nixos.org/nix/))
|
||||
@ -42,9 +40,9 @@ Read [documentation](https://out-of-tree.readthedocs.io) for further info.
|
||||
|
||||
## Examples
|
||||
|
||||
Generate all Ubuntu 22.04 kernels:
|
||||
Download all Ubuntu 24.04 kernels:
|
||||
|
||||
$ out-of-tree kernel genall --distro=Ubuntu --ver=22.04
|
||||
$ out-of-tree kernel genall --distro-id=Ubuntu --distro-release=24.04
|
||||
|
||||
Run tests based on .out-of-tree.toml definitions:
|
||||
|
||||
@ -52,8 +50,8 @@ Run tests based on .out-of-tree.toml definitions:
|
||||
|
||||
Test with a specific kernel:
|
||||
|
||||
$ out-of-tree pew --kernel='Ubuntu:5.4.0-29-generic'
|
||||
$ out-of-tree pew --realtime-output --distro-id=ubuntu --kernel-regex=6.8.0-41-generic
|
||||
|
||||
Run debug environment:
|
||||
|
||||
$ out-of-tree debug --kernel='Ubuntu:5.4.0-29-generic'
|
||||
$ out-of-tree debug --distro-id=ubuntu --distro-release=24.04 --kernel-regex=6.8.0-41-generic
|
||||
|
@ -56,6 +56,8 @@ type Job struct {
|
||||
RepoName string
|
||||
Commit string
|
||||
|
||||
Description string
|
||||
|
||||
Artifact artifact.Artifact
|
||||
Target distro.KernelInfo
|
||||
|
||||
|
@ -12,7 +12,6 @@ import (
|
||||
|
||||
"github.com/naoina/toml"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
@ -132,11 +131,15 @@ type Patch struct {
|
||||
|
||||
// Artifact is for .out-of-tree.toml
|
||||
type Artifact struct {
|
||||
Name string
|
||||
Type ArtifactType
|
||||
TestFiles []FileTransfer
|
||||
SourcePath string
|
||||
Targets []Target
|
||||
Name string
|
||||
Type ArtifactType
|
||||
|
||||
SourcePath string
|
||||
SourceFiles []string
|
||||
|
||||
TestFiles []FileTransfer
|
||||
|
||||
Targets []Target
|
||||
|
||||
Script string
|
||||
|
||||
@ -237,8 +240,9 @@ func (ka Artifact) Supported(ki distro.KernelInfo) (supported bool, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO too many parameters
|
||||
func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
|
||||
endless bool, cBinary,
|
||||
outputOnSuccess, realtimeOutput, endless bool, cBinary,
|
||||
cEndlessStress string, cEndlessTimeout time.Duration,
|
||||
dump func(q *qemu.System, ka Artifact, ki distro.KernelInfo,
|
||||
result *Result)) {
|
||||
@ -330,12 +334,22 @@ func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
|
||||
// TODO: build should return structure
|
||||
start := time.Now()
|
||||
result.BuildDir, result.BuildArtifact, result.Build.Output, err =
|
||||
Build(slog, tmp, ka, ki, ka.Docker.Timeout.Duration)
|
||||
Build(slog, tmp, ka, ki, ka.Docker.Timeout.Duration, realtimeOutput)
|
||||
slog.Debug().Str("duration", time.Since(start).String()).
|
||||
Msg("build done")
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("build")
|
||||
if !realtimeOutput {
|
||||
slog.Error().Err(err).Msgf("build failure\n%v\n", result.Build.Output)
|
||||
} else {
|
||||
slog.Error().Err(err).Msg("build failure")
|
||||
}
|
||||
return
|
||||
} else {
|
||||
if outputOnSuccess && !realtimeOutput {
|
||||
slog.Info().Msgf("build success\n%v\n", result.Build.Output)
|
||||
} else {
|
||||
slog.Info().Msg("build success")
|
||||
}
|
||||
}
|
||||
result.Build.Ok = true
|
||||
} else {
|
||||
@ -357,6 +371,8 @@ func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
|
||||
ka.Qemu.Timeout.Duration = time.Minute
|
||||
}
|
||||
|
||||
slog.Info().Msg("wait for vm initialisation")
|
||||
|
||||
err = q.WaitForSSH(ka.Qemu.Timeout.Duration)
|
||||
if err != nil {
|
||||
result.InternalError = err
|
||||
@ -393,11 +409,40 @@ func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
|
||||
return
|
||||
}
|
||||
|
||||
var qemuTestOutput string
|
||||
q.SetQemuOutputHandler(func(s string) {
|
||||
if realtimeOutput {
|
||||
fmt.Printf("kmsg: %s\n", s)
|
||||
} else {
|
||||
qemuTestOutput += s + "\n"
|
||||
}
|
||||
})
|
||||
|
||||
if realtimeOutput {
|
||||
q.SetCommandsOutputHandler(func(s string) {
|
||||
fmt.Printf("test: %s\n", s)
|
||||
})
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
copyArtifactAndTest(slog, q, ka, &result, remoteTest)
|
||||
slog.Info().Msg("copy artifact and run test")
|
||||
copyArtifactAndTest(slog, q, ka, &result, remoteTest, outputOnSuccess, realtimeOutput)
|
||||
slog.Debug().Str("duration", time.Since(start).String()).
|
||||
Msgf("test completed (success: %v)", result.Test.Ok)
|
||||
|
||||
if result.Build.Ok && !realtimeOutput {
|
||||
if !result.Run.Ok || !result.Test.Ok {
|
||||
slog.Error().Msgf("qemu output\n%v\n", qemuTestOutput)
|
||||
} else if outputOnSuccess {
|
||||
slog.Info().Msgf("qemu output\n%v\n", qemuTestOutput)
|
||||
}
|
||||
}
|
||||
|
||||
if realtimeOutput {
|
||||
q.CloseCommandsOutputHandler()
|
||||
}
|
||||
q.CloseQemuOutputHandler()
|
||||
|
||||
if !endless {
|
||||
return
|
||||
}
|
||||
|
@ -37,7 +37,7 @@ func preload(q *qemu.System, ki distro.KernelInfo, pm PreloadModule,
|
||||
|
||||
var workPath, cache string
|
||||
if pm.Path != "" {
|
||||
log.Print("Use non-git path for preload module (no cache)")
|
||||
log.Debug().Msg("Use non-git path for preload module (no cache)")
|
||||
workPath = pm.Path
|
||||
} else if pm.Repo != "" {
|
||||
workPath, cache, err = cloneOrPull(pm.Repo, ki)
|
||||
@ -85,7 +85,7 @@ func buildAndInsmod(workPath string, q *qemu.System, ki distro.KernelInfo,
|
||||
|
||||
output, err := q.CopyAndInsmod(af)
|
||||
if err != nil {
|
||||
log.Print(output)
|
||||
log.Error().Err(err).Msg(output)
|
||||
return
|
||||
}
|
||||
return
|
||||
@ -111,7 +111,7 @@ func buildPreload(workPath, tmp string, ki distro.KernelInfo,
|
||||
dockerTimeout = ka.Docker.Timeout.Duration
|
||||
}
|
||||
|
||||
_, af, _, err = Build(log.Logger, tmp, ka, ki, dockerTimeout)
|
||||
_, af, _, err = Build(log.Logger, tmp, ka, ki, dockerTimeout, false)
|
||||
return
|
||||
}
|
||||
|
||||
@ -147,7 +147,7 @@ func cloneOrPull(repo string, ki distro.KernelInfo) (workPath, cache string,
|
||||
|
||||
err = w.Pull(&git.PullOptions{})
|
||||
if err != nil && err != git.NoErrAlreadyUpToDate {
|
||||
log.Print(repo, "pull error:", err)
|
||||
log.Error().Err(err).Msgf("pull %s error", repo)
|
||||
}
|
||||
} else {
|
||||
r, err = git.PlainClone(workPath, false, &git.CloneOptions{URL: repo})
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -102,7 +103,7 @@ func applyPatches(src string, ka Artifact) (err error) {
|
||||
}
|
||||
|
||||
func Build(flog zerolog.Logger, tmp string, ka Artifact,
|
||||
ki distro.KernelInfo, dockerTimeout time.Duration) (
|
||||
ki distro.KernelInfo, dockerTimeout time.Duration, realtimeOutput bool) (
|
||||
outdir, outpath, output string, err error) {
|
||||
|
||||
target := strings.Replace(ka.Name, " ", "_", -1)
|
||||
@ -112,7 +113,11 @@ func Build(flog zerolog.Logger, tmp string, ka Artifact,
|
||||
|
||||
outdir = tmp + "/source"
|
||||
|
||||
err = copy.Copy(ka.SourcePath, outdir)
|
||||
if len(ka.SourceFiles) == 0 {
|
||||
err = copy.Copy(ka.SourcePath, outdir)
|
||||
} else {
|
||||
err = CopyFiles(ka.SourcePath, ka.SourceFiles, outdir)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -150,9 +155,21 @@ func Build(flog zerolog.Logger, tmp string, ka Artifact,
|
||||
log.Fatal().Err(err).Msg("container creation failure")
|
||||
}
|
||||
|
||||
c.Args = append(c.Args, "--network", "none")
|
||||
|
||||
if realtimeOutput {
|
||||
c.SetCommandsOutputHandler(func(s string) {
|
||||
fmt.Printf("%s\n", s)
|
||||
})
|
||||
}
|
||||
|
||||
output, err = c.Run(outdir, []string{
|
||||
buildCommand + " && chmod -R 777 /work",
|
||||
})
|
||||
|
||||
if realtimeOutput {
|
||||
c.CloseCommandsOutputHandler()
|
||||
}
|
||||
} else {
|
||||
cmd := exec.Command("bash", "-c", "cd "+outdir+" && "+
|
||||
buildCommand)
|
||||
@ -226,6 +243,35 @@ type Result struct {
|
||||
InternalErrorString string
|
||||
}
|
||||
|
||||
func CopyFiles(path string, files []string, dest string) (err error) {
|
||||
err = os.MkdirAll(dest, os.ModePerm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, sf := range files {
|
||||
if sf[0] == '/' {
|
||||
err = CopyFile(sf, filepath.Join(dest, filepath.Base(sf)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err = os.MkdirAll(filepath.Join(dest, filepath.Dir(sf)), os.ModePerm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = CopyFile(filepath.Join(path, sf), filepath.Join(dest, sf))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func CopyFile(sourcePath, destinationPath string) (err error) {
|
||||
sourceFile, err := os.Open(sourcePath)
|
||||
if err != nil {
|
||||
@ -245,7 +291,7 @@ func CopyFile(sourcePath, destinationPath string) (err error) {
|
||||
}
|
||||
|
||||
func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka Artifact,
|
||||
res *Result, remoteTest string) (err error) {
|
||||
res *Result, remoteTest string, outputOnSuccess, realtimeOutput bool) (err error) {
|
||||
|
||||
// Copy all test files to the remote machine
|
||||
for _, f := range ka.TestFiles {
|
||||
@ -277,8 +323,7 @@ func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka Artifact,
|
||||
|
||||
res.Test.Output, err = testKernelModule(q, ka, remoteTest)
|
||||
if err != nil {
|
||||
slog.Error().Err(err).Msg(res.Test.Output)
|
||||
return
|
||||
break
|
||||
}
|
||||
res.Test.Ok = true
|
||||
case KernelExploit:
|
||||
@ -291,24 +336,36 @@ func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka Artifact,
|
||||
res.Test.Output, err = testKernelExploit(q, ka, remoteTest,
|
||||
remoteExploit)
|
||||
if err != nil {
|
||||
slog.Error().Err(err).Msg(res.Test.Output)
|
||||
return
|
||||
break
|
||||
}
|
||||
res.Run.Ok = true // does not really used
|
||||
res.Test.Ok = true
|
||||
case Script:
|
||||
res.Test.Output, err = runScript(q, remoteTest)
|
||||
if err != nil {
|
||||
slog.Error().Err(err).Msg(res.Test.Output)
|
||||
return
|
||||
break
|
||||
}
|
||||
slog.Info().Msgf("\n%v\n", res.Test.Output)
|
||||
res.Run.Ok = true
|
||||
res.Test.Ok = true
|
||||
default:
|
||||
slog.Fatal().Msg("Unsupported artifact type")
|
||||
}
|
||||
|
||||
if err != nil || !res.Test.Ok {
|
||||
if !realtimeOutput {
|
||||
slog.Error().Err(err).Msgf("test failure\n%v\n", res.Test.Output)
|
||||
} else {
|
||||
slog.Error().Err(err).Msg("test failure")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if outputOnSuccess && !realtimeOutput {
|
||||
slog.Info().Msgf("test success\n%v\n", res.Test.Output)
|
||||
} else {
|
||||
slog.Info().Msg("test success")
|
||||
}
|
||||
|
||||
_, err = q.Command("root", "echo")
|
||||
if err != nil {
|
||||
slog.Error().Err(err).Msg("after-test ssh reconnect")
|
||||
|
@ -198,9 +198,11 @@ func (c Client) PushRepo(repo api.Repo) (err error) {
|
||||
remote := fmt.Sprintf("git://%s/%s", addr, repo.Name)
|
||||
log.Debug().Msgf("git proxy remote: %v", remote)
|
||||
|
||||
raw, err := exec.Command("git", "--work-tree", repo.Path, "push", "--force", remote).
|
||||
raw, err := exec.Command("git", "-c", "push.default=current",
|
||||
"--work-tree", repo.Path, "push", "--force", remote).
|
||||
CombinedOutput()
|
||||
if err != nil {
|
||||
log.Error().Msgf("push repo %v\n%v", repo, string(raw))
|
||||
return
|
||||
}
|
||||
|
||||
|
106
cmd/container.go
106
cmd/container.go
@ -1,39 +1,74 @@
|
||||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Copyright 2024 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/container"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
)
|
||||
|
||||
type ContainerCmd struct {
|
||||
Filter string `help:"filter by name"`
|
||||
DistroID string `help:"filter by distribution"`
|
||||
DistroRelease string `help:"filter by distribution release"`
|
||||
|
||||
List ContainerListCmd `cmd:"" help:"list containers"`
|
||||
Update ContainerUpdateCmd `cmd:"" help:"update containers"`
|
||||
Save ContainerSaveCmd `cmd:"" help:"save containers"`
|
||||
Cleanup ContainerCleanupCmd `cmd:"" help:"cleanup containers"`
|
||||
|
||||
RealtimeOutput RealtimeContainerOutputFlag `help:"show realtime output"`
|
||||
}
|
||||
|
||||
func (cmd ContainerCmd) Containers() (names []string) {
|
||||
type RealtimeContainerOutputFlag bool
|
||||
|
||||
func (f RealtimeContainerOutputFlag) AfterApply() (err error) {
|
||||
container.Stdout = bool(f)
|
||||
return
|
||||
}
|
||||
|
||||
func (cmd ContainerCmd) Containers() (diis []container.Image, err error) {
|
||||
images, err := container.Images()
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
return
|
||||
}
|
||||
|
||||
var dt distro.Distro
|
||||
if cmd.DistroID != "" {
|
||||
dt.ID, err = distro.NewID(cmd.DistroID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if cmd.DistroRelease != "" {
|
||||
dt.Release = cmd.DistroRelease
|
||||
}
|
||||
} else if cmd.DistroRelease != "" {
|
||||
err = errors.New("--distro-release has no use on its own")
|
||||
return
|
||||
}
|
||||
|
||||
for _, img := range images {
|
||||
if cmd.Filter != "" && !strings.Contains(img.Name, cmd.Filter) {
|
||||
if dt.ID != distro.None && dt.ID != img.Distro.ID {
|
||||
log.Debug().Msgf("skip %s", img.Name)
|
||||
continue
|
||||
}
|
||||
names = append(names, img.Name)
|
||||
|
||||
if dt.Release != "" && dt.Release != img.Distro.Release {
|
||||
log.Debug().Msgf("skip %s", img.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debug().Msgf("append %s", img.Name)
|
||||
diis = append(diis, img)
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -41,9 +76,40 @@ func (cmd ContainerCmd) Containers() (names []string) {
|
||||
type ContainerListCmd struct{}
|
||||
|
||||
func (cmd ContainerListCmd) Run(containerCmd *ContainerCmd) (err error) {
|
||||
for _, name := range containerCmd.Containers() {
|
||||
fmt.Println(name)
|
||||
images, err := containerCmd.Containers()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, img := range images {
|
||||
fmt.Printf("%s\n", img.Distro.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type ContainerUpdateCmd struct{}
|
||||
|
||||
func (cmd ContainerUpdateCmd) Run(g *Globals, containerCmd *ContainerCmd) (err error) {
|
||||
images, err := containerCmd.Containers()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
container.UseCache = false
|
||||
container.UsePrebuilt = false
|
||||
|
||||
// TODO move from all commands to main command line handler
|
||||
container.Commands = g.Config.Docker.Commands
|
||||
container.Registry = g.Config.Docker.Registry
|
||||
container.Timeout = g.Config.Docker.Timeout.Duration
|
||||
|
||||
for _, img := range images {
|
||||
_, err = img.Distro.Packages()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@ -52,13 +118,18 @@ type ContainerSaveCmd struct {
|
||||
}
|
||||
|
||||
func (cmd ContainerSaveCmd) Run(containerCmd *ContainerCmd) (err error) {
|
||||
for _, name := range containerCmd.Containers() {
|
||||
nlog := log.With().Str("name", name).Logger()
|
||||
images, err := containerCmd.Containers()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
output := filepath.Join(cmd.OutDir, name+".tar")
|
||||
for _, img := range images {
|
||||
nlog := log.With().Str("name", img.Name).Logger()
|
||||
|
||||
output := filepath.Join(cmd.OutDir, img.Name+".tar")
|
||||
nlog.Info().Msgf("saving to %v", output)
|
||||
|
||||
err = container.Save(name, output)
|
||||
err = container.Save(img.Name, output)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -81,9 +152,14 @@ func (cmd ContainerSaveCmd) Run(containerCmd *ContainerCmd) (err error) {
|
||||
type ContainerCleanupCmd struct{}
|
||||
|
||||
func (cmd ContainerCleanupCmd) Run(containerCmd *ContainerCmd) (err error) {
|
||||
images, err := containerCmd.Containers()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var output []byte
|
||||
for _, name := range containerCmd.Containers() {
|
||||
output, err = exec.Command(container.Runtime, "image", "rm", name).
|
||||
for _, img := range images {
|
||||
output, err = exec.Command(container.Runtime, "image", "rm", img.Name).
|
||||
CombinedOutput()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("output", string(output)).Msg("")
|
||||
|
44
cmd/debug.go
44
cmd/debug.go
@ -1,4 +1,4 @@
|
||||
// Copyright 2018 Mikhail Klementev. All rights reserved.
|
||||
// Copyright 2024 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
@ -22,8 +22,11 @@ import (
|
||||
)
|
||||
|
||||
type DebugCmd struct {
|
||||
Kernel string `help:"regexp (first match)" required:""`
|
||||
Gdb string `help:"gdb listen address" default:"tcp::1234"`
|
||||
KernelRegex string `required:"" help:"set kernel regex"`
|
||||
DistroID string `required:"" help:"set distribution"`
|
||||
DistroRelease string `required:"" help:"set distribution release"`
|
||||
|
||||
Gdb string `help:"gdb listen address" default:"tcp::1234"`
|
||||
|
||||
SshAddr string `help:"ssh address to listen" default:"127.0.0.1"`
|
||||
SshPort int `help:"ssh port to listen" default:"50022"`
|
||||
@ -45,7 +48,7 @@ type DebugCmd struct {
|
||||
func (cmd *DebugCmd) Run(g *Globals) (err error) {
|
||||
kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
log.Error().Err(err).Msg("read kernel config")
|
||||
}
|
||||
|
||||
var configPath string
|
||||
@ -63,7 +66,17 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
|
||||
ka.SourcePath = g.WorkDir
|
||||
}
|
||||
|
||||
ki, err := firstSupported(kcfg, ka, cmd.Kernel)
|
||||
var km artifact.Target
|
||||
km.Distro.ID, err = distro.NewID(cmd.DistroID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
km.Distro.Release = cmd.DistroRelease
|
||||
km.Kernel.Regex = cmd.KernelRegex
|
||||
|
||||
ka.Targets = []artifact.Target{km}
|
||||
|
||||
ki, err := firstSupported(kcfg, ka)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -161,14 +174,14 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
|
||||
// Module depends on one of the standard modules
|
||||
err = artifact.CopyStandardModules(q, ki)
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
log.Error().Err(err).Msg("copy standard modules")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = artifact.PreloadModules(q, ka, ki, g.Config.Docker.Timeout.Duration)
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
log.Error().Err(err).Msg("preload modules")
|
||||
return
|
||||
}
|
||||
|
||||
@ -180,9 +193,10 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
buildDir, outFile, output, err = artifact.Build(log.Logger, tmp, ka, ki, g.Config.Docker.Timeout.Duration)
|
||||
buildDir, outFile, output, err = artifact.Build(
|
||||
log.Logger, tmp, ka, ki, g.Config.Docker.Timeout.Duration, false)
|
||||
if err != nil {
|
||||
log.Print(err, output)
|
||||
log.Error().Err(err).Msg(output)
|
||||
return
|
||||
}
|
||||
|
||||
@ -206,7 +220,7 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
|
||||
}
|
||||
err = q.CopyFile(f.User, f.Local, f.Remote)
|
||||
if err != nil {
|
||||
log.Print("error copy err:", err, f.Local, f.Remote)
|
||||
log.Error().Err(err).Msgf("copy %s -> %s", f.Local, f.Remote)
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -223,15 +237,7 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func firstSupported(kcfg config.KernelConfig, ka artifact.Artifact,
|
||||
kernel string) (ki distro.KernelInfo, err error) {
|
||||
|
||||
km, err := kernelMask(kernel)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
ka.Targets = []artifact.Target{km}
|
||||
func firstSupported(kcfg config.KernelConfig, ka artifact.Artifact) (ki distro.KernelInfo, err error) {
|
||||
|
||||
for _, ki = range kcfg.Kernels {
|
||||
var supported bool
|
||||
|
104
cmd/kernel.go
104
cmd/kernel.go
@ -11,37 +11,36 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cavaliergopher/grab/v3"
|
||||
"github.com/naoina/toml"
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"code.dumpstack.io/tools/out-of-tree/artifact"
|
||||
"code.dumpstack.io/tools/out-of-tree/cache"
|
||||
"code.dumpstack.io/tools/out-of-tree/config"
|
||||
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
|
||||
"code.dumpstack.io/tools/out-of-tree/container"
|
||||
"code.dumpstack.io/tools/out-of-tree/distro"
|
||||
"code.dumpstack.io/tools/out-of-tree/fs"
|
||||
"code.dumpstack.io/tools/out-of-tree/kernel"
|
||||
)
|
||||
|
||||
type KernelCmd struct {
|
||||
NoDownload bool `help:"do not download qemu image while kernel generation"`
|
||||
UseHost bool `help:"also use host kernels"`
|
||||
Force bool `help:"force reinstall kernel"`
|
||||
NoHeaders bool `help:"do not install kernel headers"`
|
||||
Shuffle bool `help:"randomize kernels installation order"`
|
||||
Retries int `help:"amount of tries for each kernel" default:"2"`
|
||||
Threads int `help:"threads for parallel installation" default:"1"`
|
||||
Update bool `help:"update container"`
|
||||
ContainerCache bool `help:"try prebuilt container images first" default:"true" negatable:""`
|
||||
Max int `help:"maximum kernels to download" default:"100500"`
|
||||
NoPrune bool `help:"do not remove dangling or unused images from local storage after build"`
|
||||
NoCfgRegen bool `help:"do not update kernels.toml"`
|
||||
NoDownload bool `help:"do not download qemu image while kernel generation"`
|
||||
UseHost bool `help:"also use host kernels"`
|
||||
Force bool `help:"force reinstall kernel"`
|
||||
NoHeaders bool `help:"do not install kernel headers"`
|
||||
Shuffle bool `help:"randomize kernels installation order"`
|
||||
Retries int `help:"amount of tries for each kernel" default:"2"`
|
||||
Threads int `help:"threads for parallel installation" default:"1"`
|
||||
Update bool `help:"update container"`
|
||||
PrebuiltContainers bool `help:"try prebuilt container images first" default:"true" negatable:""`
|
||||
Max int `help:"maximum kernels to download" default:"100500"`
|
||||
NoPrune bool `help:"do not remove dangling or unused images from local storage after build"`
|
||||
NoCfgRegen bool `help:"do not update kernels.toml"`
|
||||
|
||||
ContainerTimeout time.Duration `help:"container timeout"`
|
||||
|
||||
RealtimeOutput RealtimeContainerOutputFlag `help:"show realtime output"`
|
||||
|
||||
List KernelListCmd `cmd:"" help:"list kernels"`
|
||||
ListRemote KernelListRemoteCmd `cmd:"" help:"list remote kernels"`
|
||||
Autogen KernelAutogenCmd `cmd:"" help:"generate kernels based on the current config"`
|
||||
@ -168,33 +167,6 @@ func (cmd *KernelCmd) GenKernel(km artifact.Target, pkg string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (cmd *KernelCmd) fetchContainerCache(c container.Container) {
|
||||
if !cmd.ContainerCache {
|
||||
return
|
||||
}
|
||||
if c.Exist() {
|
||||
return
|
||||
}
|
||||
|
||||
tmp, err := fs.TempDir()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(tmp)
|
||||
|
||||
resp, err := grab.Get(tmp, cache.ContainerURL(c.Name()))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer os.Remove(resp.Filename)
|
||||
|
||||
err = container.Load(resp.Filename, c.Name())
|
||||
if err == nil {
|
||||
log.Info().Msgf("use prebuilt container %s", c.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
@ -211,6 +183,8 @@ func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
|
||||
container.Prune = false
|
||||
}
|
||||
|
||||
container.UsePrebuilt = cmd.PrebuiltContainers
|
||||
|
||||
cmd.kcfg, err = config.ReadKernelConfig(g.Config.Kernels)
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msg("read kernels config")
|
||||
@ -230,13 +204,6 @@ func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
c, err := container.New(km.Distro)
|
||||
if err != nil || cmd.shutdown {
|
||||
return
|
||||
}
|
||||
|
||||
cmd.fetchContainerCache(c)
|
||||
|
||||
pkgs, err := kernel.MatchPackages(km)
|
||||
if err != nil || cmd.shutdown {
|
||||
return
|
||||
@ -263,7 +230,7 @@ func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
|
||||
}
|
||||
|
||||
if cmd.stats.success >= cmd.Max {
|
||||
log.Print("Max is reached")
|
||||
log.Info().Msg("Max is reached")
|
||||
swg.Done()
|
||||
break
|
||||
}
|
||||
@ -300,8 +267,8 @@ func (cmd *KernelListCmd) Run(g *Globals) (err error) {
|
||||
}
|
||||
|
||||
type KernelListRemoteCmd struct {
|
||||
Distro string `required:"" help:"distribution"`
|
||||
Ver string `help:"distro version"`
|
||||
DistroID string `required:"" help:"distribution"`
|
||||
DistroRelease string `help:"distro version"`
|
||||
}
|
||||
|
||||
func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
@ -312,13 +279,15 @@ func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error
|
||||
container.Prune = false
|
||||
}
|
||||
|
||||
distroType, err := distro.NewID(cmd.Distro)
|
||||
container.UsePrebuilt = kernelCmd.PrebuiltContainers
|
||||
|
||||
distroType, err := distro.NewID(cmd.DistroID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
km := artifact.Target{
|
||||
Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
|
||||
Distro: distro.Distro{ID: distroType, Release: cmd.DistroRelease},
|
||||
Kernel: artifact.Kernel{Regex: ".*"},
|
||||
}
|
||||
|
||||
@ -330,13 +299,6 @@ func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error
|
||||
container.Registry = g.Config.Docker.Registry
|
||||
container.Commands = g.Config.Docker.Commands
|
||||
|
||||
c, err := container.New(km.Distro)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
kernelCmd.fetchContainerCache(c)
|
||||
|
||||
pkgs, err := kernel.MatchPackages(km)
|
||||
// error check skipped on purpose
|
||||
|
||||
@ -376,12 +338,12 @@ func (cmd *KernelAutogenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
}
|
||||
|
||||
type KernelGenallCmd struct {
|
||||
Distro string `help:"distribution"`
|
||||
Ver string `help:"distro version"`
|
||||
DistroID string `help:"distribution"`
|
||||
DistroRelease string `help:"distro version"`
|
||||
}
|
||||
|
||||
func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
distroType, err := distro.NewID(cmd.Distro)
|
||||
distroType, err := distro.NewID(cmd.DistroID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -397,7 +359,7 @@ func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
continue
|
||||
}
|
||||
|
||||
if cmd.Ver != "" && dist.Release != cmd.Ver {
|
||||
if cmd.DistroRelease != "" && dist.Release != cmd.DistroRelease {
|
||||
continue
|
||||
}
|
||||
|
||||
@ -416,13 +378,13 @@ func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
}
|
||||
|
||||
type KernelInstallCmd struct {
|
||||
Distro string `required:"" help:"distribution"`
|
||||
Ver string `required:"" help:"distro version"`
|
||||
Kernel string `required:"" help:"kernel release mask"`
|
||||
DistroID string `required:"" help:"distribution"`
|
||||
DistroRelease string `required:"" help:"distro version"`
|
||||
KernelRegex string `required:"" help:"kernel release mask"`
|
||||
}
|
||||
|
||||
func (cmd *KernelInstallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
distroType, err := distro.NewID(cmd.Distro)
|
||||
distroType, err := distro.NewID(cmd.DistroID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -430,8 +392,8 @@ func (cmd *KernelInstallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
|
||||
kernel.SetSigintHandler(&kernelCmd.shutdown)
|
||||
|
||||
km := artifact.Target{
|
||||
Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
|
||||
Kernel: artifact.Kernel{Regex: cmd.Kernel},
|
||||
Distro: distro.Distro{ID: distroType, Release: cmd.DistroRelease},
|
||||
Kernel: artifact.Kernel{Regex: cmd.KernelRegex},
|
||||
}
|
||||
err = kernelCmd.Generate(g, km)
|
||||
if err != nil {
|
||||
|
18
cmd/log.go
18
cmd/log.go
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Copyright 2024 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
@ -42,7 +42,7 @@ func (cmd *LogQueryCmd) Run(g *Globals) (err error) {
|
||||
|
||||
ka, kaErr := artifact.Artifact{}.Read(g.WorkDir + "/.out-of-tree.toml")
|
||||
if kaErr == nil {
|
||||
log.Print(".out-of-tree.toml found, filter by artifact name")
|
||||
log.Debug().Msg(".out-of-tree.toml found, filter by artifact name")
|
||||
les, err = getAllArtifactLogs(db, cmd.Tag, cmd.Num, ka)
|
||||
} else {
|
||||
les, err = getAllLogs(db, cmd.Tag, cmd.Num)
|
||||
@ -212,7 +212,12 @@ func center(s string, w int) string {
|
||||
}
|
||||
|
||||
func genOkFailCentered(name string, ok bool) (aurv aurora.Value) {
|
||||
name = center(name, 10)
|
||||
if ok {
|
||||
name += " OK"
|
||||
} else {
|
||||
name += " FAIL"
|
||||
}
|
||||
name = center(name, 14)
|
||||
if ok {
|
||||
aurv = aurora.BgGreen(aurora.Black(name))
|
||||
} else {
|
||||
@ -225,7 +230,7 @@ func logLogEntry(l logEntry) {
|
||||
distroInfo := fmt.Sprintf("%s-%s {%s}", l.Distro.ID,
|
||||
l.Distro.Release, l.KernelRelease)
|
||||
|
||||
artifactInfo := fmt.Sprintf("{[%s] %s}", l.Type, l.Name)
|
||||
artifactInfo := fmt.Sprintf("%s", l.Name)
|
||||
|
||||
timestamp := l.Timestamp.Format("2006-01-02 15:04")
|
||||
|
||||
@ -257,7 +262,10 @@ func logLogEntry(l logEntry) {
|
||||
additional = "(timeout)"
|
||||
}
|
||||
|
||||
colored := aurora.Sprintf("[%4d %4s] [%s] %s %-70s: %s %s",
|
||||
if len(distroInfo) > 40 {
|
||||
distroInfo = distroInfo[:40]
|
||||
}
|
||||
colored := aurora.Sprintf("[%4d %4s] [%s] %s %-40s: %s %s",
|
||||
l.ID, l.Tag, timestamp, artifactInfo, distroInfo, status,
|
||||
additional)
|
||||
|
||||
|
@ -33,7 +33,7 @@ type PackCmd struct {
|
||||
|
||||
func (cmd *PackCmd) Run(g *Globals) (err error) {
|
||||
tag := fmt.Sprintf("pack_run_%d", time.Now().Unix())
|
||||
log.Print("Tag:", tag)
|
||||
log.Info().Msgf("Tag: %s", tag)
|
||||
|
||||
files, err := os.ReadDir(g.WorkDir)
|
||||
if err != nil {
|
||||
@ -65,7 +65,7 @@ func (cmd *PackCmd) Run(g *Globals) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
log.Print(f.Name())
|
||||
log.Info().Msg(f.Name())
|
||||
|
||||
pew := PewCmd{
|
||||
Max: cmd.KernelRuns,
|
||||
|
304
cmd/pew.go
304
cmd/pew.go
@ -1,4 +1,4 @@
|
||||
// Copyright 2023 Mikhail Klementev. All rights reserved.
|
||||
// Copyright 2024 Mikhail Klementev. All rights reserved.
|
||||
// Use of this source code is governed by a AGPLv3 license
|
||||
// (or later) that can be found in the LICENSE file.
|
||||
|
||||
@ -12,6 +12,7 @@ import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -64,7 +65,6 @@ func successRate(state runstate) float64 {
|
||||
type PewCmd struct {
|
||||
Max int64 `help:"test no more than X kernels" default:"100500"`
|
||||
Runs int64 `help:"runs per each kernel" default:"1"`
|
||||
Kernel string `help:"override kernel regex"`
|
||||
RootFS string `help:"override rootfs image" type:"existingfile"`
|
||||
Guess bool `help:"try all defined kernels"`
|
||||
Shuffle bool `help:"randomize kernels test order"`
|
||||
@ -75,6 +75,10 @@ type PewCmd struct {
|
||||
Tag string `help:"log tagging"`
|
||||
Timeout time.Duration `help:"timeout after tool will not spawn new tests"`
|
||||
|
||||
KernelRegex string `help:"set kernel regex"`
|
||||
DistroID string `help:"set distribution"`
|
||||
DistroRelease string `help:"set distribution release"`
|
||||
|
||||
ArtifactConfig string `help:"path to artifact config" type:"path"`
|
||||
|
||||
QemuTimeout time.Duration `help:"timeout for qemu"`
|
||||
@ -83,6 +87,12 @@ type PewCmd struct {
|
||||
|
||||
Threshold float64 `help:"reliablity threshold for exit code" default:"1.00"`
|
||||
IncludeInternalErrors bool `help:"count internal errors as part of the success rate"`
|
||||
InternalErrorsRetries int `help:"amount of retries on internal errors" default:"3"`
|
||||
|
||||
OutputOnSuccess bool `help:"show output on success"`
|
||||
RealtimeOutput bool `help:"show realtime output"`
|
||||
|
||||
LogDir string `help:"write logs to directory"`
|
||||
|
||||
Endless bool `help:"endless tests"`
|
||||
EndlessTimeout time.Duration `help:"timeout between tests" default:"1m"`
|
||||
@ -158,6 +168,11 @@ func (cmd *PewCmd) Run(g *Globals) (err error) {
|
||||
cmd.useRemote = g.Remote
|
||||
cmd.remoteAddr = g.RemoteAddr
|
||||
|
||||
if cmd.RealtimeOutput && cmd.Threads != 1 {
|
||||
log.Warn().Msg("realtime output disables multithreading")
|
||||
cmd.Threads = 1
|
||||
}
|
||||
|
||||
if cmd.useRemote {
|
||||
c := client.Client{RemoteAddr: cmd.remoteAddr}
|
||||
cmd.Kcfg.Kernels, err = c.Kernels()
|
||||
@ -220,32 +235,68 @@ func (cmd *PewCmd) Run(g *Globals) (err error) {
|
||||
ka.SourcePath = g.WorkDir
|
||||
}
|
||||
|
||||
if cmd.Kernel != "" {
|
||||
if cmd.KernelRegex != "" {
|
||||
var km artifact.Target
|
||||
km, err = kernelMask(cmd.Kernel)
|
||||
km.Kernel.Regex = cmd.KernelRegex
|
||||
|
||||
if cmd.DistroID == "" {
|
||||
err = errors.New("--distro-id is required")
|
||||
return
|
||||
}
|
||||
|
||||
var dt distro.ID
|
||||
dt, err = distro.NewID(cmd.DistroID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
km.Distro.ID = dt
|
||||
|
||||
if cmd.DistroRelease != "" {
|
||||
km.Distro.Release = cmd.DistroRelease
|
||||
}
|
||||
|
||||
ka.Targets = []artifact.Target{km}
|
||||
} else if cmd.DistroID != "" {
|
||||
var km artifact.Target
|
||||
|
||||
var dt distro.ID
|
||||
dt, err = distro.NewID(cmd.DistroID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
km.Distro.ID = dt
|
||||
|
||||
if cmd.DistroRelease != "" {
|
||||
km.Distro.Release = cmd.DistroRelease
|
||||
}
|
||||
|
||||
ka.Targets = []artifact.Target{km}
|
||||
} else if cmd.DistroRelease != "" {
|
||||
err = errors.New("--distro-release has no use on its own")
|
||||
return
|
||||
}
|
||||
|
||||
// TODO there was a lib for merge structures
|
||||
ka.Qemu.Timeout.Duration = g.Config.Qemu.Timeout.Duration
|
||||
ka.Docker.Timeout.Duration = g.Config.Docker.Timeout.Duration
|
||||
if ka.Qemu.Timeout.Duration == 0 {
|
||||
ka.Qemu.Timeout.Duration = g.Config.Qemu.Timeout.Duration
|
||||
}
|
||||
|
||||
if ka.Docker.Timeout.Duration == 0 {
|
||||
ka.Docker.Timeout.Duration = g.Config.Docker.Timeout.Duration
|
||||
}
|
||||
|
||||
if cmd.QemuTimeout != 0 {
|
||||
log.Info().Msgf("Set qemu timeout to %s", cmd.QemuTimeout)
|
||||
g.Config.Qemu.Timeout.Duration = cmd.QemuTimeout
ka.Qemu.Timeout.Duration = cmd.QemuTimeout
}

if cmd.DockerTimeout != 0 {
log.Info().Msgf("Set docker timeout to %s", cmd.DockerTimeout)
g.Config.Docker.Timeout.Duration = cmd.DockerTimeout
ka.Docker.Timeout.Duration = cmd.DockerTimeout
}

log.Info().Msgf("Qemu timeout: %s", ka.Qemu.Timeout.Duration)
log.Info().Msgf("Docker timeout: %s", ka.Docker.Timeout.Duration)

if cmd.Tag == "" {
cmd.Tag = fmt.Sprintf("%d", time.Now().Unix())
}

@ -361,39 +412,54 @@ func (cmd PewCmd) testArtifact(swg *sizedwaitgroup.SizedWaitGroup,

defer swg.Done()

logdir := "logs/" + cmd.Tag
err := os.MkdirAll(logdir, os.ModePerm)
if err != nil {
log.Error().Err(err).Msgf("mkdir %s", logdir)
return
}
var logDirWriter *zerolog.ConsoleWriter
if cmd.LogDir != "" {
logdir := filepath.Join(cmd.LogDir, cmd.Tag)
err := os.MkdirAll(logdir, os.ModePerm)
if err != nil {
log.Error().Err(err).Msgf("mkdir %s", logdir)
return
}

logfile := fmt.Sprintf("logs/%s/%s-%s-%s.log",
cmd.Tag,
ki.Distro.ID.String(),
ki.Distro.Release,
ki.KernelRelease,
)
f, err := os.Create(logfile)
if err != nil {
log.Error().Err(err).Msgf("create %s", logfile)
return
}
defer f.Close()
logfile := fmt.Sprintf("logs/%s/%s-%s-%s.log",
cmd.Tag,
ki.Distro.ID.String(),
ki.Distro.Release,
ki.KernelRelease,
)
f, err := os.Create(logfile)
if err != nil {
log.Error().Err(err).Msgf("create %s", logfile)
return
}
defer f.Close()

slog := zerolog.New(zerolog.MultiLevelWriter(
&ConsoleWriter,
&FileWriter,
&zerolog.ConsoleWriter{
logDirWriter = &zerolog.ConsoleWriter{
Out: f,
FieldsExclude: []string{
"distro_release",
"distro_type",
"kernel",
"command",
"workdir",
},
NoColor: true,
},
))
}
}

var slog zerolog.Logger
if logDirWriter != nil {
slog = zerolog.New(zerolog.MultiLevelWriter(
&ConsoleWriter,
&FileWriter,
logDirWriter,
))
} else {
slog = zerolog.New(zerolog.MultiLevelWriter(
&ConsoleWriter,
&FileWriter,
))
}

switch LogLevel {
case zerolog.TraceLevel, zerolog.DebugLevel:

@ -406,12 +472,33 @@ func (cmd PewCmd) testArtifact(swg *sizedwaitgroup.SizedWaitGroup,
Str("kernel", ki.KernelRelease).
Logger()

ka.Process(slog, ki,
cmd.Endless, cmd.Binary, cmd.EndlessStress, cmd.EndlessTimeout,
func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo, result *artifact.Result) {
dumpResult(q, ka, ki, result, cmd.Dist, cmd.Tag, cmd.Binary, cmd.DB)
},
)
retriesLeft := cmd.InternalErrorsRetries
var stop bool
for !stop {
ka.Process(slog, ki, cmd.OutputOnSuccess, cmd.RealtimeOutput,
cmd.Endless, cmd.Binary, cmd.EndlessStress, cmd.EndlessTimeout,
func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo, res *artifact.Result) {
if res.InternalError == nil {
cmd.dumpResult(q, ka, ki, res)
stop = true
return
}

q.Log.Warn().Err(res.InternalError).
Str("panic", fmt.Sprintf("%v", q.KernelPanic)).
Str("timeout", fmt.Sprintf("%v", q.KilledByTimeout)).
Int("retries_left", retriesLeft).
Msg("internal")

if retriesLeft == 0 {
state.InternalErrors += 1
stop = true
return
}
retriesLeft -= 1
},
)
}
}

func shuffleKernels(a []distro.KernelInfo) []distro.KernelInfo {

@ -492,25 +579,6 @@ func (cmd PewCmd) performCI(ka artifact.Artifact) (err error) {
return
}

func kernelMask(kernel string) (km artifact.Target, err error) {
parts := strings.Split(kernel, ":")
if len(parts) != 2 {
err = errors.New("kernel is not 'distroType:regex'")
return
}

dt, err := distro.NewID(parts[0])
if err != nil {
return
}

km = artifact.Target{
Distro: distro.Distro{ID: dt},
Kernel: artifact.Kernel{Regex: parts[1]},
}
return
}

func genOkFail(name string, ok bool) (aurv aurora.Value) {
s := " " + name
if name == "" {

@ -526,76 +594,70 @@ func genOkFail(name string, ok bool) (aurv aurora.Value) {
return
}

func dumpResult(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
res *artifact.Result, dist, tag, binary string, db *sql.DB) {
func (cmd PewCmd) dumpResult(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo, res *artifact.Result) {
state.Overall += 1

// TODO refactor
if res.Test.Ok {
state.Success += 1
}

if res.InternalError != nil {
q.Log.Warn().Err(res.InternalError).
Str("panic", fmt.Sprintf("%v", q.KernelPanic)).
Str("timeout", fmt.Sprintf("%v", q.KilledByTimeout)).
Msg("internal")
res.InternalErrorString = res.InternalError.Error()
state.InternalErrors += 1
colored := ""
switch ka.Type {
case artifact.KernelExploit:
colored = aurora.Sprintf("%s %s",
genOkFail("BUILD", res.Build.Ok),
genOkFail("LPE", res.Test.Ok))
case artifact.KernelModule:
colored = aurora.Sprintf("%s %s %s",
genOkFail("BUILD", res.Build.Ok),
genOkFail("INSMOD", res.Run.Ok),
genOkFail("TEST", res.Test.Ok))
case artifact.Script:
colored = aurora.Sprintf("%s",
genOkFail("", res.Test.Ok))
}

additional := ""
if q.KernelPanic {
additional = "(panic)"
} else if q.KilledByTimeout {
additional = "(timeout)"
}

if additional != "" {
q.Log.Info().Msgf("%v %v", colored, additional)
} else {
colored := ""

state.Overall += 1

if res.Test.Ok {
state.Success += 1
}

switch ka.Type {
case artifact.KernelExploit:
colored = aurora.Sprintf("%s %s",
genOkFail("BUILD", res.Build.Ok),
genOkFail("LPE", res.Test.Ok))
case artifact.KernelModule:
colored = aurora.Sprintf("%s %s %s",
genOkFail("BUILD", res.Build.Ok),
genOkFail("INSMOD", res.Run.Ok),
genOkFail("TEST", res.Test.Ok))
case artifact.Script:
colored = aurora.Sprintf("%s",
genOkFail("", res.Test.Ok))
}

additional := ""
if q.KernelPanic {
additional = "(panic)"
} else if q.KilledByTimeout {
additional = "(timeout)"
}

if additional != "" {
q.Log.Info().Msgf("%v %v", colored, additional)
} else {
q.Log.Info().Msgf("%v", colored)
}
q.Log.Info().Msgf("%v", colored)
}

err := addToLog(db, q, ka, ki, res, tag)
err := addToLog(cmd.DB, q, ka, ki, res, cmd.Tag)
if err != nil {
q.Log.Warn().Err(err).Msgf("[db] addToLog (%v)", ka)
q.Log.Error().Err(err).Msgf("[db] addToLog (%v)", ka)
}

if binary == "" && dist != pathDevNull {
err = os.MkdirAll(dist, os.ModePerm)
if err != nil {
log.Warn().Err(err).Msgf("os.MkdirAll (%v)", ka)
}
if cmd.Binary != "" {
return
}

path := fmt.Sprintf("%s/%s-%s-%s", dist, ki.Distro.ID,
ki.Distro.Release, ki.KernelRelease)
if ka.Type != artifact.KernelExploit {
path += ".ko"
}
if cmd.Dist == pathDevNull { // why?
return
}

err = artifact.CopyFile(res.BuildArtifact, path)
if err != nil {
log.Warn().Err(err).Msgf("copy file (%v)", ka)
}
err = os.MkdirAll(cmd.Dist, os.ModePerm)
if err != nil {
log.Error().Err(err).Msgf("os.MkdirAll (%v)", ka)
return
}

path := fmt.Sprintf("%s/%s-%s-%s", cmd.Dist, ki.Distro.ID,
ki.Distro.Release, ki.KernelRelease)
if ka.Type != artifact.KernelExploit {
path += ".ko"
}

err = artifact.CopyFile(res.BuildArtifact, path)
if err != nil {
log.Error().Err(err).Msgf("copy file (%v)", ka)
return
}
}
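The new PewCmd fields used above (QemuTimeout, DockerTimeout, LogDir, InternalErrorsRetries, OutputOnSuccess, RealtimeOutput) surface as command-line flags. A hypothetical invocation, assuming the usual kebab-case flag spellings derived from the field names (the exact spellings are not shown in this diff):

out-of-tree pew --qemu-timeout=10m --docker-timeout=30m --log-dir=logs --internal-errors-retries=3 --realtime-output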
@ -35,9 +35,12 @@ type OutOfTree struct {
Timeout artifact.Duration
Registry string

// Commands that will be executed before
// the base layer of Dockerfile
Commands []distro.Command
// Commands that are executed before (prepend) and after (append) the
// base layer of the Dockerfile.
Commands struct {
Prepend []distro.Command
Append []distro.Command
}
}
}
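For reference, a minimal sketch of how the split prepend/append commands might look in .out-of-tree.toml, assuming the default lowercase key mapping for these fields (the exact TOML layout is an assumption, not shown in this diff):

[docker]
timeout = "10m"

[[docker.commands.prepend]]
# assumed structure: optional distro filter plus the shell command injected before the base layer
distro = { id = "Ubuntu", release = "22.04" }
command = "echo 'runs before the base layer'"

[[docker.commands.append]]
command = "echo 'runs after the generated ENV/RUN lines'"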
@ -14,13 +14,17 @@ import (
"path/filepath"
"regexp"
"strings"
"sync"
"time"

"github.com/cavaliergopher/grab/v3"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"

"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
)

var Runtime = "docker"

@ -29,12 +33,21 @@ var Registry = ""

var Timeout time.Duration

var Commands []distro.Command
// Commands that are executed before (prepend) and after (append) the
// base layer of the Dockerfile.
var Commands struct {
Prepend []distro.Command
Append []distro.Command
}

var UseCache = true

var UsePrebuilt = true

var Prune = true

var Stdout = false

type Image struct {
Name string
Distro distro.Distro

@ -92,6 +105,25 @@ func Load(localpath string, name string) (err error) {
return
}

if strings.Contains(Runtime, "docker") {
var err2 error
cmd = exec.Command(Runtime, "tag", "localhost/"+name, name)
log.Debug().Msgf("%v", cmd)

raw, err2 = cmd.CombinedOutput()
if err2 != nil {
log.Debug().Err(err2).Msg(string(raw))
}

cmd = exec.Command(Runtime, "rmi", "localhost/"+name)
log.Debug().Msgf("%v", cmd)

raw, err2 = cmd.CombinedOutput()
if err2 != nil {
log.Debug().Err(err2).Msg(string(raw))
}
}

return
}

@ -146,7 +178,15 @@ type Container struct {
// Additional arguments
Args []string

// Base of container is local-only
LocalBase bool

Log zerolog.Logger

commandsOutput struct {
listener chan string
mu sync.Mutex
}
}

func New(dist distro.Distro) (c Container, err error) {

@ -205,6 +245,43 @@ func NewFromKernelInfo(ki distro.KernelInfo) (
return
}

// c.SetCommandsOutputHandler(func(s string) { fmt.Println(s) })
// defer c.CloseCommandsOutputHandler()
func (c *Container) SetCommandsOutputHandler(handler func(s string)) {
c.commandsOutput.mu.Lock()
defer c.commandsOutput.mu.Unlock()

c.commandsOutput.listener = make(chan string)

go func(l chan string) {
for m := range l {
if m != "" {
handler(m)
}
}
}(c.commandsOutput.listener)
}

func (c *Container) CloseCommandsOutputHandler() {
c.commandsOutput.mu.Lock()
defer c.commandsOutput.mu.Unlock()

close(c.commandsOutput.listener)
c.commandsOutput.listener = nil
}

func (c *Container) handleCommandsOutput(m string) {
if c.commandsOutput.listener == nil {
return
}
c.commandsOutput.mu.Lock()
defer c.commandsOutput.mu.Unlock()

if c.commandsOutput.listener != nil {
c.commandsOutput.listener <- m
}
}
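A minimal sketch of how a caller might consume the new commands-output listener to stream container output line by line; the import path of the container package and the release string are assumptions, everything else only uses functions shown in this diff:

package main

import (
	"fmt"

	"code.dumpstack.io/tools/out-of-tree/container" // assumed import path
	"code.dumpstack.io/tools/out-of-tree/distro"
)

func main() {
	dt, err := distro.NewID("ubuntu") // assumed spelling of the distro ID
	if err != nil {
		panic(err)
	}

	c, err := container.New(distro.Distro{ID: dt, Release: "22.04"})
	if err != nil {
		panic(err)
	}

	// print each line of container output as soon as it is produced
	c.SetCommandsOutputHandler(func(s string) { fmt.Println(s) })
	defer c.CloseCommandsOutputHandler()

	if _, err := c.Run("/tmp", []string{"uname -a"}); err != nil {
		panic(err)
	}
}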
func (c Container) Name() string {
return c.name
}

@ -231,7 +308,38 @@ func (c Container) Exist() (yes bool) {
return
}

func (c Container) loadPrebuilt() (err error) {
if c.Exist() && UseCache {
return
}

tmp, err := fs.TempDir()
if err != nil {
return
}
defer os.RemoveAll(tmp)

log.Info().Msgf("download prebuilt container %s", c.Name())
resp, err := grab.Get(tmp, cache.ContainerURL(c.Name()))
if err != nil {
return
}

defer os.Remove(resp.Filename)

err = Load(resp.Filename, c.Name())
if err == nil {
log.Info().Msgf("use prebuilt container %s", c.Name())
}

return
}

func (c Container) Build(image string, envs, runs []string) (err error) {
if c.Exist() && UseCache {
return
}

cdir := dotfiles.Dir("containers", c.name)
cfile := filepath.Join(cdir, "Dockerfile")

@ -241,9 +349,15 @@ func (c Container) Build(image string, envs, runs []string) (err error) {
}
cf += image + "\n"

for _, c := range Commands {
// TODO check for distro type
cf += "RUN " + c.Command + "\n"
for _, cmd := range Commands.Prepend {
if cmd.Distro.ID != distro.None && cmd.Distro.ID != c.dist.ID {
continue
}
if cmd.Distro.Release != "" && cmd.Distro.Release != c.dist.Release {
continue
}

cf += "RUN " + cmd.Command + "\n"
}

for _, e := range envs {

@ -254,6 +368,17 @@ func (c Container) Build(image string, envs, runs []string) (err error) {
cf += "RUN " + c + "\n"
}

for _, cmd := range Commands.Append {
if cmd.Distro.ID != distro.None && cmd.Distro.ID != c.dist.ID {
continue
}
if cmd.Distro.Release != "" && cmd.Distro.Release != c.dist.Release {
continue
}

cf += "RUN " + cmd.Command + "\n"
}

buf, err := os.ReadFile(cfile)
if err != nil {
err = os.WriteFile(cfile, []byte(cf), os.ModePerm)

@ -277,10 +402,17 @@ func (c Container) Build(image string, envs, runs []string) (err error) {
c.Log.Info().Msg("build")
}

output, err := c.build(cdir)
if err != nil {
c.Log.Error().Err(err).Msg(output)
return
if UsePrebuilt {
err = c.loadPrebuilt()
}

if err != nil || !UsePrebuilt {
var output string
output, err = c.build(cdir)
if err != nil {
c.Log.Error().Err(err).Msg(output)
return
}
}

c.Log.Info().Msg("success")

@ -299,7 +431,10 @@ func (c Container) build(imagePath string) (output string, err error) {

args := []string{"build"}
if !UseCache {
args = append(args, "--pull", "--no-cache")
if !c.LocalBase {
args = append(args, "--pull")
}
args = append(args, "--no-cache")
}
args = append(args, "-t", c.name, imagePath)

@ -324,6 +459,10 @@ func (c Container) build(imagePath string) (output string, err error) {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
if Stdout {
fmt.Println(m)
}
c.handleCommandsOutput(m)
output += m + "\n"
flog.Trace().Str("stdout", m).Msg("")
}

@ -333,7 +472,7 @@ func (c Container) build(imagePath string) (output string, err error) {
return
}

func (c Container) Run(workdir string, cmds []string) (out string, err error) {
func (c *Container) Run(workdir string, cmds []string) (out string, err error) {
flog := c.Log.With().
Str("workdir", workdir).
Str("command", fmt.Sprintf("%v", cmds)).

@ -397,16 +536,17 @@ func (c Container) Run(workdir string, cmds []string) (out string, err error) {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
if Stdout {
fmt.Println(m)
}
c.handleCommandsOutput(m)
out += m + "\n"
flog.Trace().Str("stdout", m).Msg("")
flog.Trace().Str("container stdout", m).Msg("")
}
}()

err = cmd.Wait()
if err != nil {
e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
err, cmds, out)
err = errors.New(e)
return
}
@ -18,6 +18,7 @@ func createJobTable(db *sql.DB) (err error) {
group_uuid TEXT,
repo TEXT,
"commit" TEXT,
description TEXT,
config TEXT,
target TEXT,
created INT,

@ -30,8 +31,8 @@ func createJobTable(db *sql.DB) (err error) {

func AddJob(db *sql.DB, job *api.Job) (err error) {
stmt, err := db.Prepare(`INSERT INTO job (updated, uuid, group_uuid, repo, "commit", ` +
`config, target, created, started, finished) ` +
`VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10);`)
`description, config, target, created, started, finished) ` +
`VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);`)
if err != nil {
return
}

@ -53,7 +54,7 @@ func AddJob(db *sql.DB, job *api.Job) (err error) {
target := tbuf.Bytes()

res, err := stmt.Exec(time.Now().Unix(), job.UUID, job.Group,
job.RepoName, job.Commit, config, target,
job.RepoName, job.Commit, job.Description, config, target,
job.Created.Unix(), job.Started.Unix(),
job.Finished.Unix(),
)

@ -68,10 +69,10 @@ func AddJob(db *sql.DB, job *api.Job) (err error) {
func UpdateJob(db *sql.DB, job *api.Job) (err error) {
stmt, err := db.Prepare(`UPDATE job ` +
`SET updated=$1, uuid=$2, group_uuid=$3, repo=$4, ` +
`"commit"=$5, config=$6, target=$7, ` +
`created=$8, started=$9, finished=$10, ` +
`status=$11 ` +
`WHERE id=$12`)
`"commit"=$5, description=$6, config=$7, target=$8, ` +
`created=$9, started=$10, finished=$11, ` +
`status=$12 ` +
`WHERE id=$13`)
if err != nil {
return
}

@ -92,7 +93,7 @@ func UpdateJob(db *sql.DB, job *api.Job) (err error) {
target := tbuf.Bytes()

_, err = stmt.Exec(time.Now().Unix(), job.UUID, job.Group,
job.RepoName, job.Commit,
job.RepoName, job.Commit, job.Description,
config, target,
job.Created.Unix(), job.Started.Unix(),
job.Finished.Unix(), job.Status, job.ID)

@ -103,7 +104,8 @@ func scanJob(scan func(dest ...any) error) (job api.Job, err error) {
var config, target []byte
var updated, created, started, finished int64
err = scan(&job.ID, &updated, &job.UUID, &job.Group,
&job.RepoName, &job.Commit, &config, &target,
&job.RepoName, &job.Commit, &job.Description,
&config, &target,
&created, &started, &finished, &job.Status)
if err != nil {
return

@ -130,7 +132,7 @@ func scanJob(scan func(dest ...any) error) (job api.Job, err error) {

func Jobs(db *sql.DB, where string, args ...any) (jobs []api.Job, err error) {
q := `SELECT id, updated, uuid, group_uuid, ` +
`repo, "commit", config, target, created, ` +
`repo, "commit", description, config, target, created, ` +
`started, finished, status FROM job`
if len(where) != 0 {
q += ` WHERE ` + where

@ -163,7 +165,7 @@ func Jobs(db *sql.DB, where string, args ...any) (jobs []api.Job, err error) {
func Job(db *sql.DB, uuid string) (job api.Job, err error) {
stmt, err := db.Prepare(`SELECT id, updated, uuid, ` +
`group_uuid, ` +
`repo, "commit", config, target, ` +
`repo, "commit", description, config, target, ` +
`created, started, finished, status ` +
`FROM job WHERE uuid=$1`)
if err != nil {
@ -131,7 +131,7 @@ func (pj *jobProcessor) Process(res *Resources) (err error) {
var result *artifact.Result
var dq *qemu.System

pj.job.Artifact.Process(pj.log, pj.job.Target, false, "", "", 0,
pj.job.Artifact.Process(pj.log, pj.job.Target, false, false, false, "", "", 0,
func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
res *artifact.Result) {
@ -37,12 +37,9 @@ func (centos CentOS) Packages() (pkgs []string, err error) {
return
}

if !c.Exist() {
err = c.Build("centos:"+centos.release,
centos.envs(), centos.runs())
if err != nil {
return
}
err = c.Build("centos:"+centos.release, centos.envs(), centos.runs())
if err != nil {
return
}

cmd := "yum search kernel --showduplicates 2>/dev/null " +

@ -82,36 +79,41 @@ func (centos CentOS) runs() (commands []string) {
// TODO refactor
switch centos.release {
case "6":
repofmt := "[6.%d-%s]\\nbaseurl=https://vault.centos.org/6.%d/%s/$basearch/\\ngpgcheck=0"
repofmt := "[6.%d-%s]\\n" +
"name=CentOS-6.%d - %s\\n" +
"baseurl=https://vault.centos.org/6.%d/%s/$basearch/\\n" +
"gpgcheck=0"
for i := 0; i <= 10; i++ {
repos = append(repos, fmt.Sprintf(repofmt, i, "os", i, "os"))
repos = append(repos, fmt.Sprintf(repofmt, i, "updates", i, "updates"))
repos = append(repos, fmt.Sprintf(repofmt, i, "os", i, "os", i, "os"))
repos = append(repos, fmt.Sprintf(repofmt, i, "updates", i, "updates", i, "updates"))
}
cmdf("rm /etc/yum.repos.d/*")
case "7":
repofmt := "[%s-%s]\\nbaseurl=https://vault.centos.org/%s/%s/$basearch/\\ngpgcheck=0"
repofmt := "[%s-%s]\\n" +
"name=CentOS-%s - %s\\n" +
"baseurl=https://vault.centos.org/%s/%s/$basearch/\\n" +
"gpgcheck=0"
for _, ver := range []string{
"7.0.1406", "7.1.1503", "7.2.1511",
"7.3.1611", "7.4.1708", "7.5.1804",
"7.6.1810", "7.7.1908", "7.8.2003",
"7.9.2009",
} {
repos = append(repos, fmt.Sprintf(repofmt, ver, "os", ver, "os"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "updates", ver, "updates"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "os", ver, "os", ver, "os"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "updates", ver, "updates", ver, "updates"))
}

// FIXME http/gpgcheck=0
repofmt = "[%s-%s]\\nbaseurl=http://mirror.centos.org/centos-7/%s/%s/$basearch/\\ngpgcheck=0"
repos = append(repos, fmt.Sprintf(repofmt, "7.9.2009", "os", "7.9.2009", "os"))
repos = append(repos, fmt.Sprintf(repofmt, "7.9.2009", "updates", "7.9.2009", "updates"))
case "8":
repofmt := "[%s-%s]\\nbaseurl=https://vault.centos.org/%s/%s/$basearch/os/\\ngpgcheck=0"
repofmt := "[%s-%s]\\n" +
"name=CentOS-%s - %s\\n" +
"baseurl=https://vault.centos.org/%s/%s/$basearch/os/\\n" +
"gpgcheck=0"

for _, ver := range []string{
"8.0.1905", "8.1.1911", "8.2.2004",
"8.3.2011", "8.4.2105", "8.5.2111",
} {
repos = append(repos, fmt.Sprintf(repofmt, ver, "baseos", ver, "BaseOS"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "appstream", ver, "AppStream"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "baseos", ver, "baseos", ver, "BaseOS"))
repos = append(repos, fmt.Sprintf(repofmt, ver, "appstream", ver, "appstream", ver, "AppStream"))
}
default:
log.Fatal().Msgf("no support for centos %s", centos.release)

@ -119,6 +121,7 @@ func (centos CentOS) runs() (commands []string) {
}

cmdf("sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/* || true")
cmdf("sed -i 's/name/enabled=0\\nname/' /etc/yum.repos.d/* || true")

for _, repo := range repos {
cmdf("echo -e '%s' >> /etc/yum.repos.d/oot.repo\n", repo)
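For reference, with the added name= field the CentOS 7 format string above now expands to repository entries of the following shape in /etc/yum.repos.d/oot.repo:

[7.9.2009-os]
name=CentOS-7.9.2009 - os
baseurl=https://vault.centos.org/7.9.2009/os/$basearch/
gpgcheck=0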
@ -54,11 +54,9 @@ func (d Debian) Packages() (packages []string, err error) {
return
}

if !c.Exist() {
err = c.Build(d.image(), d.envs(), d.runs())
if err != nil {
return
}
err = c.Build(d.image(), d.envs(), d.runs())
if err != nil {
return
}

kernels, err := GetKernels()
@ -57,6 +57,8 @@ func (suse OpenSUSE) Packages() (pkgs []string, err error) {
if err != nil {
return
}
c.LocalBase = true

} else if strings.HasPrefix(suse.release, "13") {
name = "opensuse:13"
cnturl := cache.ContainerURL("openSUSE-13.2")

@ -64,17 +66,17 @@ func (suse OpenSUSE) Packages() (pkgs []string, err error) {
if err != nil {
return
}
c.LocalBase = true

} else if strings.HasPrefix(suse.release, "42") {
name = "opensuse/leap:42"
} else if strings.HasPrefix(suse.release, "15") {
name = "opensuse/leap:" + suse.release
}

if !c.Exist() {
err = c.Build(name, suse.envs(), suse.runs())
if err != nil {
return
}
err = c.Build(name, suse.envs(), suse.runs())
if err != nil {
return
}

cmd := "zypper search -s --match-exact kernel-default | grep x86_64 " +

@ -85,7 +87,32 @@ func (suse OpenSUSE) Packages() (pkgs []string, err error) {
return
}

pkgs = append(pkgs, strings.Fields(output)...)
// TODO Find a way for non-interactive installation of
// retracted kernels
retracted := []string{
"5.14.21-150400.24.49.3",
"5.14.21-150400.24.84.1",
"5.14.21-150500.55.22.1",
"5.3.18-150300.59.81.1",
"5.3.18-59.30.1",
"5.3.18-lp152.98.1",
}

for _, k := range strings.Fields(output) {
skip := false
for _, rk := range retracted {
if rk == k {
skip = true
break
}
}
if skip {
continue
}

pkgs = append(pkgs, k)
}

return
}
@ -38,12 +38,9 @@ func (ol OracleLinux) Packages() (pkgs []string, err error) {
return
}

if !c.Exist() {
err = c.Build("oraclelinux:"+ol.release,
ol.envs(), ol.runs())
if err != nil {
return
}
err = c.Build("oraclelinux:"+ol.release, ol.envs(), ol.runs())
if err != nil {
return
}

if ol.release == "8" {
@ -17,6 +17,7 @@ func init() {
"18.04",
"20.04",
"22.04",
"24.04",
}

for _, release := range releases {

@ -42,11 +43,9 @@ func (u Ubuntu) Packages() (pkgs []string, err error) {
return
}

if !c.Exist() {
err = c.Build("ubuntu:"+u.release, u.envs(), u.runs())
if err != nil {
return
}
err = c.Build("ubuntu:"+u.release, u.envs(), u.runs())
if err != nil {
return
}

cmd := "apt-cache search " +
@ -34,7 +34,7 @@ func GenHostKernels(download bool) (kernels []distro.KernelInfo, err error) {

rawOutput, err := cmd.CombinedOutput()
if err != nil {
log.Print(string(rawOutput), err)
log.Error().Err(err).Msg(string(rawOutput))
return
}

@ -86,7 +86,7 @@ func GenHostKernels(download bool) (kernels []distro.KernelInfo, err error) {
}

vmlinux := "/usr/lib/debug/boot/vmlinux-" + krel
log.Print("vmlinux", vmlinux)
log.Info().Msgf("vmlinux %s", vmlinux)
if fs.PathExists(vmlinux) {
ki.VmlinuxPath = vmlinux
}
6
main.go

@ -35,13 +35,13 @@ type CLI struct {
cmd.Globals

Pew cmd.PewCmd `cmd:"" help:"build, run, and test module/exploit"`
Kernel cmd.KernelCmd `cmd:"" help:"manipulate kernels"`
Kernel cmd.KernelCmd `cmd:"" aliases:"kernels" help:"manipulate kernels"`
Debug cmd.DebugCmd `cmd:"" help:"debug environment"`
Log cmd.LogCmd `cmd:"" help:"query logs"`
Pack cmd.PackCmd `cmd:"" help:"exploit pack test"`
Gen cmd.GenCmd `cmd:"" help:"generate .out-of-tree.toml skeleton"`
Image cmd.ImageCmd `cmd:"" help:"manage images"`
Container cmd.ContainerCmd `cmd:"" help:"manage containers"`
Image cmd.ImageCmd `cmd:"" aliases:"images" help:"manage images"`
Container cmd.ContainerCmd `cmd:"" aliases:"containers" help:"manage containers"`
Distro cmd.DistroCmd `cmd:"" help:"distro-related helpers"`

Daemon cmd.DaemonCmd `cmd:"" help:"run daemon"`
@ -101,6 +101,16 @@ type System struct {

Stdout, Stderr string

qemuOutput struct {
listener chan string
mu sync.Mutex
}

commandsOutput struct {
listener chan string
mu sync.Mutex
}

// accessible after qemu is closed
exitErr error

@ -138,6 +148,80 @@ func NewSystem(arch arch, kernel Kernel, drivePath string) (q *System, err error
return
}

// q.SetQemuOutputHandler(func(s string) { fmt.Println(s) })
// defer q.CloseQemuOutputHandler()
func (q *System) SetQemuOutputHandler(handler func(s string)) {
q.qemuOutput.mu.Lock()
defer q.qemuOutput.mu.Unlock()

q.qemuOutput.listener = make(chan string)

go func(l chan string) {
for m := range l {
if m != "" {
handler(m)
}
}
}(q.qemuOutput.listener)
}

func (q *System) CloseQemuOutputHandler() {
q.qemuOutput.mu.Lock()
defer q.qemuOutput.mu.Unlock()

close(q.qemuOutput.listener)
q.qemuOutput.listener = nil
}

func (q *System) handleQemuOutput(m string) {
if q.qemuOutput.listener == nil {
return
}
q.qemuOutput.mu.Lock()
defer q.qemuOutput.mu.Unlock()

if q.qemuOutput.listener != nil {
q.qemuOutput.listener <- m
}
}

// q.SetCommandsOutputHandler(func(s string) { fmt.Println(s) })
// defer q.CloseCommandsOutputHandler()
func (q *System) SetCommandsOutputHandler(handler func(s string)) {
q.commandsOutput.mu.Lock()
defer q.commandsOutput.mu.Unlock()

q.commandsOutput.listener = make(chan string)

go func(l chan string) {
for m := range l {
if m != "" {
handler(m)
}
}
}(q.commandsOutput.listener)
}

func (q *System) CloseCommandsOutputHandler() {
q.commandsOutput.mu.Lock()
defer q.commandsOutput.mu.Unlock()

close(q.commandsOutput.listener)
q.commandsOutput.listener = nil
}

func (q *System) handleCommandsOutput(m string) {
if q.commandsOutput.listener == nil {
return
}
q.commandsOutput.mu.Lock()
defer q.commandsOutput.mu.Unlock()

if q.commandsOutput.listener != nil {
q.commandsOutput.listener <- m
}
}
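A minimal sketch of attaching both listeners to an already-created qemu.System, mirroring the usage comments above; the helper below and the import path are illustrative assumptions:

package qemustream // illustrative helper, not part of out-of-tree

import (
	"fmt"

	"code.dumpstack.io/tools/out-of-tree/qemu" // assumed import path
)

// Attach wires handlers for raw qemu console output and for the output of
// q.Command(...) calls; the returned cleanup closes both listeners.
func Attach(q *qemu.System) (cleanup func()) {
	q.SetQemuOutputHandler(func(s string) {
		fmt.Println("[qemu]", s)
	})
	q.SetCommandsOutputHandler(func(s string) {
		fmt.Println("[cmd]", s)
	})
	return func() {
		q.CloseQemuOutputHandler()
		q.CloseCommandsOutputHandler()
	}
}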
func (q *System) SetSSHAddrPort(addr string, port int) (err error) {
// TODO validate
q.SSH.AddrPort = fmt.Sprintf("%s:%d", addr, port)

@ -312,7 +396,8 @@ func (q *System) Start() (err error) {
go func() {
scanner := bufio.NewScanner(q.pipe.stdout)
for scanner.Scan() {
m := scanner.Text()
m := strings.TrimSpace(scanner.Text())
q.handleQemuOutput(m)
q.Stdout += m + "\n"
q.Log.Trace().Str("stdout", m).Msg("qemu")
go q.checkOopsPanic(m)

@ -322,7 +407,8 @@ func (q *System) Start() (err error) {
go func() {
scanner := bufio.NewScanner(q.pipe.stderr)
for scanner.Scan() {
m := scanner.Text()
m := strings.TrimSpace(scanner.Text())
q.handleQemuOutput(m)
q.Stderr += m + "\n"
q.Log.Trace().Str("stderr", m).Msg("qemu")
}

@ -342,6 +428,7 @@ func (q *System) Start() (err error) {

if q.Timeout != 0 {
go func() {
q.Log.Debug().Msgf("qemu wait for %s before kill", q.Timeout)
time.Sleep(q.Timeout)
q.KilledByTimeout = true
q.Stop()

@ -473,7 +560,8 @@ func (q System) Command(user, cmd string) (output string, err error) {

scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
m := strings.TrimSpace(scanner.Text())
q.handleCommandsOutput(m)
output += m + "\n"
flog.Trace().Str("stdout", m).Msg("qemu command")
}

@ -486,7 +574,8 @@ func (q System) Command(user, cmd string) (output string, err error) {

scanner := bufio.NewScanner(stderr)
for scanner.Scan() {
m := scanner.Text()
m := strings.TrimSpace(scanner.Text())
q.handleCommandsOutput(m)
output += m + "\n"
// Note: it prints stderr as stdout
flog.Trace().Str("stdout", m).Msg("qemu command")
@ -28,6 +28,8 @@ RUN sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
RUN echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config

# network workaround
RUN chmod +x $TMPDIR/etc/rc.local

@ -4,6 +4,6 @@ set -eux

cd "$(dirname "$0")"

sudo docker build -t gen-centos6-image .
sudo docker run --privileged -v $(pwd):/shared -t gen-centos6-image
sudo podman build -t gen-centos6-image .
sudo podman run --privileged -v $(pwd):/shared -t gen-centos6-image
tar -Szcf out_of_tree_centos_6.img.tar.gz out_of_tree_centos_6.img
@ -13,6 +13,11 @@
#
FROM centos:7

RUN sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/* || true
RUN sed -i 's/name/enabled=0\nname/' /etc/yum.repos.d/* || true
RUN echo -e '[7.9.2009-os]\nbaseurl=https://vault.centos.org/7.9.2009/os/$basearch/\ngpgcheck=0' >> /etc/yum.repos.d/oot.repo
RUN echo -e '[7.9.2009-updates]\nbaseurl=https://vault.centos.org/7.9.2009/updates/$basearch/\ngpgcheck=0' >> /etc/yum.repos.d/oot.repo

RUN yum -y update
RUN yum -y groupinstall "Development Tools"
RUN yum -y install qemu-img e2fsprogs

@ -21,13 +26,13 @@ ENV TMPDIR=/tmp/centos

RUN yum --installroot=$TMPDIR \
--releasever=7 \
--disablerepo='*' \
--enablerepo=base \
-y groupinstall Base

RUN rm $TMPDIR/etc/yum.repos.d/*
RUN cp /etc/yum.repos.d/* $TMPDIR/etc/yum.repos.d/

RUN yum --installroot=$TMPDIR \
--releasever=7 \
--disablerepo='*' \
--enablerepo=base \
-y install openssh-server openssh-clients

RUN chroot $TMPDIR /bin/sh -c 'useradd -m user'

@ -37,6 +42,8 @@ RUN sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
RUN echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config

# network workaround
RUN chmod +x $TMPDIR/etc/rc.local
9
tools/qemu-centos-img/7/generate.sh
Executable file

@ -0,0 +1,9 @@
#!/bin/sh

set -eux

cd "$(dirname "$0")"

sudo podman build -t gen-centos7-image .
sudo podman run --privileged -v $(pwd):/shared -t gen-centos7-image
tar -Szcf out_of_tree_centos_7.img.tar.gz out_of_tree_centos_7.img
@ -28,6 +28,8 @@ RUN sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
RUN echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config

# network workaround
RUN chmod +x $TMPDIR/etc/rc.local

@ -4,6 +4,6 @@ set -eux

cd "$(dirname "$0")"

sudo docker build -t gen-centos8-image .
sudo docker run --privileged -v $(pwd):/shared -t gen-centos8-image
sudo podman build -t gen-centos8-image .
sudo podman run --privileged -v $(pwd):/shared -t gen-centos8-image
tar -Szcf out_of_tree_centos_8.img.tar.gz out_of_tree_centos_8.img
@ -11,6 +11,8 @@ sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config

echo '#!/bin/sh' > $TMPDIR/etc/rc.local
echo 'dhclient' >> $TMPDIR/etc/rc.local
@ -13,6 +13,7 @@ RUN yum --installroot=$TMPDIR \
--releasever=_VERSION_ \
--disablerepo='*' \
--enablerepo=ol_VERSION__baseos_latest \
--enablerepo=ol_VERSION__appstream \
-y groupinstall Base

RUN cp /etc/yum.repos.d/* $TMPDIR/etc/yum.repos.d/

@ -21,6 +22,7 @@ RUN yum --installroot=$TMPDIR \
--releasever=_VERSION_ \
--disablerepo='*' \
--enablerepo=ol_VERSION__baseos_latest \
--enablerepo=ol_VERSION__appstream \
-y install openssh-server openssh-clients dhclient yum

RUN chroot $TMPDIR /bin/sh -c 'useradd -m user'

@ -30,6 +32,8 @@ RUN sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
RUN echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
RUN sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
RUN echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config

# network workaround
RUN chmod +x $TMPDIR/etc/rc.local
@ -11,9 +11,11 @@ for version in 6 7 8 9; do

if [[ $version -eq 6 ]]; then
sed -i 's/baseos_latest/u10_base/' $version/Dockerfile
sed -i '/appstream/d' $version/Dockerfile
fi
if [[ $version -eq 7 ]]; then
sed -i 's/baseos_latest/u9_base/' $version/Dockerfile
sed -i '/appstream/d' $version/Dockerfile
fi

podman build -t gen-oraclelinux${version}-image $version
@ -1,35 +0,0 @@
# Copyright 2018 Mikhail Klementev. All rights reserved.
# Use of this source code is governed by a AGPLv3 license
# (or later) that can be found in the LICENSE file.
#
# Usage:
#
# $ docker build -t gen-ubuntu1404-image .
# $ docker run --privileged -v $(pwd):/shared -t gen-ubuntu1404-image
#
# ubuntu1404.img will be created in current directory. You can change $(pwd) to
# different directory to use different destination for image.
#
FROM ubuntu:14.04

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update
RUN apt-get install -y debootstrap qemu

ENV TMPDIR=/tmp/ubuntu
ENV IMAGEDIR=/tmp/image
ENV IMAGE=/shared/out_of_tree_ubuntu_14__04.img
ENV REPOSITORY=http://archive.ubuntu.com/ubuntu
ENV RELEASE=trusty

RUN mkdir $IMAGEDIR

# Must be executed with --privileged because of /dev/loop
CMD debootstrap --include=openssh-server,policykit-1 \
$RELEASE $TMPDIR $REPOSITORY && \
/shared/setup.sh $TMPDIR && \
qemu-img create $IMAGE 2G && \
mkfs.ext4 -F $IMAGE && \
mount -o loop $IMAGE $IMAGEDIR && \
cp -a $TMPDIR/* $IMAGEDIR/ && \
umount $IMAGEDIR

@ -1,17 +0,0 @@
#!/bin/sh -eux
# Copyright 2018 Mikhail Klementev. All rights reserved.
# Use of this source code is governed by a AGPLv3 license
# (or later) that can be found in the LICENSE file.
TMPDIR=$1
chroot $TMPDIR /bin/sh -c 'useradd -m user'
sed -i 's/root:\*:/root::/' $TMPDIR/etc/shadow
sed -i 's/user:!!:/user::/' $TMPDIR/etc/shadow
echo auth sufficient pam_permit.so > $TMPDIR/etc/pam.d/sshd
sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config

echo '#!/bin/sh' > $TMPDIR/etc/rc.local
echo 'dhclient eth0' >> $TMPDIR/etc/rc.local
chmod +x $TMPDIR/etc/rc.local
73
tools/qemu-ubuntu-img/generate-images.py
Executable file

@ -0,0 +1,73 @@
#!/usr/bin/env python3

import os
import subprocess

script_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_dir)

releases = [
    ('12.04', 'precise', 'http://old-releases.ubuntu.com/ubuntu'),
    ('14.04', 'trusty', 'http://archive.ubuntu.com/ubuntu'),
    ('16.04', 'xenial', 'http://archive.ubuntu.com/ubuntu'),
    ('18.04', 'bionic', 'http://archive.ubuntu.com/ubuntu'),
    ('20.04', 'focal', 'http://archive.ubuntu.com/ubuntu'),
    ('22.04', 'jammy', 'http://archive.ubuntu.com/ubuntu'),
    ('24.04', 'noble', 'http://archive.ubuntu.com/ubuntu')
]

template = '''
FROM ubuntu:{version}

RUN sed -i 's;http://archive.ubuntu.com/ubuntu;{repository};' /etc/apt/sources.list

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update
RUN apt-get install -y debootstrap qemu-utils
RUN apt-get install -y linux-image-generic

ENV TMPDIR=/tmp/ubuntu
ENV IMAGEDIR=/tmp/image
ENV IMAGE=/shared/out_of_tree_ubuntu_{img_version}.img
ENV REPOSITORY={repository}
ENV RELEASE={codename}

RUN mkdir $IMAGEDIR

# Must be executed with --privileged because of /dev/loop
CMD debootstrap --include=openssh-server,policykit-1 \
    $RELEASE $TMPDIR $REPOSITORY && \
    /shared/setup.sh $TMPDIR && \
    qemu-img create $IMAGE 2G && \
    mkfs.ext4 -F $IMAGE && \
    mount -o loop $IMAGE $IMAGEDIR && \
    cp -a $TMPDIR/* $IMAGEDIR/ && \
    umount $IMAGEDIR
'''

def run_cmd(cmd):
    print(f"+ {cmd}")
    subprocess.run(cmd, shell=True, check=True, executable='/bin/bash')

for version, codename, repository in releases:
    numeric_version = version.replace('.', '')
    img_version = version.replace(".", "__")

    dockerfile_content = template.format(
        version=version,
        img_version=img_version,
        codename=codename,
        repository=repository,
        numeric_version=numeric_version)

    os.makedirs(str(version), exist_ok=True)
    with open(f"{version}/Dockerfile", "w") as dockerfile:
        dockerfile.write(dockerfile_content)

    run_cmd(f"podman build -t gen-ubuntu{numeric_version}-image {version}")
    run_cmd(f"rm -rf {version}")

    run_cmd(f"podman run --privileged -v {os.getcwd()}:/shared -t gen-ubuntu{numeric_version}-image")

    run_cmd(f"tar -Szcf out_of_tree_ubuntu_{img_version}.img.tar.gz out_of_tree_ubuntu_{img_version}.img")
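The script expects podman and the adjacent setup.sh to be available and, because the generated containers run with --privileged and loop-mount the image, it is presumably invoked as root; an assumed invocation from the tools/qemu-ubuntu-img directory would be: sudo ./generate-images.py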
@ -11,7 +11,9 @@ sed -i '/PermitEmptyPasswords/d' $TMPDIR/etc/ssh/sshd_config
echo PermitEmptyPasswords yes >> $TMPDIR/etc/ssh/sshd_config
sed -i '/PermitRootLogin/d' $TMPDIR/etc/ssh/sshd_config
echo PermitRootLogin yes >> $TMPDIR/etc/ssh/sshd_config
sed -i '/UseDNS/d' $TMPDIR/etc/ssh/sshd_config
echo UseDNS no >> $TMPDIR/etc/ssh/sshd_config

echo '#!/bin/sh' > $TMPDIR/etc/rc.local
echo 'dhclient' >> $TMPDIR/etc/rc.local
echo 'dhclient || dhcpcd' >> $TMPDIR/etc/rc.local
chmod +x $TMPDIR/etc/rc.local