1
0
Fork 0

Compare commits

...

665 Commits

Author SHA1 Message Date
dump_stack() fc193afe92
refactor(daemon): switch to gob encoding 2024-02-28 03:04:38 +00:00
dump_stack() 1c8e1d068b
feat(daemon): query jobs by update time 2024-02-28 01:51:45 +00:00
dump_stack() c909c2a352
feat(daemon): timestamps 2024-02-27 02:00:07 +00:00
dump_stack() e633fd2e79
feat(daemon): task groups 2024-02-26 09:48:00 +00:00
dump_stack() 2b4db95166
feat(daemon): parallel execution 2024-02-25 18:19:48 +00:00
dump_stack() 6a9bfb503f
refactor: set qemu default values as constants 2024-02-25 18:02:39 +00:00
dump_stack() 29f4821320
feat: add watch flag 2024-02-25 18:02:03 +00:00
dump_stack() bb8344958e
ci: rename logs 2024-02-22 13:46:03 +00:00
dump_stack() 2c66dbc736
docs: add .readthedocs.yaml 2024-02-22 13:06:05 +00:00
dump_stack() cc43cbcc2d
Revert "ci: remove setup.sh"
This reverts commit e203229f00.
2024-02-22 10:33:50 +00:00
dump_stack() e203229f00
ci: remove setup.sh 2024-02-22 10:19:02 +00:00
dump_stack() 7b7c01ac8a
ci: deps 2024-02-22 10:01:01 +00:00
dump_stack() 247f0f90ed
ci: build cache 2024-02-22 09:58:49 +00:00
dump_stack() b1dc739cfa
ci: 8 threads; 128 kernels 2024-02-22 08:56:30 +00:00
dump_stack() 9727c7863c
ci: bump ssh-agent version 2024-02-21 22:52:54 +00:00
dump_stack() 335eeb5ed5
fix: load local archive 2024-02-21 22:52:38 +00:00
dump_stack() 8812cb4293
feat: allow amended commits 2024-02-21 22:48:39 +00:00
dump_stack() a9a42ba33a
fix: C-c/C-v 2024-02-21 18:58:49 +00:00
dump_stack() c17676d0f9
fix: use image load 2024-02-21 18:57:19 +00:00
dump_stack() 8251927821
Revert "fix: load, not import"
This reverts commit 35df5850f5.
2024-02-21 18:55:48 +00:00
dump_stack() 4a1422e010
ci: use the latest podman 2024-02-21 18:43:32 +00:00
dump_stack() 35df5850f5
fix: load, not import 2024-02-21 18:31:33 +00:00
dump_stack() 451566d27f
fix: do not rebuild existing containers w/o files 2024-02-21 18:16:51 +00:00
dump_stack() 61b995f330
Revert "fix: set default value"
This reverts commit b1be394d6b.
2024-02-21 18:08:36 +00:00
dump_stack() b1be394d6b
fix: set default value 2024-02-21 17:57:54 +00:00
dump_stack() cc5e70373f
fix: fetch prebuilt containers in `kernel list-remote` 2024-02-21 17:25:09 +00:00
dump_stack() 86213c171a
fix: do not reimport existing containers 2024-02-21 16:15:38 +00:00
dump_stack() 8a5971379a
feat(distro): add gcc-12 to ubuntu 22.04 image 2024-02-21 16:03:18 +00:00
dump_stack() ce0a4d92fc
refactor: logs 2024-02-21 15:39:07 +00:00
dump_stack() 059ca6dc93
fix: do not fail if no prebuilt container 2024-02-21 15:05:59 +00:00
dump_stack() d317769a64
refactor: logs 2024-02-21 15:00:21 +00:00
dump_stack() d733cde2ae
feat: prebuilt containers 2024-02-21 14:55:04 +00:00
dump_stack() 8a4ce9909b
ci: upload images only after test success 2024-02-21 14:05:47 +00:00
dump_stack() 293dbda2a7
fix: script path 2024-02-21 13:49:38 +00:00
dump_stack() 0c6d5bd371
fix: set build dir for script type 2024-02-21 13:42:08 +00:00
dump_stack() bd2f274887
refactor: logs 2024-02-21 13:30:49 +00:00
dump_stack() 0ba3651c4a
refactor: remove excessive logging 2023-05-26 13:14:55 +00:00
dump_stack() 6bda2344c9
ci: prune docker 2024-02-21 12:53:16 +00:00
dump_stack() 30e0e5f554
fix: correct output path 2024-02-21 12:38:29 +00:00
dump_stack() fcd559124d
ci: maximize build space 2024-02-21 12:35:51 +00:00
dump_stack() 4e01c719a0
ci: PATH 2024-02-21 12:30:32 +00:00
dump_stack() 3c6a2eab32
ci: yaml 2024-02-21 12:17:25 +00:00
dump_stack() a8066428f8
feat: save container images 2024-02-21 12:11:57 +00:00
dump_stack() 94d0cf1ae4
ci: yaml 2024-02-21 11:51:30 +00:00
dump_stack() 987be594a4
ci: yaml 2024-02-21 11:46:46 +00:00
dump_stack() 8aa5391a25
ci: add ssh key 2024-02-21 11:42:30 +00:00
dump_stack() e63059043f
fix: typo 2024-02-21 09:34:28 +00:00
dump_stack() 438f7f7386
feat: flag to skip kernels.toml update 2024-02-21 09:28:49 +00:00
dump_stack() edfaf68b11
fix: do not quit on error 2024-02-21 08:27:34 +00:00
dump_stack() ee232dc54a
ci: maximize build space 2024-02-21 02:11:52 +00:00
dump_stack() ed99ffd2e1
fix: add timeouts to kernel artifact 2024-02-20 23:00:52 +00:00
dump_stack() 375844e2cd
ci(qemu): set timeouts 2024-02-20 22:50:58 +00:00
dump_stack() bdeb395dd9
ci: upload-artifact v3 -> v4 2024-02-20 15:09:58 +00:00
dump_stack() 592fdc8b83
refactor(logs): show timeout 2024-02-20 15:08:44 +00:00
dump_stack() ed34797dc0
nix flake update 2024-02-20 14:35:19 +00:00
dump_stack() 4e5a499db7
fix: remove spaces from examples names 2024-02-20 14:30:17 +00:00
dump_stack() 4202a7bc26
test: do not compare non-marshallable field, refactor 2024-02-20 14:28:14 +00:00
dump_stack() 1356e0dc34
fix: remove toolchain directive 2024-02-20 13:28:43 +00:00
dump_stack() 0314b5ca93
feat: initial daemon implementation 2024-02-20 13:25:31 +00:00
dump_stack() 820208d079
refactor: remove ioutil 2024-02-20 12:44:35 +00:00
dump_stack() 48e5e8cd04
refactor: rand.Seed not required anymore 2024-02-20 12:28:27 +00:00
dump_stack() 9b69738163
refactor: replace deprecated functions 2024-02-20 12:24:40 +00:00
dump_stack() 0a9b16a0f6
feat: log function name instead of path 2024-02-20 12:14:34 +00:00
dump_stack() b4bf0314f0
refactor: remove ioutil (deprecated) 2024-02-20 12:04:24 +00:00
dump_stack() d43cd36858
refactor: debug logging 2024-02-20 12:03:35 +00:00
dump_stack() 67ffa2347a
feat: export GetFreeAddrPort 2024-02-20 12:01:41 +00:00
dump_stack() 6036211172
refactor: errors 2024-02-20 11:37:19 +00:00
dump_stack() 4e92950929
refactor: move commands to cmd/ 2024-02-17 22:38:43 +00:00
dump_stack() 1b3e23d188
feat: watch for kernel panic/warn/bug 2023-12-25 15:03:46 +00:00
dump_stack() 6f53a3f386
feat: use artefact name as default build output filename 2023-10-18 01:28:11 +00:00
dump_stack() 4a8f119b5d
feat: check ssh connection after tests 2023-08-24 00:52:21 +00:00
dump_stack() 56faf1a351
fix: remove type from kernel release 2023-06-20 16:01:38 +00:00
dump_stack() 41c4241d75
fix: do not include some modules on opensuse 15.2 2023-06-20 12:25:02 +00:00
dump_stack() 1cb5c40c77
fix: apply modprobe workaround to all opensuse releases 2023-06-20 06:34:11 +00:00
dump_stack() b86c0508f9
fix: apply af_packet workaround for all except opensuse15 2023-06-19 22:41:35 +00:00
dump_stack() 7e1b2a24f3
fix: use --no-hostonly only on opensuse 12 2023-06-19 22:34:08 +00:00
dump_stack() f8e5d29722
ci: increase timeout to 20 minutes 2023-06-18 14:02:02 +00:00
dump_stack() 47a3d00f1b
fix: use separate container for opensuse 12.3 2023-06-18 12:58:40 +00:00
dump_stack() 3a1fc86251
fix: cleanup boot and modules dirs 2023-06-18 12:25:37 +00:00
dump_stack() f44c275c9d
fix: add dracut on opensuse 12.3 2023-06-18 12:19:36 +00:00
dump_stack() 6ffaa3dad4
fix: cleanup dracut repo after install 2023-06-18 12:02:32 +00:00
dump_stack() 8516ff9e91
ci: include internal errors in status 2023-06-18 11:47:02 +00:00
dump_stack() 983f135097
feat: use dracut from opensuse 12.3 on 12.{1,2} 2023-06-18 11:42:52 +00:00
dump_stack() 2a55d611d1
fix: avoid fail on non-existing dracut module 2023-06-17 15:25:12 +00:00
dump_stack() 77201baa63
fix: use mkinitrd for opensuse 12 and older 2023-06-17 14:44:51 +00:00
dump_stack() 338e184424
fix: do not use replacefiles on opensuse 12 2023-06-17 13:36:00 +00:00
dump_stack() cfc6c1928f
ci: add opensuse 12 to matrix 2023-06-17 13:26:11 +00:00
dump_stack() 7497b67b8b
feat: support for openSUSE 12 2023-06-17 13:19:16 +00:00
dump_stack() 6d725d3581
ci: free more disk space 2023-06-17 12:47:12 +00:00
dump_stack() 312e708116
fix: ignore file conflicts 2023-06-17 09:16:48 +00:00
dump_stack() 0c16dc02dc
feat: support for openSUSE 13 2023-06-17 08:46:20 +00:00
dump_stack() b1f11af512
feat: implement container import 2023-06-17 08:45:17 +00:00
dump_stack() a6944050cc
feat: implement openSUSE 42+ support 2023-06-15 16:30:12 +00:00
dump_stack() c12b0a8829
feat: remove dangling or unused images from local storage after build 2023-06-03 13:05:17 +00:00
dump_stack() b53b3f2632
feat: blocklist buggy oel6 kernels 2023-06-02 09:53:05 +00:00
dump_stack() 79037d61ec
feat: add support for blocklisting kernels 2023-06-02 09:52:17 +00:00
dump_stack() 482378abaf
feat: add smap blocklist for some oracle linux kernels 2023-06-01 14:19:21 +00:00
dump_stack() b0b19d87ca
feat: add flag to count internal errors as part of the success rate 2023-06-01 14:19:20 +00:00
dump_stack() 5396375b47
feat: add smap blocklist for some debian 7 kernels 2023-06-01 14:19:18 +00:00
dump_stack() 6d6ee135cd
feat: make qemu cpu model/flags configurable 2023-06-01 14:19:17 +00:00
dump_stack() c7bc206ad8
feat: add error counters output 2023-06-01 14:19:16 +00:00
dump_stack() c54616594c
ci: use vm with the latest cpu 2023-06-01 14:19:14 +00:00
dump_stack() 4441e84063
fix: add check for shutdown request 2023-06-01 14:18:27 +00:00
dump_stack() 7d28549db7
fix: define autogen var 2023-06-01 14:07:53 +00:00
dump_stack() c1ee3e1ac0
fix: make shutdown var change available 2023-06-01 14:02:32 +00:00
dump_stack() 49388981aa
fix: check that the kernel is from the same distribution 2023-06-01 13:52:55 +00:00
dump_stack() a72d9b77de
test: skip debian list of packages test in CI 2023-06-01 09:51:03 +00:00
dump_stack() 75d740b22b
feat: sleep one second for the first threads 2023-05-30 21:04:06 +00:00
dump_stack() 8f39b502a4
refactor: rename 2023-05-30 20:55:23 +00:00
dump_stack() 916acc9895
feat: add timeout for first threads 2023-05-30 20:52:41 +00:00
dump_stack() 2f9b5d615b
feat: add qemu start up time before ssh and test duration logs 2023-05-30 20:37:02 +00:00
dump_stack() eaba233ca3
feat: add apparmor to debian 9+ base container 2023-05-29 21:55:32 +00:00
dump_stack() bec424b493
feat: caching more deps in the base docker container 2023-05-29 21:38:46 +00:00
dump_stack() 6b4298c55d
feat: add libc6 >= 2.14 on debian wheezy 2023-05-29 20:13:40 +00:00
dump_stack() 56dfabdfa3
feat: filter out pre-release kernels 2023-05-29 19:53:41 +00:00
dump_stack() 1e3b7a867d
fix: add gcc-6 to debian 10 2023-05-29 18:45:28 +00:00
dump_stack() 3b76b4c0cd
fix: add gcc-5 to debian 9 2023-05-29 12:37:26 +00:00
dump_stack() 43d7643ba7
fix: do not invalidate new kernels while updating kbuild 2023-05-29 12:27:00 +00:00
dump_stack() 347fcbae60
ci: disable kbuild refetch 2023-05-29 12:01:46 +00:00
dump_stack() e141f46892
feat: parameter to refetch kbuild 2023-05-29 11:48:25 +00:00
dump_stack() 9271d69bc6
feat: less smart kbuild version guess 2023-05-29 11:21:20 +00:00
dump_stack() 7942bd22fa
feat: implement parse debian kernel version 2023-05-29 10:19:29 +00:00
dump_stack() 17356ac0e4
test: remove obsolete test 2023-05-29 10:18:19 +00:00
dump_stack() ca57ea2dac
refactor: cache control for the container module 2023-05-29 09:09:30 +00:00
dump_stack() 004d4223f9
feat: add gcc-10 to debian 12 container 2023-05-29 08:56:35 +00:00
dump_stack() 443d23bd67
feat: add update for debian release 2023-05-29 08:38:53 +00:00
dump_stack() a3170ada69
feat: filter out unstable debian kernels 2023-05-29 08:16:21 +00:00
dump_stack() 31f4d0e92d
feat: implement list kernels for all distro versions 2023-05-29 08:14:38 +00:00
dump_stack() a748778b72
ci: set kernel cache limit to 128, disable cache on packages fetch 2023-05-28 20:51:49 +00:00
dump_stack() 487b9c520d
feat: add limit amount of kernels to fetch 2023-05-28 20:37:05 +00:00
dump_stack() 4db10c66dc
feat: change max rate limiter timeout to 2s 2023-05-28 20:36:20 +00:00
dump_stack() 5ec6873c57
feat: change rate limiter timeout to 1s 2023-05-28 17:22:16 +00:00
dump_stack() 8e57f7f5ef
feat: set one minute delay before trying with the new rate limiter 2023-05-28 12:42:01 +00:00
dump_stack() c0914820c0
feat: disable burst for mr/metasnap api 2023-05-28 12:31:52 +00:00
dump_stack() 9df0880e3e
feat: add max timeout for mr/metasnap api 2023-05-28 12:16:05 +00:00
dump_stack() 88bfa867fd
feat: include proposed-updates 2023-05-28 12:15:44 +00:00
dump_stack() 013fb42350
feat: match only stable debian releases, backports, and updates 2023-05-28 12:02:59 +00:00
dump_stack() dce7546dd2
fix: trace error before nullifying 2023-05-28 11:38:47 +00:00
dump_stack() b6bc9b36c5
feat: add release to debian kernel 2023-05-28 09:38:36 +00:00
dump_stack() 4fca6b07e1
ci: run full e2e tests on all branches 2023-05-28 06:57:27 +00:00
dump_stack() e618d6b001
fix: use gcc 4.8 for Debian 3.11-{1,2} kernels 2023-05-27 20:51:05 +00:00
dump_stack() 3d70591717
feat: use libc from jessie on wheezy 2023-05-27 18:45:17 +00:00
dump_stack() 5813721dc9
feat: filter out experimental debian kernels 2023-05-27 18:30:54 +00:00
dump_stack() f827a72bee
feat: build for 5.15.0 kernel modules in ol9 container 2023-05-27 16:56:15 +00:00
dump_stack() 5c1bd9a27d
ci: set kernel autogen limit to 256 2023-05-26 22:43:36 +00:00
dump_stack() 99fea27497
ci: increase qemu timeout to 4 minutes 2023-05-26 19:20:44 +00:00
dump_stack() fe2b05d5fc
fix: let deb12 to fetch libssl1 from deb11 2023-05-26 18:15:14 +00:00
dump_stack() 1410fe4660
fix: use wildcard in apt pin priority 2023-05-26 17:32:35 +00:00
dump_stack() 8c49680675
feat: add container timeout parameter to kernel command 2023-05-26 17:01:53 +00:00
dump_stack() 852680e944
feat: remove default timeout 2023-05-26 16:57:57 +00:00
dump_stack() 673b273593
feat: set the default container timeout to 8min 2023-05-26 16:22:02 +00:00
dump_stack() 650cf65fa4
fix: correctly handle already installed kernels for stats 2023-05-26 16:21:01 +00:00
dump_stack() 56e9898d75
fix: make sure apt-get will not download the repo version 2023-05-26 15:44:06 +00:00
dump_stack() 022ced0eba
feat: show amount of kernels failed to install 2023-05-26 15:43:24 +00:00
dump_stack() 603f1c3654
fix: always update containerfile if it is to build 2023-05-26 14:22:14 +00:00
dump_stack() 92f95d8658
feat: add delay for new limit 2023-05-26 13:10:54 +00:00
dump_stack() 1b87946130
feat: fetch only the local repo 2023-05-26 12:56:18 +00:00
dump_stack() d9bfa63ed1
feat: install .deb packages with apt-get 2023-05-26 12:43:44 +00:00
dump_stack() a98fca403c
Revert "fix: set gcc to 4.8 for linux-compiler-3.12-x86"
This reverts commit 3292e5c874.
2023-05-26 10:12:57 +00:00
dump_stack() 1354c029b1
feat: add gcc 4.6,4.8 to debian wheezy container 2023-05-26 10:03:44 +00:00
dump_stack() a57478e38b
fix: install only the latest gcc on debian wheezy 2023-05-26 09:39:03 +00:00
dump_stack() 60de4af81e
fix: break on found 2023-05-26 09:07:10 +00:00
dump_stack() 3292e5c874
fix: set gcc to 4.8 for linux-compiler-3.12-x86 2023-05-26 08:53:21 +00:00
dump_stack() db9516b358
feat: set the default kernel gen retries to 2 2023-05-26 08:20:21 +00:00
dump_stack() b17433ab42
fix: check for shutdown after unlock 2023-05-25 23:01:42 +00:00
dump_stack() d7cf88e34f
feat: add package name, use for debian check if already installed 2023-05-25 23:00:42 +00:00
dump_stack() 1a9fdf0917
feat: support for excluding kernels using regex 2023-05-25 18:35:27 +00:00
dump_stack() 0dceacd2df
fix: change artifact info padding 2023-05-25 16:32:44 +00:00
dump_stack() aceaf96448
feat: show only the last step that was successful/failed 2023-05-25 13:29:11 +00:00
dump_stack() b24008ad3f
fix: typo 2023-05-25 12:48:25 +00:00
dump_stack() a930b8d9b8
fix: typo 2023-05-25 12:46:21 +00:00
dump_stack() e18ddf8a11
fix: install kernel-firmware package on centos6 2023-05-25 12:42:16 +00:00
dump_stack() 2c462da3de
fix: change timestamp and padding in log query 2023-05-25 12:35:44 +00:00
dump_stack() 5e67783bb8
ci: modprobe xfs in case no uio/9p module 2023-05-25 12:12:07 +00:00
dump_stack() 9a5a42202e
feat: add linux-firmware to centos container 2023-05-25 12:05:32 +00:00
dump_stack() 4fa8c2c02b
feat: always show script 2023-05-25 12:04:29 +00:00
dump_stack() 301e2fde1a
fix: set internal error on module scp connection refused 2023-05-25 11:04:30 +00:00
dump_stack() a646e9cf33
refactor: rename parameter 2023-05-25 10:31:24 +00:00
dump_stack() b631767d98
fix: correct support for empty release 2023-05-25 10:25:26 +00:00
dump_stack() 6db5ffc8c2
fix: correct success rate 2023-05-25 10:02:45 +00:00
dump_stack() 10c5fb7ac4
feat: generate all kernels if no distro is set 2023-05-25 06:36:02 +00:00
dump_stack() ad3a76320e
ci: try also to load 9p 2023-05-25 06:14:43 +00:00
dump_stack() 65b49996e6
fix: use nullstring in lastlog 2023-05-24 22:48:58 +00:00
dump_stack() 7806a774e4
feat: add internal error to log database 2023-05-24 22:46:04 +00:00
dump_stack() 605871d17a
fix: check only distro id 2023-05-24 21:58:25 +00:00
dump_stack() 49760c065e
fix: set error when qemu is dead 2023-05-24 19:46:11 +00:00
dump_stack() 5749f7d96e
Revert "fix: check client is not nil before closing"
This reverts commit 94285cd94d.
2023-05-24 19:43:40 +00:00
dump_stack() 94285cd94d
fix: check client is not nil before closing 2023-05-24 19:36:54 +00:00
dump_stack() bf9a43c1b8
feat: reduce the default retry count to 4 2023-05-24 18:51:02 +00:00
dump_stack() 7d0ee9a1dc
fix: do not continue retrying when qemu is dead 2023-05-24 18:47:46 +00:00
dump_stack() b8058bffb0
feat: exclude internal errors from success rate 2023-05-24 18:44:14 +00:00
dump_stack() 04d6f0dbd3
fix: exit on panic while waiting for ssh 2023-05-24 18:39:01 +00:00
dump_stack() 11bf6eda38
ci: modprobe uio 2023-05-24 18:03:04 +00:00
dump_stack() 3d8faafcce
Revert "ci: run qemu in 2 threads"
This reverts commit 2c31dd25f9.
2023-05-24 17:40:13 +00:00
dump_stack() 2c31dd25f9
ci: run qemu in 2 threads 2023-05-24 17:07:25 +00:00
dump_stack() f36c412250
ci: rename workflow 2023-05-24 16:53:14 +00:00
dump_stack() abcf2c1013
fix: support updating containers 2023-05-24 16:43:25 +00:00
dump_stack() ee90bfaa72
fix: support force kernel reinstallation 2023-05-24 16:29:52 +00:00
dump_stack() 3271710653
refactor: add genkernel function 2023-05-24 16:13:12 +00:00
dump_stack() 3dd88bac0e
ci: rename jobs 2023-05-24 16:03:10 +00:00
dump_stack() 2c2124bdb0
refactor: merge guess with when no kernels defined 2023-05-24 15:46:58 +00:00
dump_stack() ceaacade0b
ci: disable selinux to allow to run from unit 2023-05-24 15:35:50 +00:00
dump_stack() 7b9935dc13
ci: do full e2e tests only on master 2023-05-24 15:20:12 +00:00
dump_stack() 4171954350
ci: add full e2e tests on vm 2023-05-24 15:17:28 +00:00
dump_stack() 707cf6f268
feat: use libc from jessie to support the latest backports 2023-05-24 14:36:54 +00:00
dump_stack() a4b20299cd
fix: do not use custom sources.list on debian 12 bookworm 2023-05-24 12:39:13 +00:00
dump_stack() 93f66b08f4
feat: add debian 12 bookworm image 2023-05-24 10:28:23 +00:00
dump_stack() c1fceb6ce6
fix: use kernel version (same as uname -r) for /lib/modules path 2023-05-24 10:01:25 +00:00
dump_stack() e0295664af
fix: typo 2023-05-24 09:32:43 +00:00
dump_stack() a9cd7ba18b
fix: use initramfs-tools from backports only on wheezy 2023-05-24 09:30:52 +00:00
dump_stack() 3740a07619
feat: use initramfs-tools from backports 2023-05-24 09:24:41 +00:00
dump_stack() fe96366eba
feat: support Debian 12 Bookworm 2023-05-24 09:14:18 +00:00
dump_stack() 48ba7b7c7b
feat: match the gcc version to distinguish between Debian releases 2023-05-24 09:12:33 +00:00
dump_stack() ae00b57471
fix: match gcc-10+ 2023-05-24 09:10:56 +00:00
dump_stack() 408e330b27
feat: add backports repo for debian 2023-05-24 09:10:32 +00:00
dump_stack() ac4fcaaa91
fix: no need to have separate case for debian anymore 2023-05-24 09:09:16 +00:00
dump_stack() bff4422098
feat: update existing container in case of containerfile changes 2023-05-24 09:08:31 +00:00
dump_stack() 4a5376eb43
fix: copy debian /usr/src with follow symbolic links 2023-05-24 07:30:18 +00:00
dump_stack() 02bca8e0ae
feat: use debian codename for container image 2023-05-24 07:17:05 +00:00
dump_stack() 26a65924df
feat: use numerical debian release string by default 2023-05-24 07:15:47 +00:00
dump_stack() 77e118be64
fix: use distro equal check 2023-05-23 23:02:09 +00:00
dump_stack() 66d45e69d9
fix: change return values of macos placeholder 2023-05-23 22:44:38 +00:00
dump_stack() c35def964e
fix: macos build 2023-05-23 22:40:08 +00:00
dump_stack() e2d66db16f
feat: add kernel install to distro interface 2023-05-23 22:36:46 +00:00
dump_stack() daaef89050
feat: add kernels list to distro interface 2023-05-23 22:00:20 +00:00
dump_stack() c1ec4add81
refactor: move kernelinfo to distro module 2023-05-23 21:33:50 +00:00
dump_stack() 0edb0ac0af
refactor: get rid of too many parameters 2023-05-23 21:21:06 +00:00
dump_stack() c6e06d8e3e
feat: multiple commands to run in container 2023-05-23 20:46:09 +00:00
dump_stack() e302c447f5
Revert "feat: numerical release strings"
This reverts commit 330519f617.
2023-05-23 17:53:58 +00:00
dump_stack() 330519f617
feat: numerical release strings 2023-05-23 17:48:15 +00:00
dump_stack() 7ca989fd8d
feat: use distro info to create the container 2023-05-23 16:54:45 +00:00
dump_stack() f2ce20e53b
feat: change interface from ID()/Release() to Distro() with both 2023-05-23 16:26:36 +00:00
dump_stack() 6f40fa554e
fix: avoid use of range variable 2023-05-23 13:46:16 +00:00
dump_stack() a1999115db
refactor: move container generation to distro modules 2023-05-23 13:20:48 +00:00
dump_stack() ff7bed76f2
fix: search in source volumes not destination ones 2023-05-23 09:34:29 +00:00
dump_stack() 14320faca8
fix: make sure we only remove the package extension 2023-05-23 09:22:50 +00:00
dump_stack() fa5d0adb39
feat: implement global docker timeout 2023-05-22 14:41:00 +00:00
dump_stack() 2fe3103603
refactor: add volume list 2023-05-22 14:28:28 +00:00
dump_stack() 2eb91ffac9
fix: missed unlock 2023-05-22 07:20:40 +00:00
dump_stack() 519b8d190a
fix: check that max is reached after unlock 2023-05-22 06:58:13 +00:00
dump_stack() d507b86373
fix: do not match generic-pae 2023-05-22 06:56:12 +00:00
dump_stack() e1dd7c18be
fix: workaround for grub-install issues 2023-05-22 06:55:06 +00:00
dump_stack() c076db3505
fix: avoid stderr 2023-05-22 06:01:40 +00:00
dump_stack() 632e4f5ffc
feat: parallel kernel installation 2023-05-21 21:43:18 +00:00
dump_stack() b02da8adeb
fix: typo 2023-05-21 20:39:52 +00:00
dump_stack() 31b0945a15
fix: use the default config dirs provider 2023-05-21 20:31:47 +00:00
dump_stack() ba03d4a049
feat: use all available kernels in case of no targets defined 2023-05-21 14:40:24 +00:00
dump_stack() b88ab7cca3
feat: emulate a cpu on macOS with all features supported by KVM 2023-05-21 14:33:46 +00:00
dump_stack() b8817a4930
feat: less verbose wget output 2023-05-18 22:31:34 +00:00
dump_stack() e767299222
feat: show both release number and name for debian 2023-05-18 22:26:42 +00:00
dump_stack() f0c82f9289
feat: implement list of available distros 2023-05-18 22:02:41 +00:00
dump_stack() 9c237b52db
test: remove obsolete match test 2023-05-18 21:40:22 +00:00
dump_stack() 120fcdc56b
feat: initial implementation of distro interface 2023-05-18 21:37:07 +00:00
dump_stack() c3774714fd
refactor: move distro id to separate file 2023-05-18 20:02:09 +00:00
dump_stack() 73f5df2425
feat!: new kernel config structure
BREAKING CHANGE: kernel definition in the configuration files has switched

from

  [[targets]]
  distro = { id = "Ubuntu", release = "18.04" }
  release_mask = ".*"

to

  [[targets]]
  distro = { id = "Ubuntu", release = "18.04" }
  kernel = { regex = ".*" }
2023-05-18 18:48:09 +00:00
dump_stack() d551cc8fc4
refactor: use the same name as in config 2023-05-18 18:27:51 +00:00
dump_stack() 6385ce92e3
feat!: rename supported kernels to targets
BREAKING CHANGE: .out-of-tree.toml: s/[[supported_kernels]]/[[targets]]/
2023-05-18 18:13:09 +00:00
dump_stack() 6939d64226
fix: continue in case of no/wrong preload .out-of-tree.toml 2023-05-18 16:43:15 +00:00
dump_stack() 071608805e
ci: typo 2023-05-18 16:25:36 +00:00
dump_stack() 80e57cb60c
ci: fix .out-of-tree.toml 2023-05-18 16:21:42 +00:00
dump_stack() bcf8de336f
feat!: introduce new distribution structure
BREAKING CHANGE: distro definition in the configuration files has switched

from

  [[supported_kernels]]
  distro_type = "Ubuntu"
  distro_release = "16.04"
  ...

to

  [[supported_kernels]]
  distro = { id = "Ubuntu", release = "16.04" }
  ...
2023-05-18 16:07:24 +00:00
dump_stack() 8d2d56bea3
build: add version for flake 2023-05-18 12:53:46 +00:00
dump_stack() 17256317c9
test: fix function name 2023-05-18 12:52:52 +00:00
dump_stack() 26faa53f8b
refactor: move cmdline generation out of distro switch 2023-05-18 12:07:59 +00:00
dump_stack() 5ccca6617f
ci: wait for the previous debian cache workflow to finish 2023-05-18 12:00:12 +00:00
dump_stack() 0589ae25e4
ci: automatically cancel previous jobs on new commits 2023-05-18 11:59:15 +00:00
dump_stack() d6670ee8d9
fix: typo 2023-05-18 11:50:17 +00:00
dump_stack() 6a338fc6ad
refactor: move ubutu install/cleanup to module 2023-05-18 11:46:12 +00:00
dump_stack() 407c1a7975
refactor: move oracle linux install/cleanup to module 2023-05-18 11:42:25 +00:00
dump_stack() 99c9346995
refactor: rename debian functions to common interface 2023-05-18 11:34:46 +00:00
dump_stack() 90f7e62888
Revert "refactor: remove debian functions to common interface"
This reverts commit 412199966e.
2023-05-18 11:32:48 +00:00
dump_stack() 412199966e
refactor: remove debian functions to common interface 2023-05-18 11:31:54 +00:00
dump_stack() ef35743579
refactor: move oraclelinux/ubuntu kernels match to modules 2023-05-18 11:28:06 +00:00
dump_stack() 71c2b2001c
refactor: move oracle linux runs/envs to module 2023-05-18 11:08:23 +00:00
dump_stack() 4eed03ec2a
fix: install/remove kernel in one layer 2023-05-18 10:50:24 +00:00
dump_stack() 3cd901b1be
fix: add appstream repo for centos8, refactor 2023-05-18 10:37:59 +00:00
dump_stack() 73b1edd1cb
fix: clean ubuntu modules package 2023-05-18 09:50:02 +00:00
dump_stack() a607ce62d1
feat: set -cpu max, also for non-kvm 2023-05-17 17:45:52 +00:00
dump_stack() e1ac462642
feat: test lkrg alert 2023-05-17 17:04:13 +00:00
dump_stack() 304bb74ecf
fix: keep target with random name 2023-05-17 16:55:22 +00:00
dump_stack() 8486a0337d
fix: dereference symbolic links only for debian 2023-05-17 16:15:58 +00:00
dump_stack() 2a6e775b69
ci: disable testing with script type as redundant 2023-05-17 15:39:18 +00:00
dump_stack() f2e43f891a
ci: remove check for kvm 2023-05-17 13:40:54 +00:00
dump_stack() 5707559c28
docs: update readme 2023-05-17 13:32:31 +00:00
dump_stack() 51a67db71a
ci: disable fail-fast for examples 2023-05-17 13:22:30 +00:00
dump_stack() 6df94d7e15
feat: add distro type/release to target name 2023-05-17 12:58:14 +00:00
dump_stack() d45d5731a9
fix: dereference symbolic links when copying /usr/src 2023-05-17 12:41:09 +00:00
dump_stack() 950cee6df0
feat: add --dump to dump cache 2023-05-17 12:33:59 +00:00
dump_stack() 7e3f02f3a9
ci: checkout first 2023-05-17 12:04:13 +00:00
dump_stack() 360afdb05e
ci: 755 2023-05-17 12:02:16 +00:00
dump_stack() 0cb9128810
ci: reclaim some space in runner vm 2023-05-17 11:59:15 +00:00
dump_stack() c3f6e90137
ci: fix log names 2023-05-17 11:42:31 +00:00
dump_stack() be5f114694
fix: decrease log level 2023-05-17 11:25:07 +00:00
dump_stack() f1429d3e1d
ci: return back oracle linux to the matrix 2023-05-17 11:11:34 +00:00
dump_stack() fb6ef30aaa
Update readme 2023-05-17 11:09:58 +00:00
dump_stack() eb54ec4a24
ci: do not run on docs/readme change 2023-05-17 11:09:26 +00:00
dump_stack() 5d95422624
ci: typo 2023-05-17 10:36:11 +00:00
dump_stack() e95e8d299f
ci: rename workflows 2023-05-17 10:32:49 +00:00
dump_stack() 3de5f5e12d
ci: add kernel module tests to e2e matrix 2023-05-17 10:24:28 +00:00
dump_stack() a68ceacb43
feat: parallel download of deb packages 2023-05-17 10:04:37 +00:00
dump_stack() 72f52d3200
fix: support --no-headers for debian 2023-05-17 06:50:52 +00:00
dump_stack() 706d442948
fix: install gcc for all debian releases 2023-05-17 06:24:18 +00:00
dump_stack() f7b9f538b4
fix: wait 10 seconds before query with new limit 2023-05-17 06:05:37 +00:00
dump_stack() d70be6a306
feat: install all gcc versions for debian base image 2023-05-17 05:48:28 +00:00
dump_stack() 15a6f38631
fix: lower limit also on connection refused 2023-05-17 05:39:24 +00:00
dump_stack() ac2166b050
refactor: avoid potential typos 2023-05-17 05:28:34 +00:00
dump_stack() f630fa6f49
fix: add timeout to wget 2023-05-17 05:12:32 +00:00
dump_stack() 6e92010dc0
feat: set default limiter timeout to 50ms 2023-05-17 05:05:49 +00:00
dump_stack() 008ce1cdbf
refactor: remove unused code 2023-05-17 05:03:16 +00:00
dump_stack() 5270f2438c
feat: set default limiter timeout to 100ms 2023-05-17 05:02:57 +00:00
dump_stack() 204413af9e
fix: add linux-compiler-* to dependencies 2023-05-17 04:52:04 +00:00
dump_stack() c43f16733e
fix: add libssl-dev for debian base image 2023-05-17 04:51:32 +00:00
dump_stack() 74898924da
ci: set 60 minutes timeout for e2e test jobs 2023-05-17 04:18:22 +00:00
dump_stack() c6acbef7f5
feat: set timeout on first tries to install 2023-05-16 20:30:04 +00:00
dump_stack() d27847c533
ci: typo 2023-05-16 20:25:54 +00:00
dump_stack() eec740b208
ci: return back to Github Actions for E2E testing 2023-05-16 20:13:09 +00:00
dump_stack() bd2dfe3e4e
ci: run debian cache at midnight, change batch size 2023-05-16 19:44:15 +00:00
dump_stack() 6ab8f2fea1
ci: rename ubuntu workflow 2023-05-16 19:40:39 +00:00
dump_stack() e7614ef3a7
feat: use snapshots only in case of failed fetch from repos 2023-05-16 19:37:57 +00:00
dump_stack() 18426775b9
fix: deb packages install command 2023-05-16 19:20:58 +00:00
dump_stack() e87add8e44
ci: remove name 2023-05-16 19:12:03 +00:00
dump_stack() b8d0319097
ci: typo 2023-05-16 19:11:50 +00:00
dump_stack() 968c4d7363
ci: use bash 2023-05-16 19:05:32 +00:00
dump_stack() 246e0efac1
ci: typo 2023-05-16 19:03:30 +00:00
dump_stack() 4cc0166a92
ci: install less packages, add symlink for qemu 2023-05-16 18:59:00 +00:00
dump_stack() 87e9790f79
ci: switch to root 2023-05-16 18:54:59 +00:00
dump_stack() 2af2692a66
ci: correct directory 2023-05-16 18:50:39 +00:00
dump_stack() ef1ebf6f23
ci: parallel testing of examples 2023-05-16 18:45:59 +00:00
dump_stack() 8a7439d7a9
ci: use dnf 2023-05-16 18:41:19 +00:00
dump_stack() 8d93517be7
ci: switch to almalinux 2023-05-16 18:39:34 +00:00
dump_stack() da637c2923
ci: typo 2023-05-16 18:28:37 +00:00
dump_stack() 6f18f6c779
ci: add timeout before enable linger 2023-05-16 18:09:23 +00:00
dump_stack() 66026ebf5a
fix: typo 2023-05-16 17:55:30 +00:00
dump_stack() f5b1283690
fix: try to install debian packages 3 times 2023-05-16 17:52:15 +00:00
dump_stack() f906e3187f
ci: set XDG_RUNTIME_DIR 2023-05-16 17:36:17 +00:00
dump_stack() ba3e6072d4
ci: enable linger 2023-05-16 17:03:55 +00:00
dump_stack() 0338483e72
ci: typo 2023-05-16 16:54:39 +00:00
dump_stack() f2d0035c0e
ci: fix parameters 2023-05-16 16:54:12 +00:00
dump_stack() 7e87567070
ci: add stdout logs 2023-05-16 16:54:05 +00:00
dump_stack() 2be8b14fc7
ci: add to kvm group 2023-05-16 16:03:38 +00:00
dump_stack() a043b998ff
ci: use full path to logs 2023-05-16 15:52:54 +00:00
dump_stack() c527544107
ci: typo 2023-05-16 15:46:06 +00:00
dump_stack() ef4eeea6a2
ci: typo 2023-05-16 15:43:43 +00:00
dump_stack() d4fe5d8f15
ci: do not keep ssh connection 2023-05-16 15:37:14 +00:00
dump_stack() e3652db73b
ci: set droplet region 2023-05-16 15:06:46 +00:00
dump_stack() f571635848
ci: install recommends
rootless podman does not work without it
2023-05-16 14:53:27 +00:00
dump_stack() 2cc06ed092
feat: log output on container error 2023-05-16 14:47:28 +00:00
dump_stack() adc450c201
ci: --no-install-recommends 2023-05-16 14:38:15 +00:00
dump_stack() 5de9c50579
ci: wait for cloud-init to finish 2023-05-16 14:26:22 +00:00
dump_stack() a49d705846
ci: switch to apt 2023-05-16 14:18:14 +00:00
dump_stack() 67630e080b
ci: download by 64 packages 2023-05-16 14:15:18 +00:00
dump_stack() 697eb18552
ci: do not start new fetch after 2 hours 2023-05-16 14:14:24 +00:00
dump_stack() a855a6e70c
ci: switch to apt-get with 360 seconds timeout 2023-05-16 14:11:51 +00:00
dump_stack() d8aafe081f
ci: switch to apt 2023-05-16 14:06:37 +00:00
dump_stack() 4e956d10ad
ci: yet another try to get rid of apt daemon 2023-05-16 13:58:38 +00:00
dump_stack() a6d4fe362c
ci: update repos 2023-05-16 13:48:56 +00:00
dump_stack() 6ff9fcc2c0
ci: switch to github actions build 2023-05-16 13:44:40 +00:00
dump_stack() fca20d2d63
ci: set container registries 2023-05-16 13:15:28 +00:00
dump_stack() 4fe7a0906e
ci: set shell (bash) for the user 2023-05-16 13:05:09 +00:00
dump_stack() 22ddada2f7
ci: fix user authorized keys 2023-05-16 13:00:34 +00:00
dump_stack() 08dcfd52a3
Revert "ci: run from root"
This reverts commit 3dd9071057.
2023-05-16 12:48:08 +00:00
dump_stack() 3dd9071057
ci: run from root 2023-05-16 12:17:18 +00:00
dump_stack() decdf0625b
ci: fix yml 2023-05-16 12:12:48 +00:00
dump_stack() 2ee26c989d
ci: typo 2023-05-16 12:09:33 +00:00
dump_stack() 73eb3bf70d
ci: dependencies 2023-05-16 12:08:31 +00:00
dump_stack() 65688dcd9d
ci: ignore e2e.yml 2023-05-16 12:06:59 +00:00
dump_stack() d447b91908
ci: typo 2023-05-16 12:06:32 +00:00
dump_stack() 926631e19d
ci: e2e testing with kvm 2023-05-16 12:04:28 +00:00
dump_stack() 5ecacf00bd
build: nix flakes 2023-05-16 11:47:17 +00:00
dump_stack() 4d950d7302
refactor: move debian-related functions to debian module 2023-05-16 09:24:34 +00:00
dump_stack() e1ae427757
fix: add gcc-4.6 for wheezy 2023-05-16 09:23:44 +00:00
dump_stack() 4fd2fd31d2
fix: add podman to shell.nix 2023-05-16 09:22:37 +00:00
dump_stack() 046c553ed5
fix: cleanup tar.gz after unpack 2023-05-16 08:17:15 +00:00
dump_stack() a7e5827ff9
ci: mirror debian packages after updating the metadata cache 2023-05-16 08:04:26 +00:00
dump_stack() 7f6fe18d0a
ci: do not start new fetch after 4 hours 2023-05-16 07:52:00 +00:00
dump_stack() 53183245ce
fix: old dhclient requires interface name 2023-05-15 18:55:27 +00:00
dump_stack() 8f1a2afc53
feat: modify cache api to store more than one kernel for version 2023-05-15 18:41:12 +00:00
dump_stack() 8949b53ccc
fix: return after found 2023-05-15 18:17:31 +00:00
dump_stack() 4ea7fbfbf9
refactor: move getting kernel by version to function 2023-05-15 17:57:44 +00:00
dump_stack() 9b33140cc8
feat: always set last fetch date 2023-05-15 17:42:31 +00:00
dump_stack() c13b595ab1
refactor: unexport getDebianKernel 2023-05-15 17:40:53 +00:00
dump_stack() 7c2957dafb
feat: lower debug level 2023-05-15 15:35:48 +00:00
dump_stack() 3e64c99b1c
fix: show actual refetch date 2023-05-15 15:29:23 +00:00
dump_stack() b0c795153a
feat: bump go version 2023-05-15 15:09:09 +00:00
dump_stack() 43bb539db8
ci: rename job 2023-05-15 14:28:52 +00:00
dump_stack() 3959a23efa
fix: typo 2023-05-15 14:26:38 +00:00
dump_stack() 262362659a
ci: archive logs after run 2023-05-15 14:25:41 +00:00
dump_stack() bbdc9712c5
ci: fix typo 2023-05-15 14:25:24 +00:00
dump_stack() 53878bcb23
ci: deb package mirroring 2023-05-15 14:21:49 +00:00
dump_stack() 24c0a05ab0
feat: do not fail if at least one download succeeds 2023-05-15 14:11:08 +00:00
dump_stack() 40b1b223d4
feat: support to specify maximum amount of downloads 2023-05-15 13:41:37 +00:00
dump_stack() 97ee8f09a4
feat: exit with error if no packages found to download 2023-05-15 13:41:04 +00:00
dump_stack() 346e24db6b
feat: support for ignoring packages exists on the mirror 2023-05-15 13:19:17 +00:00
dump_stack() d118ab03c3
feat: check for mirrored packages 2023-05-15 13:07:56 +00:00
dump_stack() e1ac75d0fa
ci: always refetch 2023-05-15 12:17:04 +00:00
dump_stack() 34b5693ae8
test: do not rebuild the cache every time
We can safely do that because cache library does not breaks on structures changes.
2023-05-15 12:07:28 +00:00
dump_stack() 562abec7f4
feat: fail fast on regexp error 2023-05-15 11:50:54 +00:00
dump_stack() 883c8ee6cb
feat: do not download if already exists 2023-05-15 11:48:06 +00:00
dump_stack() 689bf1098a
feat: command to download debian packages 2023-05-15 11:35:15 +00:00
dump_stack() eda23b45b9
feat: combined packages list 2023-05-15 11:14:59 +00:00
dump_stack() 6c1f9f8606
feat: add command for distro-related helpers 2023-05-15 10:50:56 +00:00
dump_stack() 5e11c1939d
fix: set volumes after search 2023-05-15 10:08:34 +00:00
dump_stack() 52c452debe
feat: pass kernel mask to get container volumes 2023-05-15 09:43:48 +00:00
dump_stack() a05b579086
feat: remove the overhead for something not used 2023-05-15 08:21:53 +00:00
dump_stack() d089ad4931
feat: return complete repo info 2023-05-15 08:19:35 +00:00
dump_stack() 0f799b0d5a
feat: implement part of metasnap api 2023-05-15 07:30:00 +00:00
dump_stack() 21882ff461
test: disable fail-fast 2023-05-14 22:37:52 +00:00
dump_stack() 82ba7bd7af
fix: always use the next command because dpkg returns error if deps not met 2023-05-14 22:34:46 +00:00
dump_stack() c0603404a8
feat: use the latest snapshot for Debian Jessie and Stretch 2023-05-14 22:20:34 +00:00
dump_stack() e0b63aee1a
fix: handle existing debian kernels correctly 2023-05-14 22:16:32 +00:00
dump_stack() b2383ba442
feat: add container volumes list function 2023-05-14 22:15:43 +00:00
dump_stack() 1b2d636410
feat: introduce kernel version 2023-05-14 22:00:29 +00:00
dump_stack() de5ebd6455
feat: mount container volumes based on kernel info 2023-05-14 21:39:41 +00:00
dump_stack() 42be5161d8
feat: define container volumes as a structure 2023-05-14 21:24:01 +00:00
dump_stack() 1a2929a1aa
fix: wrong image release 2023-05-14 21:19:06 +00:00
dump_stack() 5778f39ac4
fix: no module-init-tools on debian 9 2023-05-14 21:17:21 +00:00
dump_stack() 032bba6ee5
fix: sed edit in place 2023-05-14 21:12:24 +00:00
dump_stack() 23a28f33d4
test: add debian to e2e test matrix 2023-05-14 21:06:46 +00:00
dump_stack() 8bb211cf01
feat: list debian kernels implementation 2023-05-14 21:04:22 +00:00
dump_stack() c75f10e692
feat: search file in directory by substring 2023-05-14 21:03:57 +00:00
dump_stack() b4a75dc66e
feat: install dependencies, use http for deb urls 2023-05-14 20:32:44 +00:00
dump_stack() f85ad89130
fix: match exact version (binpackages can return more than one) 2023-05-14 19:48:15 +00:00
dump_stack() 501dcb23ae
feat: set default refetch timeout to 14 days 2023-05-14 18:08:26 +00:00
dump_stack() 19081aea5d
test: skip MatchImagePkg test in CI 2023-05-14 17:34:49 +00:00
dump_stack() a090328b1c
fix: exp should be filtered in other place 2023-05-14 17:29:53 +00:00
dump_stack() 2452b090b0
feat: use semver wrapper to match debian release 2023-05-14 17:14:05 +00:00
dump_stack() b09b51840c
feat: fetch kbuild from linux-tools for older kernels 2023-05-14 16:54:12 +00:00
dump_stack() a13a78e292
feat: ignore experimental kernels 2023-05-14 14:34:11 +00:00
dump_stack() e10b50a41a
test: pretty print result 2023-05-14 14:06:04 +00:00
dump_stack() 87ed8da5b8
fix: ignore dbg packages 2023-05-14 14:05:10 +00:00
dump_stack() e9ced28b29
feat: match kbuild 2023-05-14 13:59:25 +00:00
dump_stack() f8f3424e1e
refactor: move cache-related functions 2023-05-14 12:37:45 +00:00
dump_stack() 0fd9d80940
fix: match the "all" arch of the debian kernel packages 2023-05-14 11:41:26 +00:00
dump_stack() fa23cdfc54
feat: support matching several architectures 2023-05-14 11:38:41 +00:00
dump_stack() 6bb0da5082
feat: support multiple headers packages 2023-05-14 11:06:54 +00:00
dump_stack() 6b8d97be39
test: fix kernel release test 2023-05-14 10:30:35 +00:00
dump_stack() 7502221cfd
feat: support adding dependencies to debian kernel packages 2023-05-14 10:27:35 +00:00
dump_stack() 181115d914
fix: use only 3.2 -> 3.7 kernels on wheezy' 2023-05-14 10:13:01 +00:00
dump_stack() f91534aa6a
feat: add common debian packages 2023-05-14 10:12:33 +00:00
dump_stack() 0ee813124d
feat: disable deb package installation without meeting dependencies 2023-05-14 10:12:20 +00:00
dump_stack() f7f8a27dfa
feat: use old debian containers from snapshots 2023-05-14 09:53:59 +00:00
dump_stack() bb676fa491
fix: incorrect comment about valid releases 2023-05-14 07:27:07 +00:00
dump_stack() 78626c10af
feat: get container path relative to config dir 2023-05-14 07:24:49 +00:00
dump_stack() 93a1b74e34
feat: add dummy func to list debian kernels 2023-05-14 07:19:52 +00:00
dump_stack() 73139e1b91
refactor: docker -> container 2023-05-14 07:00:00 +00:00
dump_stack() e231121082
feat: install debian kernels 2023-05-14 06:53:32 +00:00
dump_stack() 9e8a381de0
feat: implement basic debian container image 2023-05-13 19:51:57 +00:00
dump_stack() 17295cad89
feat: list debian kernels 2023-05-13 19:51:06 +00:00
dump_stack() 29010b2a1b
feat: support getting cached kernels 2023-05-13 19:48:01 +00:00
dump_stack() 0bf2acb043
feat: lowercase release strings 2023-05-13 19:08:46 +00:00
dump_stack() d0693e64c4
feat: make sure of cache thread-safety 2023-05-13 18:49:11 +00:00
dump_stack() 70fec57d2f
feat: debian versions cache 2023-05-13 18:43:15 +00:00
dump_stack() 2cc84ac962
fix: match kernel release correctly 2023-05-13 17:42:25 +00:00
dump_stack() 143e54984d
test: cover more kernel releases 2023-05-13 17:32:02 +00:00
dump_stack() c6d0ee0102
feat: lower debug level for some container-related stuff 2023-05-13 16:11:45 +00:00
dump_stack() 39f4cd4cfd
fix: check if cache already exists 2023-05-13 16:08:59 +00:00
dump_stack() 065aca24b0
fix: fill config value in case of default directory 2023-05-13 15:56:54 +00:00
dump_stack() baf282ec2c
fix: typo 2023-05-13 15:56:11 +00:00
dump_stack() 9d1bbcc288
feat: support changing dotdir 2023-05-13 15:45:21 +00:00
dump_stack() 804b6b56ba
feat: debug logging for download 2023-05-13 12:56:54 +00:00
dump_stack() 5975898225
test: use tmpdir to not interfere with other tests 2023-05-13 12:56:25 +00:00
dump_stack() dc8d667930
fix: filename is already full path 2023-05-13 12:50:26 +00:00
dump_stack() 05f210494a
test: missing panic 2023-05-13 12:40:36 +00:00
dump_stack() fb5411503c
fix: log error if no cache found 2023-05-13 12:38:03 +00:00
dump_stack() 1818d38b03
test: cover download debian cache 2023-05-13 12:36:19 +00:00
dump_stack() c8d171da98
refactor: use fs/ helper to get temp dir 2023-05-13 12:33:12 +00:00
dump_stack() 4e77cf82d3
refactor: use one provider for default cache url 2023-05-13 12:14:20 +00:00
dump_stack() 60a1d19042
test: avoid api requests 2023-05-13 12:02:41 +00:00
dump_stack() 7cf1bbd194
refactor: remove global temporary directory base 2023-05-13 11:32:29 +00:00
dump_stack() 5ada1ef41a
feat: introduce temp dir helper 2023-05-13 11:29:25 +00:00
dump_stack() 997d6a67ba
test: fix match image test 2023-05-13 11:17:11 +00:00
dump_stack() 1d22902eb0
feat: download debian cache automatically 2023-05-13 11:04:35 +00:00
dump_stack() 59febd75b0
test: enable match image test 2023-05-13 11:00:34 +00:00
dump_stack() b1b7a9e675
refactor: move kernel functions to submodule 2023-05-13 10:47:47 +00:00
dump_stack() 95695a4070
feat: add cache argument 2023-05-13 10:46:43 +00:00
dump_stack() 28acc51417
test: disable match image test until cache is implemented 2023-05-13 10:24:29 +00:00
dump_stack() ebc597ff0b
refactor: move container functions to submodule 2023-05-13 10:14:45 +00:00
dump_stack() 2c2435a7a5
refactor: use cavaliergopher/grab to download files 2023-05-13 09:43:06 +00:00
dump_stack() 2977b6f7fd
test: add download image test 2023-05-13 09:31:25 +00:00
dump_stack() da5797766b
refactor: move fs-related functions to submodule 2023-05-13 09:17:57 +00:00
dump_stack() 9b987bcc82
Refactor 2023-05-13 09:09:29 +00:00
dump_stack() 599ce03ca4
Implement config paths helpers 2023-05-13 08:44:45 +00:00
dump_stack() d13eab6947
Match by debian release 2023-05-12 20:05:44 +00:00
dump_stack() 9fd4b541da
Add missing directory 2023-05-12 19:34:55 +00:00
dump_stack() 43aa116682
Implement match for Debian packages 2023-05-12 17:27:48 +00:00
dump_stack() b5d4bdd5f4
Fetch debian kernels function 2023-05-12 15:02:43 +00:00
dump_stack() fa579e5170
Wait to make sure IP is ready 2023-05-12 12:36:06 +00:00
dump_stack() b310b29670
Sleep is more error-prone ¯\_(ツ)_/¯ 2023-05-12 12:29:01 +00:00
dump_stack() 490d063e5b
Wildcard 2023-05-12 12:24:04 +00:00
dump_stack() 33ee48692c
Disable auto-upgrades 2023-05-12 12:21:32 +00:00
dump_stack() ed5d0ab1d1
Increase exit check timeout 2023-05-12 12:16:32 +00:00
dump_stack() ea6775fa45
Handle ssh errors 2023-05-12 12:13:04 +00:00
dump_stack() 4e16dec7c1
Do not exit in case of unsuccessful kill 2023-05-12 12:12:41 +00:00
dump_stack() 0a1a5890ed
Add setup.sh for debian image generator 2023-05-12 12:08:03 +00:00
dump_stack() 593c152798
Handle exit codes correctly 2023-05-12 12:07:42 +00:00
dump_stack() e8554e7c4a
Test 2023-05-12 11:27:30 +00:00
dump_stack() 39d7adc72f
Fix early exit 2023-05-12 11:15:38 +00:00
dump_stack() 4688c6aefd
Fix status code 2023-05-12 11:07:39 +00:00
dump_stack() 4f2b7e1962
We still need to wait to kill apt properly 2023-05-12 11:00:07 +00:00
dump_stack() dc025ff32f
Fix broken ssh connections 2023-05-12 10:56:22 +00:00
dump_stack() 11c69f24ce
Set public acl, fix host-bucket 2023-05-12 09:31:46 +00:00
dump_stack() da57e4e6b5
Run workflows on their change 2023-05-12 09:13:11 +00:00
dump_stack() f46a2ec4b0
Do not run tests on images actions change 2023-05-12 09:00:47 +00:00
dump_stack() 166c125391
Rename 2023-05-12 08:58:35 +00:00
dump_stack() bb511898bb
Rename 2023-05-12 08:16:24 +00:00
dump_stack() e919def8d0
Rename 2023-05-12 08:13:59 +00:00
dump_stack() 8f06448240
Switch to podman 2023-05-12 08:11:57 +00:00
dump_stack() 7d88f09bb0
Use separate workflows for generating images 2023-05-12 08:11:38 +00:00
dump_stack() 9c4313c3a4
Archive logs on failure 2023-05-12 07:59:58 +00:00
dump_stack() e87a6be1bc
Typo 2023-05-12 07:54:09 +00:00
dump_stack() eb51469283
Switch to s3cmd 2023-05-12 07:53:23 +00:00
dump_stack() 3ae8707dcb
Missing quotation mark 2023-05-12 07:52:40 +00:00
dump_stack() 9c830bf22a
Remove newline 2023-05-12 07:48:56 +00:00
dump_stack() a78429729b
Upload images 2023-05-12 07:47:22 +00:00
dump_stack() c965cf3ade
Remove sh -c 2023-05-12 07:41:44 +00:00
dump_stack() 5b50fd2b13
Switch back to pkill 2023-05-12 07:36:32 +00:00
dump_stack() c860b77332
Do not delete droplet (for debug) 2023-05-12 07:32:49 +00:00
dump_stack() 4e4c62c376
Proper wait for the lock 2023-05-12 07:32:03 +00:00
dump_stack() a48cbe4bb2
Kill apt 2023-05-12 07:29:04 +00:00
dump_stack() 2179f84874
Wait for the apt lock 2023-05-12 07:24:12 +00:00
dump_stack() 33d3d28e5d
Add ssh key 2023-05-12 07:19:55 +00:00
dump_stack() 8873566dcb
Switch to podman 2023-05-12 07:09:56 +00:00
dump_stack() fb12fc2f65
Fix quotes 2023-05-12 07:05:34 +00:00
dump_stack() da28fef007
Boilerplate for external VM action 2023-05-12 06:58:17 +00:00
dump_stack() 44d474d6e5
Fix path 2023-05-12 05:41:07 +00:00
dump_stack() 3e87a1b651
Run with sudo 2023-05-12 02:06:25 +00:00
dump_stack() 310999744e
Untabify 2023-05-12 02:05:36 +00:00
dump_stack() 61ac856afb
Try from the same directory 2023-05-12 02:05:28 +00:00
dump_stack() 995f24fdc4
Remove policykit-1 on Debian 11 2023-05-12 01:40:28 +00:00
dump_stack() 8b807a7e77
Allow to run manually 2023-05-12 01:32:46 +00:00
dump_stack() 467f31d141
Upload images 2023-05-12 01:31:14 +00:00
dump_stack() 9752c7ae7d
Typo 2023-05-12 01:25:42 +00:00
dump_stack() e3c2bb134f
Upload cache 2023-05-12 01:25:05 +00:00
dump_stack() 94f77cd742
Add sources.list to the image 2023-05-12 01:09:31 +00:00
dump_stack() 028fa48f71
Generate images on push 2023-05-12 01:04:17 +00:00
dump_stack() 2910ce17c7
Add generator for debian images 2023-05-12 00:59:36 +00:00
dump_stack() 183b4698dd
Build debian cache once a week 2023-05-12 00:39:50 +00:00
dump_stack() efbdc9db36
Implement refetch 2023-05-12 00:07:51 +00:00
dump_stack() 3d2b8b7d2a
Disable E2E testing for Oracle Linux 9
Works fine locally, but for some reason does not work on GitHub Actions
2023-05-11 23:22:44 +00:00
dump_stack() 9190b850b7
Rename for consistency 2023-05-11 22:53:01 +00:00
dump_stack() d41846ede2
Fix parameters 2023-05-11 22:35:38 +00:00
dump_stack() 52c6581675
Ignore cache 2023-05-11 22:18:14 +00:00
dump_stack() 83c1ca303c
Add debian code names 2023-05-11 22:17:46 +00:00
dump_stack() ad0a3706cb
Implement as normal filter 2023-05-11 22:17:31 +00:00
dump_stack() 2e6ce1e8f9
Skip also all-amd64 2023-05-11 22:06:54 +00:00
dump_stack() a77d56c26b
Do not produce error on empty response 2023-05-11 22:06:31 +00:00
dump_stack() 53324e663a
Add ErrNotFound 2023-05-11 21:59:44 +00:00
dump_stack() d4fbec39a6
Crutch for regex 2023-05-11 21:47:43 +00:00
dump_stack() cca637cf9d
Fix regexp 2023-05-11 21:36:54 +00:00
dump_stack() bef382920e
Fix regexp 2023-05-11 21:35:20 +00:00
dump_stack() 20cbd2f72e
Fix regexp 2023-05-11 21:34:32 +00:00
dump_stack() 0594b0ea60
More than one result is legit 2023-05-11 21:29:15 +00:00
dump_stack() f6cb2d8d12
Check if package already in slice 2023-05-11 21:22:28 +00:00
dump_stack() 27a22ba023
Dynamic rate limiter 2023-05-11 21:15:29 +00:00
dump_stack() da9a243ee4
Match bpo only 2023-05-11 20:52:33 +00:00
dump_stack() e123bf258c
Fix missing error check 2023-05-11 20:45:53 +00:00
dump_stack() 9c563ca68b
Lower rate limit 2023-05-11 20:43:29 +00:00
dump_stack() 2b0d7b0460
Fix regex 2023-05-11 20:42:25 +00:00
dump_stack() 68ad89973c
Lower rate limit 2023-05-11 20:42:12 +00:00
dump_stack() 96a6dd7fb9
Missing continue 2023-05-11 20:29:39 +00:00
dump_stack() 6a0846e129
Better error handling 2023-05-11 20:26:39 +00:00
dump_stack() 5b396e7b5e
Expose error variables 2023-05-11 20:25:34 +00:00
dump_stack() 7e545586bc
Trace 2023-05-11 20:24:54 +00:00
dump_stack() da6843f9b7
Add command to populate debian cache 2023-05-11 20:08:08 +00:00
dump_stack() 26c261b6f3
Remove query escape (endpoint does not apper to support it) 2023-05-11 19:43:42 +00:00
dump_stack() 77be74797b
Implement debian package 2023-05-11 19:30:25 +00:00
dump_stack() d04a9de932
Implement package cache API 2023-05-11 18:45:44 +00:00
dump_stack() ac74e450c3
Add dists at time of snapshot, remove codename field (-> function) 2023-05-11 16:52:47 +00:00
dump_stack() 41c84c70f6
Add codename, retries, timeouts 2023-05-11 16:26:16 +00:00
dump_stack() 3ba71a7787
Add logging, rate limiter, retries 2023-05-11 16:07:15 +00:00
dump_stack() aaca60cafc
Add deb package url and its repo component 2023-05-11 13:20:36 +00:00
dump_stack() 3a29b3b869
Handle http errors 2023-05-11 12:58:33 +00:00
dump_stack() 07bd886383
Base mr snapshot wrappers 2023-05-11 12:03:18 +00:00
dump_stack() c3042c97f0
Assert mr API 2023-05-11 11:54:35 +00:00
dump_stack() 1b68fc571f
Rename 2023-05-11 10:58:34 +00:00
dump_stack() b1d034a7fe
Implement debian snapshot mr api 2023-05-11 10:21:21 +00:00
dump_stack() 35d34fdbe0
Add yum to oraclelinux images 2023-05-11 08:20:35 +00:00
dump_stack() cc470d2105
Add yum/dnf to centos8 image 2023-05-11 08:04:13 +00:00
dump_stack() 5f0749155a
Fix CentOS 8 image generator 2023-05-11 07:57:25 +00:00
dump_stack() e2e0dad1ad
Check that fs is case-sensitive 2023-05-11 02:42:34 +00:00
dump_stack() d5ea97c532
Stop waiting for ssh if qemu is dead 2023-05-10 12:30:01 +00:00
dump_stack() 31370b7bad
Reduce layers 2023-05-10 11:20:07 +00:00
dump_stack() cbdef2936d
Upload all logs 2023-05-10 10:25:19 +00:00
dump_stack() bc9eea879d
Test all examples 2023-05-10 10:20:43 +00:00
dump_stack() b0dae2fc69
Rename jobs 2023-05-10 10:12:53 +00:00
dump_stack() c22cea2834
Remove genall test 2023-05-10 10:12:39 +00:00
dump_stack() 828511f0eb
Fix yml 2023-05-10 10:06:05 +00:00
dump_stack() 0735f1d581
Fix yml 2023-05-10 10:04:22 +00:00
dump_stack() f816b43609
Refactor 2023-05-10 10:02:12 +00:00
dump_stack() 52d3d339df
Typo 2023-05-10 09:51:08 +00:00
dump_stack() 308b916b0f
End-to-End Testing for CentOS and Oracle Linux 2023-05-10 09:49:15 +00:00
dump_stack() 5ecf36ebc3
End-to-End Testing (Ubuntu) 2023-05-10 09:36:19 +00:00
dump_stack() 1351819f17
Use old-releases for Ubuntu 12.04 2023-05-10 09:13:05 +00:00
dump_stack() 0323d3d941
Typo 2023-05-10 09:10:07 +00:00
dump_stack() fa9ee43817
End-to-End Testing (list-remote) 2023-05-10 09:09:12 +00:00
dump_stack() 25fad476b4
Image generator for CentOS 6 2023-05-10 08:35:56 +00:00
dump_stack() f484dd99de
tar generated images 2023-05-10 08:21:43 +00:00
dump_stack() 5aed593d81
Make container update parameter available for all kernel commands 2023-05-10 08:09:53 +00:00
dump_stack() e8a7c043d6
Fix CentOS support 2023-05-10 08:04:42 +00:00
dump_stack() dbd3449074
Remove required for --update flag 2023-05-10 07:57:14 +00:00
dump_stack() e274fe55f0
Implement --list-remote command 2023-05-10 07:52:44 +00:00
dump_stack() 66bd74d59d
Fix dracut cmdline 2023-05-10 05:37:17 +00:00
dump_stack() 98a0a561f1
Remove Oracle Linux tests 2023-05-10 05:26:45 +00:00
dump_stack() f8880b2486
no space left on device 2023-05-09 21:07:05 +00:00
dump_stack() f5f87867ac
Remove Oracle Linux 6 test 2023-05-09 20:43:39 +00:00
dump_stack() 70ac88a07f
No libdtrace-ctf on el8 2023-05-09 20:16:28 +00:00
dump_stack() 5d13843835
Do not download images 2023-05-09 19:58:35 +00:00
dump_stack() 0f36b3b55b
Test installation of Oracle Linux kernels 2023-05-09 19:29:17 +00:00
dump_stack() aa32c7a8ae
Make sure libdtrace-ctf available on all versions 2023-05-09 19:19:46 +00:00
dump_stack() f7fcfe8292
Add libdtrace-ctf on el6 2023-05-09 19:13:22 +00:00
dump_stack() d5b733a0a0
Force drivers only on 8+ 2023-05-09 17:53:39 +00:00
dump_stack() cd68dc1ddc
Add kernel modules to boot on el{8,9} 2023-05-09 17:45:47 +00:00
dump_stack() 2460b8230a
Ignore logs 2023-05-09 17:43:53 +00:00
dump_stack() 2f4c859dd8
Show warning in case no kernel packages matched 2023-05-09 16:31:34 +00:00
dump_stack() 169acca9df
Use bash 2023-05-09 16:19:10 +00:00
dump_stack() 48be2df1b0
Add Oracle Linux image generator 2023-05-09 16:14:15 +00:00
dump_stack() 0e85866822
Implement Oracle Linux support 2023-05-09 14:40:06 +00:00
dump_stack() e291352925
Sync 2023-05-08 22:31:46 +00:00
dump_stack() c14c5989a4
Flag to override rootfs image 2023-05-08 22:21:28 +00:00
dump_stack() 3677adece9
Implements the parameter to specify an endless stress script 2023-05-08 22:01:10 +00:00
dump_stack() ca95155ce0
endless: exit on first error 2023-05-08 21:36:25 +00:00
dump_stack() 15d8ab8137
Do not lose stdout 2023-05-08 21:35:47 +00:00
dump_stack() 1a1afce4f2
Implements endless tests 2023-05-08 21:19:06 +00:00
dump_stack() 17a70fdb2d
Add option for timeout after starting of the qemu vm before tests 2023-05-08 19:56:10 +00:00
dump_stack() 8ec4f13364
Lower log level for debug output 2023-05-08 19:18:49 +00:00
dump_stack() c1c5afc0e0
ssh dial with retry 2023-05-08 14:54:28 +00:00
dump_stack() 2e5c386c42
Revert "Lock ssh handshake"
This reverts commit b558269ac3.
2023-05-08 14:40:57 +00:00
dump_stack() b558269ac3
Lock ssh handshake 2023-05-08 14:30:53 +00:00
dump_stack() d4f826d44b
Debug logging 2023-05-08 14:30:09 +00:00
dump_stack() 7b94053376
Avoid use of external scp command 2023-05-07 18:14:59 +00:00
dump_stack() 0e08d87a64
Fix check whether vm is alive 2023-05-07 16:05:11 +00:00
dump_stack() 734240500b
Revert "Ensure log file is available for VM status check"
This reverts commit 8c1024b36c.
2023-05-07 16:03:28 +00:00
dump_stack() 8c1024b36c
Ensure log file is available for VM status check 2023-05-07 15:59:45 +00:00
dump_stack() ababd027f9
Revert "Do not close log file immediately"
This reverts commit e8a446ec76.
2023-05-07 15:46:54 +00:00
dump_stack() 0826948568
Try creating a new session in ssh check 2023-05-07 15:24:10 +00:00
dump_stack() e8a446ec76
Do not close log file immediately 2023-05-07 14:54:36 +00:00
dump_stack() de3f361e51
Remove colors from the cwd logs 2023-05-02 17:40:28 +00:00
dump_stack() 8279517ecf
Do not show distro type/release and kernel version in cwd logs 2023-05-02 12:38:06 +00:00
dump_stack() 4f80122039
Implements per-test logging to the current working directory 2023-05-02 11:10:57 +00:00
dump_stack() 8922b3e548
Retry scp if failed 2023-05-01 15:10:39 +00:00
dump_stack() 321fe0567a
Fix 2023-05-01 14:57:08 +00:00
dump_stack() 530b98e513
Handle empty build dir correctly 2023-05-01 14:53:52 +00:00
dump_stack() 249f11d059
Do not exit copying error, skip 2023-05-01 14:52:16 +00:00
dump_stack() b1f5a36a32
Improve logging 2023-05-01 14:51:58 +00:00
dump_stack() 8fa62e9a6e
Bump version 2023-04-26 14:36:25 +00:00
dump_stack() e04154b235
Fix stderr log 2023-04-26 14:35:20 +00:00
dump_stack() 096cad8701
Cleanup also -core/-modules 2023-04-09 17:13:54 +00:00
dump_stack() 70d464f0e2
Fix directory name 2023-04-09 14:38:39 +00:00
dump_stack() d65d683dfc
Fix name of ubuntu image generator 2023-04-09 14:16:29 +00:00
dump_stack() bde115f5df
Bump version 2023-04-08 14:57:39 +00:00
dump_stack() d972bae547
Send SIGINT first while killing the container by timeout 2023-04-08 14:50:18 +00:00
dump_stack() b3d4a0dbc2
Update changelog 2023-04-07 21:38:30 +00:00
dump_stack() 4a3d739b85
Implements dry run for image edit 2023-04-07 21:30:03 +00:00
dump_stack() bb319a9ff6
Export qemu arguments 2023-04-07 21:17:34 +00:00
dump_stack() 21daac4fbc
Check for shutdown before log current kernel 2023-04-07 21:03:31 +00:00
dump_stack() 841fd7f585
Graceful shutdown on ^C 2023-04-07 20:52:45 +00:00
dump_stack() b812048408
Typo 2023-04-07 19:11:42 +00:00
dump_stack() a5edc4837f
Update readme 2023-04-07 19:09:33 +00:00
dump_stack() 9e55ebd44e
Add a flag to set the container runtime binary 2023-04-07 18:57:18 +00:00
dump_stack() e35e030c54
Install the kernel in a single container run 2023-04-07 17:47:54 +00:00
dump_stack() a4f2a31819
Correctly handle empty workdir 2023-04-07 17:46:36 +00:00
dump_stack() c3cf25e523
Allow to disable container volumes mount 2023-04-07 17:35:00 +00:00
dump_stack() 056e38698e
Use single temp directory base 2023-04-07 16:44:21 +00:00
dump_stack() 32b692f752
Cleanup after cache kernel package dependencies 2023-04-07 16:27:56 +00:00
dump_stack() 3f8c7fd86b
brew cask is no longer a brew command 2023-04-07 14:30:59 +00:00
120 changed files with 10508 additions and 2927 deletions

117
.github/workflows/debian-cache.yml vendored Normal file
View File

@ -0,0 +1,117 @@
name: Debian Cache
on:
workflow_dispatch:
schedule:
- cron: '0 0 * * *'
push:
paths:
- '.github/workflows/debian-cache.yml'
- 'distro/debian/snapshot/**'
- 'distro/debian/cache.go'
- 'distro/debian/kernel.go'
concurrency:
group: ${{ github.workflow_ref }}
jobs:
debian-kernel-metadata-cache:
name: Metadata
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Build
run: go build
- name: Cache
run: ./out-of-tree --log-level=trace distro debian cache --refetch=0 --limit=128 --update-release
- name: Install s3cmd
run: sudo apt install s3cmd
- name: Archive cache
uses: actions/upload-artifact@v4
with:
name: debian-cache
path: ~/.out-of-tree/debian.cache
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: debian-metadata-cache-logs
path: ~/.out-of-tree/logs
- name: Upload cache
run: s3cmd put --acl-public ~/.out-of-tree/debian.cache s3://out-of-tree/1.0.0/ --host=fra1.digitaloceanspaces.com --host-bucket='%(bucket)s.fra1.digitaloceanspaces.com' --access_key=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} --secret_key=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }}
debian-kernel-packages-mirror:
name: Packages
needs: debian-kernel-metadata-cache
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Build
run: go build
- name: Install s3cmd
run: sudo apt install s3cmd
- name: Mirror deb packages
shell: python
run: |
import os
import logging
import time
import datetime
from subprocess import getstatusoutput
def get_kernels() -> bool:
status, output = getstatusoutput(
"./out-of-tree distro debian fetch --max=16 --limit=1"
)
logging.info(output)
return status == 0
def upload(f: str) -> bool:
status, output = getstatusoutput(
"s3cmd "
"--host=fra1.digitaloceanspaces.com "
"--host-bucket='%(bucket)s.fra1.digitaloceanspaces.com' "
"--access_key=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} "
"--secret_key=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} "
f"put --acl-public {f} "
"s3://out-of-tree/1.0.0/packages/debian/"
)
logging.info(output)
return status == 0
logging.basicConfig(level=logging.NOTSET)
uploaded = []
timeout = time.time() + datetime.timedelta(hours=2).seconds
while get_kernels() and time.time() < timeout:
for f in os.listdir():
if not f.endswith('.deb'):
continue
if f in uploaded:
continue
logging.info(f)
ok = upload(f)
if ok:
uploaded += [f]
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: debian-packages-cache-logs
path: ~/.out-of-tree/logs

172
.github/workflows/e2e.yml vendored Normal file
View File

@ -0,0 +1,172 @@
name: E2E
on:
workflow_dispatch:
push:
paths-ignore:
- ".github/workflows/images-*"
- ".github/workflows/ubuntu.yml"
- ".github/workflows/macos.yml"
- ".github/workflows/debian-cache.yml"
- "docs/**"
- ".readthedocs.yaml"
- "README.md"
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
test-end-to-end:
name: Module
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
os: [
{ distro: Ubuntu, release: 12.04 },
{ distro: Ubuntu, release: 14.04 },
{ distro: Ubuntu, release: 16.04 },
{ distro: Ubuntu, release: 18.04 },
{ distro: Ubuntu, release: 20.04 },
{ distro: Ubuntu, release: 22.04 },
{ distro: CentOS, release: 6 },
{ distro: CentOS, release: 7 },
{ distro: CentOS, release: 8 },
{ distro: OracleLinux, release: 6 },
{ distro: OracleLinux, release: 7 },
{ distro: OracleLinux, release: 8 },
{ distro: OracleLinux, release: 9 },
{ distro: Debian, release: 7 },
{ distro: Debian, release: 8 },
{ distro: Debian, release: 9 },
{ distro: Debian, release: 10 },
{ distro: Debian, release: 11 },
{ distro: Debian, release: 12 },
{ distro: OpenSUSE, release: "12.1" },
{ distro: OpenSUSE, release: "12.2" },
{ distro: OpenSUSE, release: "12.3" },
{ distro: OpenSUSE, release: "13.1" },
{ distro: OpenSUSE, release: "13.2" },
{ distro: OpenSUSE, release: "42.1" },
{ distro: OpenSUSE, release: "42.2" },
{ distro: OpenSUSE, release: "42.3" },
{ distro: OpenSUSE, release: "15.0" },
{ distro: OpenSUSE, release: "15.1" },
{ distro: OpenSUSE, release: "15.2" },
{ distro: OpenSUSE, release: "15.3" },
{ distro: OpenSUSE, release: "15.4" },
{ distro: OpenSUSE, release: "15.5" }
]
steps:
- uses: actions/checkout@v1
- name: Build
run: go build
- uses: digitalocean/action-doctl@v2
with:
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
- uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: Create droplet
run: >-
doctl compute droplet create
--size s-4vcpu-8gb-intel
--tag-name=github-actions
--image almalinux-9-x64
--ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c'
--wait
--region fra1
ga-out-of-tree-e2e-${{ matrix.os.distro }}-${{ matrix.os.release }}-$GITHUB_SHA
- name: End-to-End Testing [${{ matrix.os.distro }} ${{ matrix.os.release }}]
shell: bash
run: |
sleep 1m
IP=$(doctl compute droplet list \
--tag-name=github-actions \
--format "Name,Public IPv4" \
| grep -v ID \
| grep ga-out-of-tree-e2e-${{ matrix.os.distro }}-${{ matrix.os.release }}-$GITHUB_SHA \
| awk '{print $2}')
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
do
sleep 1s
done
ssh root@$IP "cloud-init status --wait"
ssh root@$IP "dnf install -y podman qemu-kvm-core epel-release"
ssh root@$IP "dnf install -y s3cmd"
ssh root@$IP "ln -s /usr/libexec/qemu-kvm /usr/bin/qemu-system-x86_64"
scp ./out-of-tree root@$IP:/usr/local/bin/
echo 'name = "test"' > examples/kernel-module/.out-of-tree.toml
echo 'type = "module"' >> examples/kernel-module/.out-of-tree.toml
echo 'standard_modules = true' >> examples/kernel-module/.out-of-tree.toml
echo '[[targets]]' >> examples/kernel-module/.out-of-tree.toml
echo 'distro = { id = "${{ matrix.os.distro }}", release = "${{ matrix.os.release }}" }' >> examples/kernel-module/.out-of-tree.toml
echo 'kernel = { regex = ".*" }' >> examples/kernel-module/.out-of-tree.toml
echo '[qemu]' >> examples/kernel-module/.out-of-tree.toml
echo 'timeout = "10m"' >> examples/kernel-module/.out-of-tree.toml
echo 'after_start_timeout = "10s"' >> examples/kernel-module/.out-of-tree.toml
echo 'modprobe uio || modprobe 9p || modprobe xfs' >> examples/kernel-module/test.sh
scp -r examples/kernel-module root@$IP:test
echo '[Unit]' >> test.service
echo 'Description=e2e' >> test.service
echo '[Service]' >> test.service
echo 'RemainAfterExit=yes' >> test.service
echo 'StandardError=append:/var/log/test.log' >> test.service
echo 'StandardOutput=append:/var/log/test.log' >> test.service
echo 'Type=oneshot' >> test.service
echo 'WorkingDirectory=/root/test' >> test.service
echo 'TimeoutStopSec=1' >> test.service
echo 'ExecStart=/usr/local/bin/out-of-tree kernel --no-container-cache autogen --threads=8 --max=128 --shuffle' >> test.service
echo 'ExecStart=/usr/local/bin/out-of-tree pew --qemu-timeout=10m --threads=4 --include-internal-errors' >> test.service
scp test.service root@$IP:/etc/systemd/system/test.service
ssh root@$IP systemctl daemon-reload
ssh root@$IP setenforce 0
ssh root@$IP systemctl start test --no-block
while ! ssh root@$IP systemctl show test -p SubState --value | grep -E '(failed|exited)'
do
sleep 30s
done
ssh root@$IP "cat /var/log/test.log"
scp -r root@$IP:.out-of-tree/logs .
ssh root@$IP systemctl is-active test || exit 1
ssh root@$IP "/usr/local/bin/out-of-tree container save"
ssh root@$IP "s3cmd put --acl-public *.tar.gz s3://out-of-tree/1.0.0/containers/ --host=fra1.digitaloceanspaces.com --host-bucket='%(bucket)s.fra1.digitaloceanspaces.com' --access_key=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} --secret_key=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }}"
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: test-end-to-end-${{ matrix.os.distro }}-${{ matrix.os.release }}-logs
path: logs
- name: Delete droplet
if: always()
run: doctl compute droplet delete -f ga-out-of-tree-e2e-${{ matrix.os.distro }}-${{ matrix.os.release }}-$GITHUB_SHA

86
.github/workflows/images-debian.yml vendored Normal file
View File

@ -0,0 +1,86 @@
name: Debian
on:
workflow_dispatch:
push:
paths:
- 'tools/qemu-debian-img/**'
- '.github/workflows/images-debian.yml'
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
images:
name: Qemu Images
runs-on: ubuntu-latest
steps:
- uses: digitalocean/action-doctl@v2
with:
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
- uses: webfactory/ssh-agent@v0.8.0
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: create droplet
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94' --tag-name=github-actions ga-out-of-tree-images-debian-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
# TODO Move to common script
- name: generate images
shell: bash
run: |
sleep 1m
IP=$(doctl compute droplet list --tag-name=github-actions --format "Name,Public IPv4" | grep -v ID | grep ga-out-of-tree-images-debian-$GITHUB_SHA | awk '{print $2}')
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
do
sleep 1s
done
sleep 5m
ssh root@$IP pkill apt-get || true
ssh root@$IP apt-get update
ssh root@$IP apt-get install -y git podman s3cmd
ssh root@$IP git clone https://github.com/out-of-tree/out-of-tree
ssh root@$IP "cd out-of-tree && git checkout $GITHUB_SHA"
ssh root@$IP "echo -e '[Unit]\nDescription=Debian image generator and uploader\n[Service]\nRemainAfterExit=yes\nStandardError=append:/var/log/images-debian.log\nStandardOutput=append:/var/log/images-debian.log\nType=oneshot' >> /etc/systemd/system/images-debian.service"
ssh root@$IP "echo Environment=HOST=fra1.digitaloceanspaces.com >> /etc/systemd/system/images-debian.service"
ssh root@$IP "echo Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com >> /etc/systemd/system/images-debian.service"
ssh root@$IP "echo Environment=ACCESS_KEY=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} >> /etc/systemd/system/images-debian.service"
ssh root@$IP "echo Environment=SECRET_KEY=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} >> /etc/systemd/system/images-debian.service"
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-debian-img/generate-images.sh' >> /etc/systemd/system/images-debian.service"
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-debian-img/*.tar.gz s3://out-of-tree/1.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-debian.service'
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-debian.service"
ssh root@$IP systemctl daemon-reload
ssh root@$IP systemctl start images-debian --no-block
while ! ssh root@$IP systemctl show images-debian -p SubState --value | grep -E '(failed|exited)'
do
sleep 3m
done
scp root@$IP:/var/log/images-debian.log .
ssh root@$IP systemctl is-active images-debian
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: images-debian-log
path: images-debian.log
- name: delete droplet
if: always()
run: doctl compute droplet delete -f ga-out-of-tree-images-debian-$GITHUB_SHA

View File

@ -0,0 +1,79 @@
name: Oracle Linux
on:
workflow_dispatch:
push:
paths:
- 'tools/qemu-oraclelinux-img/**'
- '.github/workflows/images-oraclelinux.yml'
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
images-oraclelinux:
name: Qemu Images
runs-on: ubuntu-latest
steps:
- uses: digitalocean/action-doctl@v2
with:
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
- uses: webfactory/ssh-agent@v0.8.0
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: create droplet
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94' --tag-name=github-actions ga-out-of-tree-images-oraclelinux-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
# TODO Move to common script
- name: generate images
shell: bash
run: |
sleep 1m
IP=$(doctl compute droplet list --tag-name=github-actions --format "Name,Public IPv4" | grep -v ID | grep ga-out-of-tree-images-oraclelinux-$GITHUB_SHA | awk '{print $2}')
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
do
sleep 1s
done
sleep 5m
ssh root@$IP pkill apt-get || true
ssh root@$IP apt-get update
ssh root@$IP apt-get install -y git podman s3cmd
ssh root@$IP git clone https://github.com/out-of-tree/out-of-tree
ssh root@$IP "cd out-of-tree && git checkout $GITHUB_SHA"
ssh root@$IP "echo -e '[Unit]\nDescription=Oracle Linux image generator and uploader\n[Service]\nRemainAfterExit=yes\nStandardError=append:/var/log/images-oraclelinux.log\nStandardOutput=append:/var/log/images-oraclelinux.log\nType=oneshot' >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP "echo Environment=HOST=fra1.digitaloceanspaces.com >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP "echo Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP "echo Environment=ACCESS_KEY=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP "echo Environment=SECRET_KEY=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-oraclelinux-img/generate-images.sh' >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-oraclelinux-img/*.tar.gz s3://out-of-tree/1.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-oraclelinux.service'
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP systemctl daemon-reload
ssh root@$IP systemctl start images-oraclelinux --no-block
while ! ssh root@$IP systemctl show images-oraclelinux -p SubState --value | grep -E '(failed|exited)'
do
sleep 3m
done
scp root@$IP:/var/log/images-oraclelinux.log .
ssh root@$IP systemctl is-active images-oraclelinux
- name: delete droplet
if: always()
run: doctl compute droplet delete -f ga-out-of-tree-images-oraclelinux-$GITHUB_SHA

View File

@ -1,10 +1,23 @@
name: macOS
on: [push, pull_request]
on:
workflow_dispatch:
push:
paths-ignore:
- ".github/workflows/images-*"
- ".github/workflows/debian-cache.yml"
- ".github/workflows/e2e.yml"
- "docs/**"
- "README.md"
pull_request:
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
build:
name: Build on macOS
name: Build
runs-on: macOS-latest
steps:
- uses: actions/checkout@v1

23
.github/workflows/scripts/setup.sh vendored Executable file
View File

@ -0,0 +1,23 @@
#!/usr/bin/env bash
set -eu
id
df -h
sudo systemd-run --wait rm -rf \
/usr/share/az* \
/usr/share/dotnet \
/usr/share/gradle* \
/usr/share/miniconda \
/usr/share/swift \
/var/lib/gems \
/var/lib/mysql \
/var/lib/snapd \
/opt/hostedtoolcache/CodeQL \
/opt/hostedtoolcache/Java_Temurin-Hotspot_jdk
sudo fstrim /
df -h

View File

@ -1,13 +1,29 @@
name: Ubuntu
on: [push, pull_request]
on:
workflow_dispatch:
push:
paths-ignore:
- ".github/workflows/images-*"
- ".github/workflows/e2e.yml"
- ".github/workflows/macos.yml"
- ".github/workflows/debian-cache.yml"
- "docs/**"
- ".readthedocs.yaml"
- "README.md"
pull_request:
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
build:
name: Build on Ubuntu
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: actions/setup-go@v5
- name: Build
run: go build
@ -17,6 +33,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: actions/setup-go@v5
- name: Install dependencies for tests
run: |
@ -24,107 +41,172 @@ jobs:
sudo apt-get install qemu-system-x86
- name: Bootstrap
run: ./tools/qemu-debian-img/bootstrap.sh
run: ./tools/qemu-ubuntu-img/bootstrap.sh
- name: Unit Testing
run: go test -parallel 1 -v ./...
run: |
mkdir ~/.out-of-tree
go test -parallel 1 -v ./...
test-end-to-end-examples:
needs: [build]
name: Examples
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
example: [
{ dir: "kernel-module", params: "" },
{ dir: "kernel-exploit", params: "--threshold=0" },
{ dir: "script", params: "" },
{ dir: "preload", params: "" }
]
steps:
- uses: actions/checkout@v1
- uses: actions/setup-go@v5
- name: Setup
run: .github/workflows/scripts/setup.sh
- name: Build
run: go build
- name: Install dependencies for tests
run: |
sudo apt-get update
sudo apt-get install -y qemu-system-x86
echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_22.04/ /' | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list
curl -fsSL https://download.opensuse.org/repositories/devel:kubic:libcontainers:unstable/xUbuntu_22.04/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/devel_kubic_libcontainers_unstable.gpg > /dev/null
sudo apt-get update
sudo apt-get install -y podman
- name: End-to-End Testing [${{ matrix.example.dir }}]
run: |
cd examples/${{ matrix.example.dir }}
../../out-of-tree --log-level=debug kernel autogen --max=1
../../out-of-tree --log-level=debug pew --qemu-timeout=10m ${{ matrix.example.params }}
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: test-end-to-end-examples-${{ matrix.example.dir }}-logs
path: ~/.out-of-tree/logs
test-end-to-end:
name: End-to-End Testing
needs: [build]
name: E2E
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
#type: [ Script, Module ]
type: [ Module ]
os: [
{ distro: Ubuntu, release: 12.04 },
{ distro: Ubuntu, release: 14.04 },
{ distro: Ubuntu, release: 16.04 },
{ distro: Ubuntu, release: 18.04 },
{ distro: Ubuntu, release: 20.04 },
{ distro: Ubuntu, release: 22.04 },
{ distro: CentOS, release: 6 },
{ distro: CentOS, release: 7 },
{ distro: CentOS, release: 8 },
{ distro: OracleLinux, release: 6 },
{ distro: OracleLinux, release: 7 },
{ distro: OracleLinux, release: 8 },
{ distro: OracleLinux, release: 9 },
{ distro: Debian, release: 7 },
{ distro: Debian, release: 8 },
{ distro: Debian, release: 9 },
{ distro: Debian, release: 10 },
{ distro: Debian, release: 11 },
{ distro: Debian, release: 12 },
{ distro: OpenSUSE, release: "12.1" },
{ distro: OpenSUSE, release: "12.2" },
{ distro: OpenSUSE, release: "12.3" },
{ distro: OpenSUSE, release: "13.1" },
{ distro: OpenSUSE, release: "13.2" },
{ distro: OpenSUSE, release: "42.1" },
{ distro: OpenSUSE, release: "42.2" },
{ distro: OpenSUSE, release: "42.3" },
# { distro: OpenSUSE, release: "15.0" },
{ distro: OpenSUSE, release: "15.1" },
{ distro: OpenSUSE, release: "15.2" },
{ distro: OpenSUSE, release: "15.3" },
{ distro: OpenSUSE, release: "15.4" },
{ distro: OpenSUSE, release: "15.5" }
]
steps:
- uses: actions/checkout@v1
- name: Backup docker files
run: |
echo "backup moby/buildkit image"
sudo docker image save -o ${GITHUB_WORKSPACE}/images.tar moby/buildkit
echo "prune docker"
sudo docker system prune -a -f
echo "back up /var/lib/docker folder structure and other files"
sudo rsync -aPq /var/lib/docker/ ${GITHUB_WORKSPACE}/docker
- name: Build
run: go build
- name: Maximize build space
uses: easimon/maximize-build-space@master
with:
overprovision-lvm: 'true'
remove-dotnet: 'true'
# instead of using default value to mount to build path,
# /var/lib/docker/ is really the place we need more spaces.
build-mount-path: '/var/lib/docker/'
- name: Install dependencies for tests
run: |
sudo apt-get update
sudo apt-get install qemu-system-x86
- name: Restore docker files
run: |
sudo rsync -aPq ${GITHUB_WORKSPACE}/docker/ /var/lib/docker
sudo rm -rf ${GITHUB_WORKSPACE}/docker
sudo ls ${GITHUB_WORKSPACE} -l
sudo docker image load -i ${GITHUB_WORKSPACE}/images.tar
sudo rm ${GITHUB_WORKSPACE}/images.tar
- name: End-to-End Testing [Kernel Module]
run: |
cd examples/kernel-module
../../out-of-tree --log-level=debug kernel autogen --max=1
../../out-of-tree --log-level=debug pew --qemu-timeout=10m
- uses: actions/checkout@v1
- uses: actions/setup-go@v5
- name: End-to-End Testing [Kernel Exploit]
run: |
cd examples/kernel-exploit
../../out-of-tree --log-level=debug kernel autogen --max=1
../../out-of-tree --log-level=debug pew --threshold=0 --qemu-timeout=10m
- name: Setup
run: .github/workflows/scripts/setup.sh
- name: Archive logs
uses: actions/upload-artifact@v3
with:
name: test-end-to-end-logs
path: /home/runner/.out-of-tree/logs/out-of-tree.log
- name: Build
run: go build
test-end-to-end-kernels:
name: End-to-End Testing (kernels)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Install dependencies for tests
run: |
sudo apt-get update
sudo apt-get install qemu-system-x86
- name: Build
run: go build
- name: End-to-End Testing ${{ matrix.type }} [${{ matrix.os.distro }} ${{ matrix.os.release }}]
shell: bash
run: |
mkdir test
cd test
- name: Install dependencies for tests
run: |
sudo apt-get update
sudo apt-get install qemu-system-x86
echo 'name = "test"' >> .out-of-tree.toml
echo 'type = "${{ matrix.type }}"' >> .out-of-tree.toml
echo 'script = "script.sh"' >> .out-of-tree.toml
echo '[[targets]]' >> .out-of-tree.toml
echo 'distro = { id = "${{ matrix.os.distro }}", release = "${{ matrix.os.release }}" }' >> .out-of-tree.toml
echo 'kernel = { regex = ".*" }' >> .out-of-tree.toml
- name: End-to-End Testing [Install one Ubuntu 18.04 kernel]
run: |
./out-of-tree --log-level=debug kernel install --distro=Ubuntu --ver=18.04 --kernel=4.15.0-70-generic
echo -e '#!/bin/sh\necho ok' >> script.sh
- name: End-to-End Testing [Reinstall one Ubuntu 18.04 kernel]
run: |
./out-of-tree --log-level=debug kernel install --distro=Ubuntu --ver=18.04 --kernel=4.15.0-70-generic --force
cp ../examples/kernel-module/{module.c,Makefile,test.sh} .
- name: End-to-End Testing [Install one Ubuntu 22.04 kernel w/o headers]
run: |
./out-of-tree --log-level=debug kernel install --distro=Ubuntu --ver=22.04 --kernel=5.19.0-28-generic --no-headers
../out-of-tree --log-level=debug kernel list-remote --distro=${{ matrix.os.distro }} --ver=${{ matrix.os.release }}
../out-of-tree --log-level=debug kernel autogen --max=1 --shuffle
../out-of-tree --log-level=debug pew --qemu-timeout=20m --include-internal-errors
- name: End-to-End Testing [Install one CentOS 7 kernel]
run: |
./out-of-tree --log-level=debug kernel install --distro=CentOS --ver=7 --kernel=3.10.0-862.6.3
- name: End-to-End Testing [Install one CentOS 7 kernel w/o headers]
run: |
./out-of-tree --log-level=debug kernel install --distro=CentOS --ver=7 --kernel=3.10.0-1160.71.1 --no-headers
- name: End-to-End Testing [Install one CentOS 8 kernel]
run: |
./out-of-tree --log-level=debug kernel install --distro=CentOS --ver=8 --kernel=4.18.0-348.7.1
- name: Archive logs
uses: actions/upload-artifact@v3
with:
name: test-end-to-end-kernels-log
path: /home/runner/.out-of-tree/logs/out-of-tree.log
test-end-to-end-genall:
name: End-to-End Testing (genall)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Build
run: go build
- name: Install dependencies for tests
run: |
sudo apt-get update
sudo apt-get install qemu-system-x86
- name: End-to-End Testing [Install all Ubuntu 22.04 kernels]
run: |
./out-of-tree --log-level=debug kernel genall --distro=Ubuntu --ver=22.04
- name: Archive logs
uses: actions/upload-artifact@v3
with:
name: test-end-to-end-genall-logs
path: /home/runner/.out-of-tree/logs/out-of-tree.log
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: test-end-to-end-${{ matrix.type }}-${{ matrix.os.distro }}-${{ matrix.os.release }}-logs
path: ~/.out-of-tree/logs

2
.gitignore vendored
View File

@ -12,3 +12,5 @@
*.out
out-of-tree
*.cache
result

12
.readthedocs.yaml Normal file
View File

@ -0,0 +1,12 @@
version: 2
build:
os: ubuntu-22.04
tools:
python: latest
sphinx:
configuration: docs/conf.py
formats:
- pdf

View File

@ -4,6 +4,30 @@
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [2.1.0]
### Added
- Graceful shutdown on ^C during kernel generation.
- Flag to set the container runtime command.
- out-of-tree image --dry-run for printing full qemu command.
### Changed
- No longer exits at the end of the retries; continues with the other
kernels.
- All temporary files moved to ~/.out-of-tree/tmp/.
### Fixed
- Discrepancies between /lib/modules and /boot should no longer lead
to fatal errors.
- Podman support on macOS.
## [2.0.0]
### Breaking

View File

@ -1,12 +1,12 @@
[![Codacy Badge](https://api.codacy.com/project/badge/Grade/aba4aad2046b4d1a9a99cf98e22c018b)](https://app.codacy.com/app/jollheef/out-of-tree?utm_source=github.com&utm_medium=referral&utm_content=jollheef/out-of-tree&utm_campaign=Badge_Grade_Dashboard)
[![Go Report Card](https://goreportcard.com/badge/code.dumpstack.io/tools/out-of-tree)](https://goreportcard.com/report/code.dumpstack.io/tools/out-of-tree)
[![Ubuntu](https://github.com/out-of-tree/out-of-tree/actions/workflows/ubuntu.yml/badge.svg)](https://github.com/out-of-tree/out-of-tree/actions/workflows/ubuntu.yml)
[![E2E](https://github.com/out-of-tree/out-of-tree/actions/workflows/e2e.yml/badge.svg)](https://github.com/out-of-tree/out-of-tree/actions/workflows/e2e.yml)
[![Documentation Status](https://readthedocs.org/projects/out-of-tree/badge/?version=latest)](https://out-of-tree.readthedocs.io/en/latest/?badge=latest)
# [out-of-tree](https://out-of-tree.io)
out-of-tree kernel {module, exploit} development tool
*out-of-tree* is the kernel {module, exploit} development tool.
out-of-tree is for automating some routine actions for creating development environments for debugging kernel modules and exploits, generating reliability statistics for exploits, and also provides the ability to easily integrate into CI (Continuous Integration).
*out-of-tree* was created to reduce the complexity of the environment for developing, testing and debugging Linux kernel exploits and out-of-tree kernel modules (hence the name "out-of-tree").
![Screenshot](https://cloudflare-ipfs.com/ipfs/Qmb88fgdDjbWkxz91sWsgmoZZNfVThnCtj37u3mF2s3T3T)
@ -14,17 +14,27 @@ out-of-tree is for automating some routine actions for creating development envi
### GNU/Linux (with [Nix](https://nixos.org/nix/))
$ curl -fsSL https://get.docker.com | sh
$ sudo usermod -aG docker user && newgrp docker
$ curl -L https://nixos.org/nix/install | sh
$ nix-env -iA nixpkgs.out-of-tree # Note: may not be up to date immediately, in this case consider installing from source
sudo apt install podman || sudo dnf install podman
Note that adding a user to group *docker* has serious security implications. Check Docker documentation for more information.
curl -L https://nixos.org/nix/install | sh
mkdir -p ~/.config/nix
echo "experimental-features = nix-command flakes" >> ~/.config/nix/nix.conf
# stable
nix profile install nixpkgs#out-of-tree
# latest
nix profile install git+https://code.dumpstack.io/tools/out-of-tree
### macOS
$ brew cask install docker
$ open --background -a Docker && sleep 1m
Note: case-sensitive FS is required for the ~/.out-of-tree directory.
$ brew install podman
$ podman machine stop || true
$ podman machine rm || true
$ podman machine init --cpus=4 --memory=4096 -v $HOME:$HOME
$ podman machine start
$ brew tap out-of-tree/repo
$ brew install out-of-tree
@ -38,7 +48,7 @@ Generate all Ubuntu 22.04 kernels:
Run tests based on .out-of-tree.toml definitions:
$ out-of-tree pew
$ out-of-tree pew
Test with a specific kernel:

189
api/api.go Normal file
View File

@ -0,0 +1,189 @@
package api
import (
"bytes"
"encoding/gob"
"errors"
"fmt"
"net"
"reflect"
"time"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/distro"
"github.com/google/uuid"
)
// ErrInvalid is a placeholder error: Resp.Decode sets it so a
// locally-unset Err is never mistaken for success. The transferable
// error text travels in Resp.Error, not here.
var ErrInvalid = errors.New("")

// Status is the lifecycle state of a job.
type Status string

const (
	StatusNew     Status = "new"
	StatusWaiting Status = "waiting"
	StatusRunning Status = "running"
	StatusSuccess Status = "success"
	StatusFailure Status = "failure"
)

// Command selects the daemon operation requested by a client.
type Command string

const (
	RawMode Command = "rawmode"

	AddJob    Command = "add_job"
	ListJobs  Command = "list_jobs"
	JobLogs   Command = "job_logs"
	JobStatus Command = "job_status"

	AddRepo   Command = "add_repo"
	ListRepos Command = "list_repos"

	Kernels Command = "kernels"
)
// Job is a single unit of work: one artifact built and tested against
// one target kernel.
type Job struct {
	ID        int64
	UpdatedAt time.Time

	// Job UUID
	UUID string
	// Group UUID
	Group string

	RepoName string
	Commit   string

	Artifact artifact.Artifact
	Target   distro.KernelInfo

	Created  time.Time
	Started  time.Time
	Finished time.Time

	Status Status
}

// GenUUID assigns a fresh random UUID to the job.
func (job *Job) GenUUID() {
	job.UUID = uuid.New().String()
}
// ListJobsParams is the parameters for ListJobs command
type ListJobsParams struct {
	// Group UUID
	Group string

	// Repo name
	Repo string

	// Commit hash
	Commit string

	// Status of the job
	Status Status

	// UpdatedAfter filters to jobs updated after this point in time
	// (presumably a unix timestamp — confirm against the daemon side).
	UpdatedAfter int64
}

// Repo is a registered source repository.
type Repo struct {
	ID   int64
	Name string
	// Path of the repository; NOTE(review): looks daemon-local — confirm.
	Path string
}

// JobLog is a single named log produced by a job.
type JobLog struct {
	Name string
	Text string
}
// Req is a single request in the daemon wire protocol. The payload is
// gob-encoded into Data, with Type recording the Go type name so the
// receiver can detect decoding mismatches early.
type Req struct {
	Command Command

	// Type is the reflected Go type of the encoded payload.
	Type string
	// Data is the gob-encoded payload.
	Data []byte
}

// SetData gob-encodes data into the request payload and records its
// reflected type. Data is only assigned when encoding succeeds, so a
// failed call cannot leave a stale/partial payload behind.
func (r *Req) SetData(data any) (err error) {
	r.Type = fmt.Sprintf("%v", reflect.TypeOf(data))

	var buf bytes.Buffer
	err = gob.NewEncoder(&buf).Encode(data)
	if err != nil {
		return
	}

	r.Data = buf.Bytes()
	return
}

// GetData decodes the request payload into data, which must have the
// exact type that was passed to SetData (pass a pointer to both).
// An empty payload is a no-op.
func (r *Req) GetData(data any) (err error) {
	if len(r.Data) == 0 {
		return
	}

	t := fmt.Sprintf("%v", reflect.TypeOf(data))
	if r.Type != t {
		err = fmt.Errorf("type mismatch (%v != %v)", r.Type, t)
		return
	}

	return gob.NewDecoder(bytes.NewBuffer(r.Data)).Decode(data)
}

// Encode writes the gob-serialized request to conn.
func (r *Req) Encode(conn net.Conn) (err error) {
	return gob.NewEncoder(conn).Encode(r)
}

// Decode reads a gob-serialized request from conn.
func (r *Req) Decode(conn net.Conn) (err error) {
	return gob.NewDecoder(conn).Decode(r)
}
// Resp is a single response in the daemon wire protocol.
type Resp struct {
	UUID string

	// Error is the transferable (string) form of Err: gob cannot
	// reliably serialize arbitrary error implementations.
	Error string
	// Err is local-only and never serialized.
	Err error `json:"-"`

	// Type is the reflected Go type of the encoded payload.
	Type string
	// Data is the gob-encoded payload.
	Data []byte
}

// NewResp returns a response with a fresh random UUID.
func NewResp() (resp Resp) {
	resp.UUID = uuid.New().String()
	return
}

// SetData gob-encodes data into the response payload and records its
// reflected type. Data is only assigned when encoding succeeds.
func (r *Resp) SetData(data any) (err error) {
	r.Type = fmt.Sprintf("%v", reflect.TypeOf(data))

	var buf bytes.Buffer
	err = gob.NewEncoder(&buf).Encode(data)
	if err != nil {
		return
	}

	r.Data = buf.Bytes()
	return
}

// GetData decodes the response payload into data, which must have the
// exact type that was passed to SetData (pass a pointer to both).
// An empty payload is a no-op.
func (r *Resp) GetData(data any) (err error) {
	if len(r.Data) == 0 {
		return
	}

	t := fmt.Sprintf("%v", reflect.TypeOf(data))
	if r.Type != t {
		err = fmt.Errorf("type mismatch (%v != %v)", r.Type, t)
		return
	}

	return gob.NewDecoder(bytes.NewBuffer(r.Data)).Decode(data)
}

// Encode flattens Err into the transferable Error string (unless it is
// the ErrInvalid placeholder or Error is already set) and writes the
// gob-serialized response to conn.
func (r *Resp) Encode(conn net.Conn) (err error) {
	if r.Err != nil && r.Err != ErrInvalid && r.Error == "" {
		r.Error = fmt.Sprintf("%v", r.Err)
	}
	return gob.NewEncoder(conn).Encode(r)
}

// Decode reads a gob-serialized response from conn. Err is reset to
// the ErrInvalid placeholder: only the Error string crosses the wire,
// so callers must not trust Err after decoding.
func (r *Resp) Decode(conn net.Conn) (err error) {
	err = gob.NewDecoder(conn).Decode(r)
	r.Err = ErrInvalid
	return
}

439
artifact/artifact.go Normal file
View File

@ -0,0 +1,439 @@
package artifact
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/naoina/toml"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// Kernel selects kernel releases for a target by regular expression
// on the kernel release string.
type Kernel struct {
	// TODO
	// Version string
	// From string
	// To string

	// Regex must match the kernel release for it to be included
	// (prev. ReleaseMask).
	Regex string
	// ExcludeRegex, when non-empty, filters matching releases out.
	ExcludeRegex string
}
// Target defines a single (distro, kernel) combination the artifact
// should be built and tested against.
type Target struct {
	Distro distro.Distro
	Kernel Kernel
}

// DockerName returns a stable container name for the target's distro,
// e.g. "out_of_tree_ubuntu_18__04" (dots in the release are escaped
// since they are not valid in container names).
func (km Target) DockerName() string {
	distro := strings.ToLower(km.Distro.ID.String())
	release := strings.ReplaceAll(km.Distro.Release, ".", "__")
	return fmt.Sprintf("out_of_tree_%s_%s", distro, release)
}
// ArtifactType is the kernel module or exploit
type ArtifactType int

const (
	// KernelModule is any kind of kernel module
	KernelModule ArtifactType = iota
	// KernelExploit is the privilege escalation exploit
	KernelExploit
	// Script for information gathering or automation
	Script
)

// String returns the human-readable artifact type name. Unlike an
// array lookup it cannot panic: unknown values are reported, matching
// the defensive default in MarshalTOML.
func (at ArtifactType) String() string {
	switch at {
	case KernelModule:
		return "module"
	case KernelExploit:
		return "exploit"
	case Script:
		return "script"
	default:
		return fmt.Sprintf("unknown artifact type %d", int(at))
	}
}

// UnmarshalTOML is for support github.com/naoina/toml
//
// Matching is deliberately lenient: any value containing "module",
// "exploit" or "script" (case-insensitive) is accepted, in that
// priority order.
func (at *ArtifactType) UnmarshalTOML(data []byte) (err error) {
	stype := strings.Trim(string(data), `"`)
	stypelower := strings.ToLower(stype)
	if strings.Contains(stypelower, "module") {
		*at = KernelModule
	} else if strings.Contains(stypelower, "exploit") {
		*at = KernelExploit
	} else if strings.Contains(stypelower, "script") {
		*at = Script
	} else {
		err = fmt.Errorf("type %s is unsupported", stype)
	}
	return
}

// MarshalTOML is for support github.com/naoina/toml
func (at ArtifactType) MarshalTOML() (data []byte, err error) {
	s := ""
	switch at {
	case KernelModule:
		s = "module"
	case KernelExploit:
		s = "exploit"
	case Script:
		s = "script"
	default:
		err = fmt.Errorf("cannot marshal %d", at)
	}
	data = []byte(`"` + s + `"`)
	return
}
// Duration type with toml unmarshalling support
type Duration struct {
	time.Duration
}

// UnmarshalTOML parses a quoted Go duration string (e.g. "10m") into
// the Duration. All double quotes are stripped before parsing.
func (d *Duration) UnmarshalTOML(data []byte) (err error) {
	duration := strings.ReplaceAll(string(data), "\"", "")
	d.Duration, err = time.ParseDuration(duration)
	return
}

// MarshalTOML renders the Duration as a quoted Go duration string.
func (d Duration) MarshalTOML() (data []byte, err error) {
	data = []byte(`"` + d.Duration.String() + `"`)
	return
}
// PreloadModule is a kernel module to load into the guest before the
// artifact itself, sourced from either a git repo or a local path.
type PreloadModule struct {
	Repo string
	Path string
	// TimeoutAfterLoad: presumably how long to wait after insmod
	// before proceeding — confirm against the preload implementation.
	TimeoutAfterLoad Duration
}

// Extra test files to copy over
type FileTransfer struct {
	User   string
	Local  string
	Remote string
}

// Patch names a patch to apply; NOTE(review): looks like it is applied
// to the artifact source before build — confirm against Build().
type Patch struct {
	Path   string
	Source string
	Script string
}
// Artifact is for .out-of-tree.toml: it describes what to build,
// which kernels to target, and how to run the result inside qemu.
type Artifact struct {
	Name string
	Type ArtifactType
	// TestFiles are extra files to copy into the guest.
	TestFiles  []FileTransfer
	SourcePath string
	Targets    []Target

	// Script to run in the guest when Type == Script.
	Script string

	// Qemu overrides for the VM defaults; zero values keep defaults.
	Qemu struct {
		Cpus              int
		Memory            int
		Timeout           Duration
		AfterStartTimeout Duration
	}

	Docker struct {
		Timeout Duration
	}

	// Mitigations disables kernel hardening features in the guest
	// (SMEP/SMAP/KASLR/KPTI stay enabled unless set).
	Mitigations struct {
		DisableSmep  bool
		DisableSmap  bool
		DisableKaslr bool
		DisableKpti  bool
	}

	Patches []Patch

	Make struct {
		Target string
	}

	// StandardModules: copy the standard kernel modules into the
	// guest (for artifacts that depend on one of them).
	StandardModules bool

	// Preload modules to insert before running the artifact.
	Preload []PreloadModule
}
// Read is for read .out-of-tree.toml
//
// It parses the artifact definition at path and validates that the
// artifact name is a single non-empty word. A TOML parse error is
// returned as-is and is no longer masked by the name check.
func (Artifact) Read(path string) (ka Artifact, err error) {
	buf, err := os.ReadFile(path)
	if err != nil {
		return
	}

	err = toml.Unmarshal(buf, &ka)
	if err != nil {
		return
	}

	// Exactly one field: rejects both spaces and an empty name.
	if len(strings.Fields(ka.Name)) != 1 {
		err = errors.New("artifact name should not contain spaces")
	}
	return
}
// checkSupport reports whether kernel ki satisfies one target: the
// distro must match, the include regex must match the kernel release,
// and the (optional) exclude regex must not.
func (ka Artifact) checkSupport(ki distro.KernelInfo, target Target) (
	supported bool, err error) {

	// An empty release means "any release of this distro".
	if target.Distro.Release == "" {
		if ki.Distro.ID != target.Distro.ID {
			return
		}
	} else {
		if !ki.Distro.Equal(target.Distro) {
			return
		}
	}

	// Both regexes are compiled up front so an invalid pattern is
	// always surfaced, even when the include regex would not match.
	include, err := regexp.Compile(target.Kernel.Regex)
	if err != nil {
		return
	}

	exclude, err := regexp.Compile(target.Kernel.ExcludeRegex)
	if err != nil {
		return
	}

	if !include.MatchString(ki.KernelRelease) {
		return
	}

	if target.Kernel.ExcludeRegex != "" && exclude.MatchString(ki.KernelRelease) {
		return
	}

	supported = true
	return
}
// Supported returns true if given kernel is supported by artifact
//
// Previously an error from one target (e.g. an invalid regex) was
// silently overwritten by later iterations; now the first error is
// returned immediately.
func (ka Artifact) Supported(ki distro.KernelInfo) (supported bool, err error) {
	for _, km := range ka.Targets {
		supported, err = ka.checkSupport(ki, km)
		if err != nil {
			return
		}
		if supported {
			return
		}
	}
	return
}
// Process builds the artifact and runs its test against a single
// kernel ki inside a qemu VM, reporting the outcome through dump.
//
// When endless is true the VM timeout is disabled and the test (plus
// the optional cEndlessStress script) is re-run forever, sleeping
// cEndlessTimeout between iterations. A non-empty cBinary skips the
// build and uses that prebuilt binary instead.
func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
	endless bool, cBinary,
	cEndlessStress string, cEndlessTimeout time.Duration,
	dump func(q *qemu.System, ka Artifact, ki distro.KernelInfo,
		result *Result)) {

	slog.Info().Msg("start")
	testStart := time.Now()
	defer func() {
		slog.Debug().Str("test_duration",
			time.Since(testStart).String()).
			Msg("")
	}()

	kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		slog.Error().Err(err).Msg("qemu init")
		return
	}
	q.Log = slog

	// Artifact-level overrides for the qemu defaults; zero values
	// keep whatever NewSystem set.
	if ka.Qemu.Timeout.Duration != 0 {
		q.Timeout = ka.Qemu.Timeout.Duration
	}
	if ka.Qemu.Cpus != 0 {
		q.Cpus = ka.Qemu.Cpus
	}
	if ka.Qemu.Memory != 0 {
		q.Memory = ka.Qemu.Memory
	}

	// Kernel hardening stays enabled unless the artifact explicitly
	// disables it.
	q.SetKASLR(!ka.Mitigations.DisableKaslr)
	q.SetSMEP(!ka.Mitigations.DisableSmep)
	q.SetSMAP(!ka.Mitigations.DisableSmap)
	q.SetKPTI(!ka.Mitigations.DisableKpti)

	if ki.CPU.Model != "" {
		q.CPU.Model = ki.CPU.Model
	}
	if len(ki.CPU.Flags) != 0 {
		q.CPU.Flags = ki.CPU.Flags
	}

	if endless {
		// Endless runs are stopped manually, not by timeout.
		q.Timeout = 0
	}

	qemuStart := time.Now()
	slog.Debug().Msgf("qemu start %v", qemuStart)
	err = q.Start()
	if err != nil {
		slog.Error().Err(err).Msg("qemu start")
		return
	}
	defer q.Stop()

	slog.Debug().Msgf("wait %v", ka.Qemu.AfterStartTimeout)
	time.Sleep(ka.Qemu.AfterStartTimeout.Duration)

	// Background liveness heartbeat for long-running VMs.
	go func() {
		time.Sleep(time.Minute)
		for !q.Died {
			slog.Debug().Msg("still alive")
			time.Sleep(time.Minute)
		}
	}()

	tmp, err := os.MkdirTemp(dotfiles.Dir("tmp"), "")
	if err != nil {
		slog.Error().Err(err).Msg("making tmp directory")
		return
	}
	defer os.RemoveAll(tmp)

	result := Result{}
	if !endless {
		// Normal runs report once on exit; endless runs call dump
		// explicitly after the first iteration below.
		defer dump(q, ka, ki, &result)
	}

	// Decide what to run in the guest: a Script artifact runs its
	// script directly; otherwise build now, or reuse cBinary.
	var cTest string
	if ka.Type == Script {
		result.BuildDir = ka.SourcePath
		result.Build.Ok = true
		ka.Script = filepath.Join(ka.SourcePath, ka.Script)
		cTest = ka.Script
	} else if cBinary == "" {
		// TODO: build should return structure
		start := time.Now()
		result.BuildDir, result.BuildArtifact, result.Build.Output, err =
			Build(slog, tmp, ka, ki, ka.Docker.Timeout.Duration)
		slog.Debug().Str("duration", time.Since(start).String()).
			Msg("build done")
		if err != nil {
			log.Error().Err(err).Msg("build")
			return
		}
		result.Build.Ok = true
	} else {
		// Prebuilt binary supplied by the caller.
		result.BuildArtifact = cBinary
		result.Build.Ok = true
	}

	if cTest == "" {
		// Prefer a "<artifact>_test" next to the built binary,
		// falling back to the source tree's test.sh.
		cTest = result.BuildArtifact + "_test"
		if _, err := os.Stat(cTest); err != nil {
			slog.Debug().Msgf("%s does not exist", cTest)
			cTest = tmp + "/source/" + "test.sh"
		} else {
			slog.Debug().Msgf("%s exist", cTest)
		}
	}

	// Never wait forever for SSH: default to one minute.
	if ka.Qemu.Timeout.Duration == 0 {
		ka.Qemu.Timeout.Duration = time.Minute
	}
	err = q.WaitForSSH(ka.Qemu.Timeout.Duration)
	if err != nil {
		result.InternalError = err
		return
	}
	slog.Debug().Str("qemu_startup_duration",
		time.Since(qemuStart).String()).
		Msg("ssh is available")

	remoteTest, err := copyTest(q, cTest, ka)
	if err != nil {
		result.InternalError = err
		slog.Error().Err(err).Msg("copy test script")
		return
	}

	if ka.StandardModules {
		// Module depends on one of the standard modules
		start := time.Now()
		err = CopyStandardModules(q, ki)
		if err != nil {
			result.InternalError = err
			slog.Error().Err(err).Msg("copy standard modules")
			return
		}
		slog.Debug().Str("duration", time.Since(start).String()).
			Msg("copy standard modules")
	}

	err = PreloadModules(q, ka, ki, ka.Docker.Timeout.Duration)
	if err != nil {
		result.InternalError = err
		slog.Error().Err(err).Msg("preload modules")
		return
	}

	start := time.Now()
	copyArtifactAndTest(slog, q, ka, &result, remoteTest)
	slog.Debug().Str("duration", time.Since(start).String()).
		Msgf("test completed (success: %v)", result.Test.Ok)

	if !endless {
		return
	}

	dump(q, ka, ki, &result)

	// Endless mode only makes sense when the first pass succeeded.
	if !result.Build.Ok || !result.Run.Ok || !result.Test.Ok {
		return
	}

	slog.Info().Msg("start endless tests")

	if cEndlessStress != "" {
		slog.Debug().Msg("copy and run endless stress script")
		err = q.CopyAndRunAsync("root", cEndlessStress)
		if err != nil {
			q.Stop()
			//f.Sync()
			slog.Fatal().Err(err).Msg("cannot copy/run stress")
			return
		}
	}

	// Re-run the test forever; any failure stops the VM and aborts.
	for {
		output, err := q.Command("root", remoteTest)
		if err != nil {
			q.Stop()
			//f.Sync()
			slog.Fatal().Err(err).Msg(output)
			return
		}
		slog.Debug().Msg(output)

		slog.Info().Msg("test success")

		slog.Debug().Msgf("wait %v", cEndlessTimeout)
		time.Sleep(cEndlessTimeout)
	}
}

36
artifact/artifact_test.go Normal file
View File

@ -0,0 +1,36 @@
package artifact
import (
"testing"
"code.dumpstack.io/tools/out-of-tree/distro"
"github.com/naoina/toml"
)
// TestMarshalUnmarshal checks that an Artifact survives a TOML
// marshal/unmarshal round trip without error.
func TestMarshalUnmarshal(t *testing.T) {
	cfg := Artifact{
		Name: "Put name here",
		Type: KernelModule,
		Targets: []Target{
			{
				Distro: distro.Distro{
					ID:      distro.Ubuntu,
					Release: "18.04",
				},
				Kernel: Kernel{Regex: ".*"},
			},
		},
	}

	buf, err := toml.Marshal(&cfg)
	if err != nil {
		t.Fatal(err)
	}

	var decoded Artifact
	if err := toml.Unmarshal(buf, &decoded); err != nil {
		t.Fatal(err)
	}
}

View File

@ -2,26 +2,25 @@
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
package artifact
import (
"crypto/sha1"
"encoding/hex"
"errors"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"time"
"github.com/go-git/go-git/v5"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
func preloadModules(q *qemu.System, ka config.Artifact, ki config.KernelInfo,
func PreloadModules(q *qemu.System, ka Artifact, ki distro.KernelInfo,
dockerTimeout time.Duration) (err error) {
for _, pm := range ka.Preload {
@ -33,7 +32,7 @@ func preloadModules(q *qemu.System, ka config.Artifact, ki config.KernelInfo,
return
}
func preload(q *qemu.System, ki config.KernelInfo, pm config.PreloadModule,
func preload(q *qemu.System, ki distro.KernelInfo, pm PreloadModule,
dockerTimeout time.Duration) (err error) {
var workPath, cache string
@ -46,7 +45,8 @@ func preload(q *qemu.System, ki config.KernelInfo, pm config.PreloadModule,
return
}
} else {
errors.New("No repo/path in preload entry")
err = errors.New("no repo/path in preload entry")
return
}
err = buildAndInsmod(workPath, q, ki, dockerTimeout, cache)
@ -58,32 +58,32 @@ func preload(q *qemu.System, ki config.KernelInfo, pm config.PreloadModule,
return
}
func buildAndInsmod(workPath string, q *qemu.System, ki config.KernelInfo,
func buildAndInsmod(workPath string, q *qemu.System, ki distro.KernelInfo,
dockerTimeout time.Duration, cache string) (err error) {
tmp, err := ioutil.TempDir("", "out-of-tree_")
tmp, err := tempDir()
if err != nil {
return
}
defer os.RemoveAll(tmp)
var artifact string
if exists(cache) {
artifact = cache
var af string
if pathExists(cache) {
af = cache
} else {
artifact, err = buildPreload(workPath, tmp, ki, dockerTimeout)
af, err = buildPreload(workPath, tmp, ki, dockerTimeout)
if err != nil {
return
}
if cache != "" {
err = copyFile(artifact, cache)
err = CopyFile(af, cache)
if err != nil {
return
}
}
}
output, err := q.CopyAndInsmod(artifact)
output, err := q.CopyAndInsmod(af)
if err != nil {
log.Print(output)
return
@ -91,40 +91,49 @@ func buildAndInsmod(workPath string, q *qemu.System, ki config.KernelInfo,
return
}
func buildPreload(workPath, tmp string, ki config.KernelInfo,
dockerTimeout time.Duration) (artifact string, err error) {
func buildPreload(workPath, tmp string, ki distro.KernelInfo,
dockerTimeout time.Duration) (af string, err error) {
ka, err := config.ReadArtifactConfig(workPath + "/.out-of-tree.toml")
ka, err := Artifact{}.Read(workPath + "/.out-of-tree.toml")
if err != nil {
return
log.Warn().Err(err).Msg("preload")
}
ka.SourcePath = workPath
km := config.KernelMask{DistroType: ki.DistroType,
DistroRelease: ki.DistroRelease,
ReleaseMask: ki.KernelRelease,
km := Target{
Distro: ki.Distro,
Kernel: Kernel{Regex: ki.KernelRelease},
}
ka.SupportedKernels = []config.KernelMask{km}
ka.Targets = []Target{km}
if ka.Docker.Timeout.Duration != 0 {
dockerTimeout = ka.Docker.Timeout.Duration
}
_, artifact, _, err = build(tmp, ka, ki, dockerTimeout)
_, af, _, err = Build(log.Logger, tmp, ka, ki, dockerTimeout)
return
}
func cloneOrPull(repo string, ki config.KernelInfo) (workPath, cache string, err error) {
usr, err := user.Current()
if err != nil {
return
func pathExists(path string) bool {
if _, err := os.Stat(path); err != nil {
return false
}
base := filepath.Join(usr.HomeDir, "/.out-of-tree/preload/")
return true
}
func tempDir() (string, error) {
return os.MkdirTemp(dotfiles.Dir("tmp"), "")
}
func cloneOrPull(repo string, ki distro.KernelInfo) (workPath, cache string,
err error) {
base := dotfiles.Dir("preload")
workPath = filepath.Join(base, "/repos/", sha1sum(repo))
var r *git.Repository
if exists(workPath) {
if pathExists(workPath) {
r, err = git.PlainOpen(workPath)
if err != nil {
return

377
artifact/process.go Normal file
View File

@ -0,0 +1,377 @@
package artifact
import (
"bufio"
"errors"
"fmt"
"io"
"io/fs"
"math/rand"
"os"
"os/exec"
"strings"
"time"
"github.com/otiai10/copy"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// sh runs command with `sh -c` inside workdir, streaming the combined
// stdout/stderr to the trace log and returning it as one string.
//
// On failure the returned error includes the command and its output so
// it is self-describing.
func sh(workdir, command string) (output string, err error) {
	flog := log.With().
		Str("workdir", workdir).
		Str("command", command).
		Logger()

	cmd := exec.Command("sh", "-c", "cd "+workdir+" && "+command)

	flog.Debug().Msgf("%v", cmd)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	cmd.Stderr = cmd.Stdout

	err = cmd.Start()
	if err != nil {
		return
	}

	// The reader goroutine appends to `output`; wait for it to finish
	// before reading `output` below, otherwise Wait-then-read races
	// with the goroutine (a data race on `output`).
	done := make(chan struct{})
	go func() {
		defer close(done)
		scanner := bufio.NewScanner(stdout)
		for scanner.Scan() {
			m := scanner.Text()
			output += m + "\n"
			flog.Trace().Str("stdout", m).Msg("")
		}
	}()

	err = cmd.Wait()
	<-done

	if err != nil {
		err = fmt.Errorf("%v %v output: %v", cmd, err, output)
	}
	return
}
// applyPatches materializes and applies each entry of ka.Patches inside
// the source copy at src, in declaration order.
//
// A patch comes either from inline Source text or from a file Path (the
// two are mutually exclusive); either is written as patch_NN.diff and
// fed to patch(1). An optional Script is written as patch_NN.sh and
// executed after the diff (if any). The first failure aborts.
func applyPatches(src string, ka Artifact) (err error) {
	for i, patch := range ka.Patches {
		name := fmt.Sprintf("patch_%02d", i)
		path := src + "/" + name + ".diff"
		if patch.Source != "" && patch.Path != "" {
			err = errors.New("path and source are mutually exclusive")
			return
		} else if patch.Source != "" {
			// inline diff text
			err = os.WriteFile(path, []byte(patch.Source), 0644)
			if err != nil {
				return
			}
		} else if patch.Path != "" {
			// diff provided as a file
			err = copy.Copy(patch.Path, path)
			if err != nil {
				return
			}
		}
		if patch.Source != "" || patch.Path != "" {
			_, err = sh(src, "patch < "+path)
			if err != nil {
				return
			}
		}
		if patch.Script != "" {
			script := src + "/" + name + ".sh"
			err = os.WriteFile(script, []byte(patch.Script), 0755)
			if err != nil {
				return
			}
			_, err = sh(src, script)
			if err != nil {
				return
			}
		}
	}
	return
}
// Build compiles artifact ka for kernel ki inside tmp, either in the
// kernel's container (when ki.ContainerName is set) or directly on the
// host. dockerTimeout bounds the build in both modes.
//
// It returns the build directory, the path of the produced artifact
// (with ".ko" appended for kernel modules) and the raw build output.
func Build(flog zerolog.Logger, tmp string, ka Artifact,
	ki distro.KernelInfo, dockerTimeout time.Duration) (
	outdir, outpath, output string, err error) {

	target := strings.Replace(ka.Name, " ", "_", -1)
	if target == "" {
		// unnamed artifact: pick a random target name
		target = fmt.Sprintf("%d", rand.Int())
	}

	outdir = tmp + "/source"

	err = copy.Copy(ka.SourcePath, outdir)
	if err != nil {
		return
	}

	err = applyPatches(outdir, ka)
	if err != nil {
		return
	}

	outpath = outdir + "/" + target
	if ka.Type == KernelModule {
		outpath += ".ko"
	}

	if ki.KernelVersion == "" {
		ki.KernelVersion = ki.KernelRelease
	}

	kernel := "/lib/modules/" + ki.KernelVersion + "/build"
	if ki.KernelSource != "" {
		kernel = ki.KernelSource
	}

	buildCommand := "make KERNEL=" + kernel + " TARGET=" + target
	if ka.Make.Target != "" {
		buildCommand += " " + ka.Make.Target
	}

	if ki.ContainerName != "" {
		var c container.Container
		container.Timeout = dockerTimeout
		c, err = container.NewFromKernelInfo(ki)
		// check the error BEFORE using c: previously c.Log was
		// assigned ahead of the error check
		if err != nil {
			log.Fatal().Err(err).Msg("container creation failure")
		}
		c.Log = flog

		output, err = c.Run(outdir, []string{
			buildCommand + " && chmod -R 777 /work",
		})
	} else {
		cmd := exec.Command("bash", "-c", "cd "+outdir+" && "+
			buildCommand)

		log.Debug().Msgf("%v", cmd)

		// host builds get the same timeout as container builds
		timer := time.AfterFunc(dockerTimeout, func() {
			cmd.Process.Kill()
		})
		defer timer.Stop()

		var raw []byte
		raw, err = cmd.CombinedOutput()
		if err != nil {
			e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
				err, buildCommand, string(raw))
			err = errors.New(e)
			return
		}

		output = string(raw)
	}
	return
}
// runScript executes script in the guest as root and returns its output.
func runScript(q *qemu.System, script string) (output string, err error) {
	output, err = q.Command("root", script)
	return
}
// testKernelModule runs the uploaded test script as root in the guest
// and returns its output; a non-nil error means the test failed.
func testKernelModule(q *qemu.System, ka Artifact,
	test string) (output string, err error) {

	output, err = q.Command("root", test)
	// TODO generic checks for WARNING's and so on
	return
}
// testKernelExploit makes the exploit executable, runs the test wrapper
// against it as an unprivileged user, then verifies (as root) that the
// proof file the exploit is expected to create actually exists.
func testKernelExploit(q *qemu.System, ka Artifact,
	test, exploit string) (output string, err error) {

	output, err = q.Command("user", "chmod +x "+exploit)
	if err != nil {
		return
	}

	proofPath := fmt.Sprintf("/root/%d", rand.Int())
	output, err = q.Command("user",
		fmt.Sprintf("%s %s %s", test, exploit, proofPath))
	if err != nil {
		return
	}

	// success iff the root-owned proof file was created
	_, err = q.Command("root", "stat "+proofPath)
	return
}
// Result accumulates the outcome of one build/run/test cycle of an
// artifact against a single kernel.
type Result struct {
	// BuildDir is the directory the artifact was built in; relative
	// test-file paths are resolved against it.
	BuildDir string
	// BuildArtifact is the path of the produced binary/module.
	BuildArtifact string

	// Build, Run and Test hold the per-phase output and success flag.
	Build, Run, Test struct {
		Output string
		Ok     bool
	}

	// InternalError records infrastructure failures (e.g. a refused
	// ssh connection) as opposed to genuine test failures.
	InternalError error
	// InternalErrorString mirrors InternalError in string form
	// (presumably for serialization — confirm against the db layer).
	InternalErrorString string
}
func CopyFile(sourcePath, destinationPath string) (err error) {
sourceFile, err := os.Open(sourcePath)
if err != nil {
return
}
defer sourceFile.Close()
destinationFile, err := os.Create(destinationPath)
if err != nil {
return err
}
if _, err := io.Copy(destinationFile, sourceFile); err != nil {
destinationFile.Close()
return err
}
return destinationFile.Close()
}
// copyArtifactAndTest uploads the built artifact and all test files to
// the guest q, runs the test matching ka.Type, and records run/test
// results into res.
//
// Connection-level failures are stored in res.InternalError so callers
// can distinguish infrastructure errors from real test failures.
func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka Artifact,
	res *Result, remoteTest string) (err error) {

	// Copy all test files to the remote machine
	for _, f := range ka.TestFiles {
		// Resolve relative paths against the build directory.
		// Guard against an empty Local path: indexing "" panics.
		if len(f.Local) > 0 && f.Local[0] != '/' {
			if res.BuildDir != "" {
				f.Local = res.BuildDir + "/" + f.Local
			}
		}
		err = q.CopyFile(f.User, f.Local, f.Remote)
		if err != nil {
			res.InternalError = err
			slog.Error().Err(err).Msg("copy test file")
			return
		}
	}

	switch ka.Type {
	case KernelModule:
		res.Run.Output, err = q.CopyAndInsmod(res.BuildArtifact)
		if err != nil {
			slog.Error().Err(err).Msg(res.Run.Output)
			// TODO errors.As
			if strings.Contains(err.Error(), "connection refused") {
				res.InternalError = err
			}
			return
		}
		res.Run.Ok = true

		res.Test.Output, err = testKernelModule(q, ka, remoteTest)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		res.Test.Ok = true
	case KernelExploit:
		remoteExploit := fmt.Sprintf("/tmp/exploit_%d", rand.Int())
		err = q.CopyFile("user", res.BuildArtifact, remoteExploit)
		if err != nil {
			return
		}

		res.Test.Output, err = testKernelExploit(q, ka, remoteTest,
			remoteExploit)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		res.Run.Ok = true // exploits have no separate run phase
		res.Test.Ok = true
	case Script:
		res.Test.Output, err = runScript(q, remoteTest)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		slog.Info().Msgf("\n%v\n", res.Test.Output)
		res.Run.Ok = true
		res.Test.Ok = true
	default:
		slog.Fatal().Msg("Unsupported artifact type")
	}

	// Make sure the guest survived the test (ssh still works)
	_, err = q.Command("root", "echo")
	if err != nil {
		slog.Error().Err(err).Msg("after-test ssh reconnect")
		res.Test.Ok = false
		return
	}
	return
}
// copyTest uploads the test script to a random path inside the guest.
// When no local test file exists, a minimal default stub is generated
// in place (a touch-forwarding wrapper for exploits, a no-op otherwise).
func copyTest(q *qemu.System, testPath string, ka Artifact) (
	remoteTest string, err error) {

	remoteTest = fmt.Sprintf("/tmp/test_%d", rand.Int())
	err = q.CopyFile("user", testPath, remoteTest)
	if err != nil {
		var stub string
		if ka.Type == KernelExploit {
			stub = "echo -e '#!/bin/sh\necho touch $2 | $1' " +
				"> " + remoteTest +
				" && chmod +x " + remoteTest
		} else {
			stub = "echo '#!/bin/sh' " +
				"> " + remoteTest + " && chmod +x " + remoteTest
		}
		q.Command("user", stub)
	}

	_, err = q.Command("root", "chmod +x "+remoteTest)
	return
}
// CopyStandardModules uploads the distribution's module tree for kernel
// ki into the guest, plus the modules.* metadata files, so that
// modprobe of in-tree dependencies works.
func CopyStandardModules(q *qemu.System, ki distro.KernelInfo) (err error) {
	_, err = q.Command("root", "mkdir -p /lib/modules/"+ki.KernelVersion)
	if err != nil {
		return
	}

	remotePath := "/lib/modules/" + ki.KernelVersion + "/"

	err = q.CopyDirectory("root", ki.ModulesPath+"/kernel", remotePath+"/kernel")
	if err != nil {
		return
	}

	files, err := os.ReadDir(ki.ModulesPath)
	if err != nil {
		return
	}

	// Copy the modules.* metadata files (skipping symlinks like
	// "build"/"source").
	for _, de := range files {
		var fi fs.FileInfo
		fi, err = de.Info()
		if err != nil {
			// entry vanished between ReadDir and Info; skip it
			// without leaking the error into the return value
			err = nil
			continue
		}
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			continue
		}
		if !strings.HasPrefix(fi.Name(), "modules") {
			continue
		}

		err = q.CopyFile("root", ki.ModulesPath+"/"+fi.Name(), remotePath)
		if err != nil {
			// previously this error was silently overwritten by
			// the next iteration; fail fast instead
			return
		}
	}

	return
}

125
cache/cache.go vendored Normal file
View File

@ -0,0 +1,125 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cache
import (
"fmt"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"github.com/cavaliergopher/grab/v3"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
)
var URL = "https://out-of-tree.fra1.digitaloceanspaces.com/1.0.0/"
// unpackTar extracts archive into destination using the system tar.
func unpackTar(archive, destination string) (err error) {
	// NOTE: If you change anything in the tar command please check also
	// BSD tar (or if you're using macOS, do not forget to check GNU Tar)
	// Also make sure that sparse files are extracting correctly
	cmd := exec.Command("tar", "-Sxf", archive)
	cmd.Dir = destination + "/"
	log.Debug().Msgf("%v", cmd)

	raw, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("%v: %s", err, raw)
	}
	return nil
}
// DownloadRootFS downloads the qemu rootfs image tarball named file
// from the cache mirror, unpacks it into path, and removes the archive.
func DownloadRootFS(path, file string) (err error) {
	tmp, err := fs.TempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	fileurl, err := url.JoinPath(URL, file+".tar.gz")
	if err != nil {
		return
	}

	log.Debug().Msgf("download qemu image from %s", fileurl)

	resp, err := grab.Get(tmp, fileurl)
	if err != nil {
		// the mirror may simply not carry this image; point the
		// user at manual generation instead of the raw HTTP error
		err = fmt.Errorf("cannot download %s. It looks like you need "+
			"to generate it manually and place it "+
			"to ~/.out-of-tree/images/; "+
			"check documentation for additional information",
			fileurl)
		return
	}

	err = unpackTar(resp.Filename, path)
	if err != nil {
		return
	}

	// keep only the unpacked image, drop the archive
	return os.Remove(resp.Filename)
}
// DownloadDebianCache fetches the debian metadata cache file from the
// mirror and moves it to cachePath.
func DownloadDebianCache(cachePath string) (err error) {
	tmp, err := fs.TempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	name := filepath.Base(cachePath)
	fileurl, err := url.JoinPath(URL, name)
	if err != nil {
		return
	}

	log.Debug().Msgf("download debian cache from %s", fileurl)

	resp, err := grab.Get(tmp, fileurl)
	if err != nil {
		return
	}

	return os.Rename(resp.Filename, cachePath)
}
// PackageURL returns the cache-mirror URL for the package whose
// original URL is orig. found is false when the distro is not Debian
// (only Debian packages are mirrored) or the mirror lacks the file.
func PackageURL(dt distro.ID, orig string) (found bool, fileurl string) {
	if dt != distro.Debian {
		return
	}

	filename := filepath.Base(orig)

	fileurl, err := url.JoinPath(URL, "packages/debian", filename)
	if err != nil {
		return
	}

	resp, err := http.Head(fileurl)
	if err != nil {
		return
	}
	// close the (empty) body so the transport can reuse the connection
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return
	}
	found = true
	return
}
// ContainerURL returns the cache URL of the container image tarball
// for container name.
func ContainerURL(name string) (path string) {
	tarball := fmt.Sprintf("%s.tar.gz", name)
	path, _ = url.JoinPath(URL, "containers", tarball)
	return
}

49
cache/cache_test.go vendored Normal file
View File

@ -0,0 +1,49 @@
package cache
import (
"os"
"path/filepath"
"testing"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// TestDownloadRootFS checks that a known rootfs image can be fetched
// from the cache mirror and lands at the expected path.
func TestDownloadRootFS(t *testing.T) {
	tmp, err := os.MkdirTemp("", "out-of-tree_")
	if err != nil {
		// fail loudly instead of silently skipping the test
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	file := "out_of_tree_ubuntu_12__04.img"

	err = DownloadRootFS(tmp, file)
	if err != nil {
		t.Fatal(err)
	}

	if !fs.PathExists(filepath.Join(tmp, file)) {
		t.Fatalf("%s does not exist", file)
	}
}
// TestDownloadDebianCache checks that the debian metadata cache can be
// fetched from the mirror to a caller-chosen path.
func TestDownloadDebianCache(t *testing.T) {
	tmp, err := fs.TempDir()
	if err != nil {
		// fail loudly instead of silently skipping the test
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	file := "debian.cache"

	cachePath := filepath.Join(tmp, file)

	err = DownloadDebianCache(cachePath)
	if err != nil {
		t.Fatal(err)
	}

	if !fs.PathExists(filepath.Join(tmp, file)) {
		t.Fatalf("%s does not exist", file)
	}
}

262
client/client.go Normal file
View File

@ -0,0 +1,262 @@
package client
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"strconv"
"sync"
"github.com/davecgh/go-spew/spew"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
type Client struct {
RemoteAddr string
}
// client dials the daemon at c.RemoteAddr over mutually-authenticated
// TLS and returns the open connection; the caller must Close it.
//
// The same cert.pem serves both as the client certificate and as the
// CA root, i.e. daemon and client share one self-signed identity.
// Any setup or dial failure is fatal.
func (c Client) client() *tls.Conn {
	if !fs.PathExists(dotfiles.File("daemon/cert.pem")) {
		log.Fatal().Msgf("no {cert,key}.pem at %s",
			dotfiles.Dir("daemon"))
	}

	cert, err := tls.LoadX509KeyPair(
		dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	cacert, err := os.ReadFile(dotfiles.File("daemon/cert.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	// trust only our own certificate
	certpool := x509.NewCertPool()
	certpool.AppendCertsFromPEM(cacert)

	tlscfg := &tls.Config{
		RootCAs:      certpool,
		Certificates: []tls.Certificate{cert},
	}

	conn, err := tls.Dial("tcp", c.RemoteAddr, tlscfg)
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	return conn // conn.Close()
}
// request performs one command round-trip to the daemon: it opens a
// fresh TLS connection, encodes the request (with optional payload
// data) and decodes the reply.
//
// Decode failures are fatal; a non-empty resp.Error is converted to
// err, which is then also fatal — so in practice callers only see a
// non-nil err if this fatal behavior is ever relaxed.
func (c Client) request(cmd api.Command, data any) (resp api.Resp, err error) {
	req := api.Req{Command: cmd}
	if data != nil {
		req.SetData(data)
	}

	conn := c.client()
	defer conn.Close()

	req.Encode(conn)

	err = resp.Decode(conn)
	if err != nil {
		log.Fatal().Err(err).Msgf("request %v", req)
	}

	log.Debug().Msgf("resp: %v", resp)

	if resp.Error != "" {
		err = errors.New(resp.Error)
		log.Fatal().Err(err).Msg("")
	}
	return
}
// Jobs fetches the jobs matching params from the daemon.
func (c Client) Jobs(params api.ListJobsParams) (jobs []api.Job, err error) {
	// check the request error instead of discarding it, consistent
	// with AddJob/Kernels
	resp, err := c.request(api.ListJobs, &params)
	if err != nil {
		return
	}

	err = resp.GetData(&jobs)
	if err != nil {
		log.Error().Err(err).Msg("")
	}
	return
}
// AddJob submits a new job to the daemon and returns its uuid.
func (c Client) AddJob(job api.Job) (uuid string, err error) {
	var resp api.Resp
	resp, err = c.request(api.AddJob, &job)
	if err != nil {
		return
	}

	err = resp.GetData(&uuid)
	return
}
// Repos fetches the repository list from the daemon.
func (c Client) Repos() (repos []api.Repo, err error) {
	// check the request error instead of discarding it, consistent
	// with AddJob/Kernels
	resp, err := c.request(api.ListRepos, nil)
	if err != nil {
		return
	}

	log.Debug().Msgf("resp: %v", spew.Sdump(resp))

	err = resp.GetData(&repos)
	if err != nil {
		log.Error().Err(err).Msg("")
	}
	return
}
// logWriter traces bytes flowing through the git proxy; tag marks the
// direction ("send"/"recv").
type logWriter struct {
	tag string
}

// Write logs p quoted at trace level and always reports the full
// length as written (it never fails the surrounding io.Copy).
func (lw logWriter) Write(p []byte) (n int, err error) {
	n = len(p)
	log.Trace().Str("tag", lw.tag).Msgf("%v", strconv.Quote(string(p)))
	return
}
// handler bridges one accepted proxy connection to the daemon: it
// switches the daemon connection into raw mode, then shovels bytes in
// both directions (traced via logWriter) until either side closes.
func (c Client) handler(cConn net.Conn) {
	defer cConn.Close()

	dConn := c.client()
	defer dConn.Close()

	req := api.Req{Command: api.RawMode}
	req.Encode(dConn)

	// recv direction runs concurrently; when the send copy below
	// returns, the deferred Closes tear down both ends
	go io.Copy(cConn, io.TeeReader(dConn, logWriter{"recv"}))
	io.Copy(dConn, io.TeeReader(cConn, logWriter{"send"}))
}
var ErrRepoNotFound = errors.New("repo not found")
// GetRepo virtual API call
func (c Client) GetRepo(name string) (repo api.Repo, err error) {
	// TODO add API call
	var repos []api.Repo
	repos, err = c.Repos()
	if err != nil {
		return
	}

	for _, candidate := range repos {
		if candidate.Name != name {
			continue
		}
		repo = candidate
		return
	}

	err = ErrRepoNotFound
	return
}
// GitProxy listens on addr and forwards every accepted connection to
// the daemon via handler.
//
// ready must be passed in locked; it is unlocked once the listener is
// up, which releases a caller blocked on a second Lock (see PushRepo).
// NOTE(review): Unlock runs on every loop iteration — if more than two
// connections are accepted without the caller re-locking, this would
// unlock an unlocked mutex and panic; confirm git only opens one
// connection per push, or replace with sync.Once/channel signaling.
func (c Client) GitProxy(addr string, ready *sync.Mutex) {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatal().Err(err).Msg("git proxy listen")
	}
	defer l.Close()

	log.Debug().Msgf("git proxy listen on %v", addr)

	for {
		ready.Unlock()
		conn, err := l.Accept()
		if err != nil {
			log.Fatal().Err(err).Msg("accept")
		}

		log.Debug().Msgf("git proxy accept %s", conn.RemoteAddr())

		go c.handler(conn)
	}
}
// PushRepo force-pushes the local repository through a temporary git
// proxy to the daemon.
func (c Client) PushRepo(repo api.Repo) (err error) {
	addr := qemu.GetFreeAddrPort()
	ready := &sync.Mutex{}
	ready.Lock()
	go c.GitProxy(addr, ready)
	// GitProxy unlocks once its listener is up
	ready.Lock()

	remote := fmt.Sprintf("git://%s/%s", addr, repo.Name)
	log.Debug().Msgf("git proxy remote: %v", remote)

	raw, err := exec.Command("git", "--work-tree", repo.Path, "push", "--force", remote).
		CombinedOutput()
	if err != nil {
		// include git's output; the bare exec error ("exit status 1")
		// is useless for debugging
		err = fmt.Errorf("%v: %s", err, raw)
		return
	}

	log.Info().Msgf("push repo %v\n%v", repo, string(raw))
	return
}
// AddRepo registers repo on the daemon.
func (c Client) AddRepo(repo api.Repo) (err error) {
	if _, err = c.request(api.AddRepo, &repo); err != nil {
		return
	}

	log.Info().Msgf("add repo %v", repo)
	return
}
// Kernels fetches the list of kernels known to the daemon.
func (c Client) Kernels() (kernels []distro.KernelInfo, err error) {
	var resp api.Resp
	resp, err = c.request(api.Kernels, nil)
	if err != nil {
		return
	}

	if err = resp.GetData(&kernels); err != nil {
		log.Error().Err(err).Msg("")
	}

	log.Info().Msgf("got %d kernels", len(kernels))
	return
}
// JobStatus returns the daemon-side status of the job uuid.
func (c Client) JobStatus(uuid string) (st api.Status, err error) {
	var resp api.Resp
	resp, err = c.request(api.JobStatus, &uuid)
	if err != nil {
		return
	}

	if err = resp.GetData(&st); err != nil {
		log.Error().Err(err).Msg("")
	}
	return
}
// JobLogs fetches all log records of the job uuid.
func (c Client) JobLogs(uuid string) (logs []api.JobLog, err error) {
	var resp api.Resp
	resp, err = c.request(api.JobLogs, &uuid)
	if err != nil {
		return
	}

	if err = resp.GetData(&logs); err != nil {
		log.Error().Err(err).Msg("")
	}
	return
}

94
cmd/container.go Normal file
View File

@ -0,0 +1,94 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os/exec"
"path/filepath"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/container"
)
type ContainerCmd struct {
Filter string `help:"filter by name"`
List ContainerListCmd `cmd:"" help:"list containers"`
Save ContainerSaveCmd `cmd:"" help:"save containers"`
Cleanup ContainerCleanupCmd `cmd:"" help:"cleanup containers"`
}
// Containers returns the names of all out-of-tree container images,
// optionally narrowed by the Filter flag (substring match).
func (cmd ContainerCmd) Containers() (names []string) {
	images, err := container.Images()
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	for _, img := range images {
		if cmd.Filter == "" || strings.Contains(img.Name, cmd.Filter) {
			names = append(names, img.Name)
		}
	}
	return
}
type ContainerListCmd struct{}
// Run prints the matching container names, one per line.
func (cmd ContainerListCmd) Run(containerCmd *ContainerCmd) (err error) {
	names := containerCmd.Containers()
	for _, name := range names {
		fmt.Println(name)
	}
	return
}
type ContainerSaveCmd struct {
OutDir string `help:"directory to save containers" default:"./" type:"existingdir"`
}
// Run saves each matching container image into OutDir as name.tar and
// then compresses it to name.tar.gz; stops at the first failure.
func (cmd ContainerSaveCmd) Run(containerCmd *ContainerCmd) (err error) {
	for _, name := range containerCmd.Containers() {
		nlog := log.With().Str("name", name).Logger()

		output := filepath.Join(cmd.OutDir, name+".tar")
		nlog.Info().Msgf("saving to %v", output)

		err = container.Save(name, output)
		if err != nil {
			return
		}

		compressed := output + ".gz"
		nlog.Info().Msgf("compressing to %v", compressed)

		// gzip replaces the .tar file with .tar.gz in place
		var raw []byte
		raw, err = exec.Command("gzip", output).CombinedOutput()
		if err != nil {
			nlog.Error().Err(err).Msg(string(raw))
			return
		}

		nlog.Info().Msg("done")
	}
	return
}
type ContainerCleanupCmd struct{}
// Run removes every container image returned by Containers; stops at
// the first removal failure.
func (cmd ContainerCleanupCmd) Run(containerCmd *ContainerCmd) (err error) {
	for _, name := range containerCmd.Containers() {
		var out []byte
		out, err = exec.Command(container.Runtime, "image", "rm", name).
			CombinedOutput()
		if err != nil {
			log.Error().Err(err).Str("output", string(out)).Msg("")
			return
		}
	}
	return
}

123
cmd/daemon.go Normal file
View File

@ -0,0 +1,123 @@
// Copyright 2024 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"encoding/json"
"fmt"
"time"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/client"
)
type daemonCmd struct {
Addr string `default:":63527"`
Job DaemonJobCmd `cmd:"" aliases:"jobs" help:"manage jobs"`
Repo DaemonRepoCmd `cmd:"" aliases:"repos" help:"manage repositories"`
}
type DaemonJobCmd struct {
List DaemonJobsListCmd `cmd:"" help:"list jobs"`
Status DaemonJobsStatusCmd `cmd:"" help:"show job status"`
Log DaemonJobsLogsCmd `cmd:"" help:"job logs"`
}
type DaemonJobsListCmd struct {
Group string `help:"group uuid"`
Repo string `help:"repo name"`
Commit string `help:"commit sha"`
Status string `help:"job status"`
After time.Time `help:"updated after" format:"2006-01-02 15:04:05"`
}
// Run lists daemon jobs matching the filter flags and prints them as
// indented JSON.
func (cmd *DaemonJobsListCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}

	params := api.ListJobsParams{
		Group:  cmd.Group,
		Repo:   cmd.Repo,
		Commit: cmd.Commit,
		Status: api.Status(cmd.Status),
	}
	if !cmd.After.IsZero() {
		params.UpdatedAfter = cmd.After.Unix()
	}

	jobs, err := c.Jobs(params)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}

	b, err := json.MarshalIndent(jobs, "", " ")
	if err != nil {
		log.Error().Err(err).Msg("")
		// do not print a bogus empty document on marshal failure
		return
	}

	fmt.Println(string(b))

	return
}
type DaemonJobsStatusCmd struct {
UUID string `arg:""`
}
// Run prints the status of the job identified by UUID.
func (cmd *DaemonJobsStatusCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}

	var st api.Status
	st, err = c.JobStatus(cmd.UUID)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}

	fmt.Println(st)

	return
}
type DaemonJobsLogsCmd struct {
UUID string `arg:""`
}
// Run prints every log record of the job identified by UUID.
func (cmd *DaemonJobsLogsCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}

	var logs []api.JobLog
	logs, err = c.JobLogs(cmd.UUID)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}

	for _, l := range logs {
		log.Info().Msg(l.Name)
		fmt.Println(l.Text)
	}

	return
}
type DaemonRepoCmd struct {
List DaemonRepoListCmd `cmd:"" help:"list repos"`
}
type DaemonRepoListCmd struct{}
// Run fetches the repository list from the daemon and prints it as
// indented JSON.
func (cmd *DaemonRepoListCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}

	repos, err := c.Repos()
	if err != nil {
		return
	}

	b, err := json.MarshalIndent(repos, "", " ")
	if err != nil {
		log.Error().Err(err).Msg("")
		// do not print a bogus empty document on marshal failure
		return
	}

	fmt.Println(string(b))

	return
}

47
cmd/daemon_linux.go Normal file
View File

@ -0,0 +1,47 @@
//go:build linux
// +build linux
package cmd
import (
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/daemon"
)
type DaemonCmd struct {
daemonCmd
Threads int `help:"number of threads to use"`
OvercommitMemory float64 `help:"overcommit memory factor"`
OvercommitCPU float64 `help:"overcommit CPU factor"`
Serve DaemonServeCmd `cmd:"" help:"start daemon"`
}
type DaemonServeCmd struct{}
// Run starts the out-of-tree daemon: it initializes it from the kernel
// config, applies thread/overcommit flags, and serves on dm.Addr.
func (cmd *DaemonServeCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	d, err := daemon.Init(g.Config.Kernels)
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}
	defer d.Kill()

	if dm.Threads > 0 {
		d.Threads = dm.Threads
	}

	if dm.OvercommitMemory > 0 {
		// apply the memory flag to the memory resource (it was
		// mistakenly applied to CPU before)
		d.Resources.Memory.SetOvercommit(dm.OvercommitMemory)
	}

	if dm.OvercommitCPU > 0 {
		d.Resources.CPU.SetOvercommit(dm.OvercommitCPU)
	}

	go d.Daemon()
	d.Listen(dm.Addr)
	return
}

8
cmd/daemon_macos.go Normal file
View File

@ -0,0 +1,8 @@
//go:build darwin
// +build darwin
package cmd
type DaemonCmd struct {
daemonCmd
}

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
package cmd
import (
"database/sql"
@ -12,12 +12,13 @@ import (
_ "github.com/mattn/go-sqlite3"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// Change on ANY database update
const currentDatabaseVersion = 2
const currentDatabaseVersion = 3
const versionField = "db_version"
@ -27,9 +28,9 @@ type logEntry struct {
Timestamp time.Time
qemu.System
config.Artifact
config.KernelInfo
phasesResult
artifact.Artifact
distro.KernelInfo
artifact.Result
}
func createLogTable(db *sql.DB) (err error) {
@ -46,6 +47,8 @@ func createLogTable(db *sql.DB) (err error) {
distro_release TEXT,
kernel_release TEXT,
internal_err TEXT,
build_output TEXT,
build_ok BOOLEAN,
@ -120,18 +123,19 @@ func getVersion(db *sql.DB) (version int, err error) {
return
}
func addToLog(db *sql.DB, q *qemu.System, ka config.Artifact,
ki config.KernelInfo, res *phasesResult, tag string) (err error) {
func addToLog(db *sql.DB, q *qemu.System, ka artifact.Artifact,
ki distro.KernelInfo, res *artifact.Result, tag string) (err error) {
stmt, err := db.Prepare("INSERT INTO log (name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_output, build_ok, " +
"run_output, run_ok, " +
"test_output, test_ok, " +
"qemu_stdout, qemu_stderr, " +
"kernel_panic, timeout_kill) " +
"VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, " +
"$10, $11, $12, $13, $14, $15, $16);")
"$10, $11, $12, $13, $14, $15, $16, $17);")
if err != nil {
return
}
@ -140,7 +144,8 @@ func addToLog(db *sql.DB, q *qemu.System, ka config.Artifact,
_, err = stmt.Exec(
ka.Name, ka.Type, tag,
ki.DistroType, ki.DistroRelease, ki.KernelRelease,
ki.Distro.ID, ki.Distro.Release, ki.KernelRelease,
res.InternalErrorString,
res.Build.Output, res.Build.Ok,
res.Run.Output, res.Run.Ok,
res.Test.Output, res.Test.Ok,
@ -157,6 +162,7 @@ func addToLog(db *sql.DB, q *qemu.System, ka config.Artifact,
func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_ok, run_ok, test_ok, kernel_panic, " +
"timeout_kill FROM log ORDER BY datetime(time) DESC " +
"LIMIT $1")
@ -172,10 +178,12 @@ func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
defer rows.Close()
for rows.Next() {
var internalErr sql.NullString
le := logEntry{}
err = rows.Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.DistroType, &le.DistroRelease, &le.KernelRelease,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.KernelPanic, &le.KilledByTimeout,
)
@ -183,6 +191,8 @@ func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
return
}
le.InternalErrorString = internalErr.String
if tag == "" || tag == le.Tag {
les = append(les, le)
}
@ -191,11 +201,12 @@ func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
return
}
func getAllArtifactLogs(db *sql.DB, tag string, num int, ka config.Artifact) (
func getAllArtifactLogs(db *sql.DB, tag string, num int, ka artifact.Artifact) (
les []logEntry, err error) {
stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_ok, run_ok, test_ok, kernel_panic, " +
"timeout_kill FROM log WHERE name=$1 AND type=$2 " +
"ORDER BY datetime(time) DESC LIMIT $3")
@ -211,10 +222,12 @@ func getAllArtifactLogs(db *sql.DB, tag string, num int, ka config.Artifact) (
defer rows.Close()
for rows.Next() {
var internalErr sql.NullString
le := logEntry{}
err = rows.Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.DistroType, &le.DistroRelease, &le.KernelRelease,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.KernelPanic, &le.KilledByTimeout,
)
@ -222,6 +235,8 @@ func getAllArtifactLogs(db *sql.DB, tag string, num int, ka config.Artifact) (
return
}
le.InternalErrorString = internalErr.String
if tag == "" || tag == le.Tag {
les = append(les, le)
}
@ -233,6 +248,7 @@ func getAllArtifactLogs(db *sql.DB, tag string, num int, ka config.Artifact) (
func getLogByID(db *sql.DB, id int) (le logEntry, err error) {
stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_ok, run_ok, test_ok, " +
"build_output, run_output, test_output, " +
"qemu_stdout, qemu_stderr, " +
@ -243,32 +259,48 @@ func getLogByID(db *sql.DB, id int) (le logEntry, err error) {
}
defer stmt.Close()
var internalErr sql.NullString
err = stmt.QueryRow(id).Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.DistroType, &le.DistroRelease, &le.KernelRelease,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.Build.Output, &le.Run.Output, &le.Test.Output,
&le.Stdout, &le.Stderr,
&le.KernelPanic, &le.KilledByTimeout,
)
if err != nil {
return
}
le.InternalErrorString = internalErr.String
return
}
func getLastLog(db *sql.DB) (le logEntry, err error) {
var internalErr sql.NullString
err = db.QueryRow("SELECT MAX(id), time, name, type, tag, "+
"distro_type, distro_release, kernel_release, "+
"internal_err, "+
"build_ok, run_ok, test_ok, "+
"build_output, run_output, test_output, "+
"qemu_stdout, qemu_stderr, "+
"kernel_panic, timeout_kill "+
"FROM log").Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.DistroType, &le.DistroRelease, &le.KernelRelease,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.Build.Output, &le.Run.Output, &le.Test.Output,
&le.Stdout, &le.Stderr,
&le.KernelPanic, &le.KilledByTimeout,
)
if err != nil {
return
}
le.InternalErrorString = internalErr.String
return
}
@ -323,10 +355,23 @@ func openDatabase(path string) (db *sql.DB, err error) {
}
version = 2
} else if version == 2 {
_, err = db.Exec(`ALTER TABLE log ADD internal_err TEXT`)
if err != nil {
return
}
err = metaSetValue(db, versionField, "3")
if err != nil {
return
}
version = 3
}
if version != currentDatabaseVersion {
err = fmt.Errorf("Database is not supported (%d instead of %d)",
err = fmt.Errorf("database is not supported (%d instead of %d)",
version, currentDatabaseVersion)
return
}

View File

@ -2,12 +2,11 @@
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
package cmd
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"time"
@ -15,7 +14,10 @@ import (
"github.com/rs/zerolog/log"
"gopkg.in/logrusorgru/aurora.v2"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
@ -52,7 +54,7 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
} else {
configPath = cmd.ArtifactConfig
}
ka, err := config.ReadArtifactConfig(configPath)
ka, err := artifact.Artifact{}.Read(configPath)
if err != nil {
return
}
@ -144,7 +146,7 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
}
defer q.Stop()
tmp, err := ioutil.TempDir("/tmp/", "out-of-tree_")
tmp, err := fs.TempDir()
if err != nil {
return
}
@ -157,14 +159,14 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
if ka.StandardModules {
// Module depends on one of the standard modules
err = copyStandardModules(q, ki)
err = artifact.CopyStandardModules(q, ki)
if err != nil {
log.Print(err)
return
}
}
err = preloadModules(q, ka, ki, g.Config.Docker.Timeout.Duration)
err = artifact.PreloadModules(q, ka, ki, g.Config.Docker.Timeout.Duration)
if err != nil {
log.Print(err)
return
@ -172,21 +174,21 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
var buildDir, outFile, output, remoteFile string
if ka.Type == config.Script {
if ka.Type == artifact.Script {
err = q.CopyFile("root", ka.Script, ka.Script)
if err != nil {
return
}
} else {
buildDir, outFile, output, err = build(tmp, ka, ki, g.Config.Docker.Timeout.Duration)
buildDir, outFile, output, err = artifact.Build(log.Logger, tmp, ka, ki, g.Config.Docker.Timeout.Duration)
if err != nil {
log.Print(err, output)
return
}
remoteFile = "/tmp/exploit"
if ka.Type == config.KernelModule {
remoteFile = "/tmp/module.ko"
remoteFile = "/tmp/" + strings.Replace(ka.Name, " ", "_", -1)
if ka.Type == artifact.KernelModule {
remoteFile += ".ko"
}
err = q.CopyFile("user", outFile, remoteFile)
@ -198,7 +200,9 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
// Copy all test files to the remote machine
for _, f := range ka.TestFiles {
if f.Local[0] != '/' {
f.Local = buildDir + "/" + f.Local
if buildDir != "" {
f.Local = buildDir + "/" + f.Local
}
}
err = q.CopyFile(f.User, f.Local, f.Remote)
if err != nil {
@ -219,15 +223,15 @@ func (cmd *DebugCmd) Run(g *Globals) (err error) {
return
}
func firstSupported(kcfg config.KernelConfig, ka config.Artifact,
kernel string) (ki config.KernelInfo, err error) {
func firstSupported(kcfg config.KernelConfig, ka artifact.Artifact,
kernel string) (ki distro.KernelInfo, err error) {
km, err := kernelMask(kernel)
if err != nil {
return
}
ka.SupportedKernels = []config.KernelMask{km}
ka.Targets = []artifact.Target{km}
for _, ki = range kcfg.Kernels {
var supported bool
@ -237,7 +241,7 @@ func firstSupported(kcfg config.KernelConfig, ka config.Artifact,
}
}
err = errors.New("No supported kernel found")
err = errors.New("no supported kernel found")
return
}

216
cmd/distro.go Normal file
View File

@ -0,0 +1,216 @@
package cmd
import (
"context"
"fmt"
"math"
"os"
"path/filepath"
"regexp"
"time"
"github.com/cavaliergopher/grab/v3"
"github.com/davecgh/go-spew/spew"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/distro/debian"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// DistroCmd is the top-level "distro" command group.
type DistroCmd struct {
	List DistroListCmd `cmd:"" help:"list available distros"`

	// Debian maintenance helpers, hidden from normal help output.
	Debian DebianCmd `cmd:"" hidden:""`
}
// DebianCmd groups hidden helpers that manage the Debian kernel
// metadata cache and downloaded deb packages.
type DebianCmd struct {
	Cache DebianCacheCmd `cmd:"" help:"populate cache"`
	Fetch DebianFetchCmd `cmd:"" help:"download deb packages"`

	// Limit and Regex are shared by both subcommands.
	Limit int    `help:"limit amount of kernels to fetch"`
	Regex string `help:"match deb pkg names by regex" default:".*"`
}
// DebianCacheCmd populates/refreshes the Debian kernels metadata cache.
type DebianCacheCmd struct {
	Path          string `help:"path to cache"`
	Refetch       int    `help:"days before refetch versions without deb package" default:"7"`
	UpdateRelease bool   `help:"update release data"`
	UpdateKbuild  bool   `help:"update kbuild package"`

	// Dump prints matching cache entries to stdout after the refresh.
	Dump bool `help:"dump cache"`
}
// Run refreshes the local Debian kernels metadata cache and, with
// --dump, prints the entries whose deb image name matches the regex.
func (cmd *DebianCacheCmd) Run(dcmd *DebianCmd) (err error) {
	if cmd.Path != "" {
		debian.CachePath = cmd.Path
	}
	debian.RefetchDays = cmd.Refetch

	log.Info().Msg("Fetching kernels...")

	if dcmd.Limit == 0 {
		dcmd.Limit = math.MaxInt32
	}

	// Accumulate the requested update flags into a single mode value.
	fetchMode := debian.NoMode
	if cmd.UpdateRelease {
		fetchMode |= debian.UpdateRelease
	}
	if cmd.UpdateKbuild {
		fetchMode |= debian.UpdateKbuild
	}

	kernels, err := debian.GetKernelsWithLimit(dcmd.Limit, fetchMode)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}

	if cmd.Dump {
		matcher, rerr := regexp.Compile(dcmd.Regex)
		if rerr != nil {
			log.Fatal().Err(rerr).Msg("regex")
		}

		for _, k := range kernels {
			if matcher.MatchString(k.Image.Deb.Name) {
				fmt.Println(spew.Sdump(k))
			}
		}
	}

	log.Info().Msg("Success")
	return
}
// DebianFetchCmd downloads deb packages for cached Debian kernels into
// a local directory.
type DebianFetchCmd struct {
	Path         string `help:"path to download directory" type:"existingdir" default:"./"`
	IgnoreMirror bool   `help:"ignore check if packages on the mirror"`
	Max          int    `help:"do not download more than X" default:"100500"`
	Threads      int    `help:"parallel download threads" default:"8"`

	Timeout time.Duration `help:"timeout for each download" default:"1m"`

	// swg bounds the number of concurrent fetch goroutines.
	swg sizedwaitgroup.SizedWaitGroup
	// hasResults records whether at least one package was downloaded.
	hasResults bool
}
// fetch downloads one deb package into cmd.Path.  It is started as a
// goroutine per package and releases cmd.swg on return.  The download
// goes through a temporary directory so a partial file is never left
// at the final target path.  Fatal errors (mkdir/rename) exit the
// whole process.
func (cmd *DebianFetchCmd) fetch(pkg snapshot.Package) {
	flog := log.With().
		Str("pkg", pkg.Deb.Name).
		Logger()
	defer cmd.swg.Done()

	if !cmd.IgnoreMirror {
		flog.Debug().Msg("check mirror")
		// Best effort: a mirror lookup error is treated as "not found".
		found, _ := cache.PackageURL(distro.Debian, pkg.Deb.URL)
		if found {
			flog.Debug().Msg("found on the mirror")
			return
		}
	}

	target := filepath.Join(cmd.Path, filepath.Base(pkg.Deb.URL))

	if fs.PathExists(target) {
		flog.Debug().Msg("already exists")
		return
	}

	tmp, err := os.MkdirTemp(cmd.Path, "tmp-")
	if err != nil {
		// Fatal exits the process; the return below is unreachable
		// and kept only for shape.
		flog.Fatal().Err(err).Msg("mkdir")
		return
	}
	defer os.RemoveAll(tmp)

	flog.Info().Msg("fetch")
	flog.Debug().Msg(pkg.Deb.URL)

	// Bound each download by cmd.Timeout.
	ctx, cancel := context.WithTimeout(context.Background(), cmd.Timeout)
	defer cancel()

	req, err := grab.NewRequest(tmp, pkg.Deb.URL)
	if err != nil {
		flog.Warn().Err(err).Msg("cannot create request")
		return
	}
	req = req.WithContext(ctx)

	resp := grab.DefaultClient.Do(req)
	if err := resp.Err(); err != nil {
		flog.Warn().Err(err).Msg("request cancelled")
		return
	}

	// Atomically move the finished download to its final name.
	err = os.Rename(resp.Filename, target)
	if err != nil {
		flog.Fatal().Err(err).Msg("mv")
	}

	// NOTE(review): these two writes happen concurrently from multiple
	// fetch goroutines with no synchronization — data race on
	// hasResults and Max; consider sync/atomic. TODO confirm.
	cmd.hasResults = true
	cmd.Max--
}
// Run matches cached Debian kernel packages against dcmd.Regex and
// downloads up to cmd.Max of them using cmd.Threads parallel workers.
// Exits the process (log.Fatal) on an invalid regex or when nothing
// was downloaded at all.
func (cmd *DebianFetchCmd) Run(dcmd *DebianCmd) (err error) {
	re, err := regexp.Compile(dcmd.Regex)
	if err != nil {
		log.Fatal().Err(err).Msg("regex")
	}

	log.Info().Msg("will not download packages that exist on the mirror")
	log.Info().Msg("use --ignore-mirror if you really need it")

	if dcmd.Limit == 0 {
		dcmd.Limit = math.MaxInt32
	}

	kernels, err := debian.GetKernelsWithLimit(dcmd.Limit, debian.NoMode)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}

	// Collect every package whose deb name matches the regex.
	var packages []snapshot.Package
	for _, kernel := range kernels {
		for _, pkg := range kernel.Packages() {
			if !re.MatchString(pkg.Deb.Name) {
				continue
			}
			packages = append(packages, pkg)
		}
	}

	cmd.swg = sizedwaitgroup.New(cmd.Threads)
	for _, pkg := range packages {
		// NOTE(review): Max is decremented inside fetch goroutines
		// without synchronization, so this cap check is racy and
		// only approximate — TODO confirm whether exactness matters.
		if cmd.Max <= 0 {
			break
		}

		cmd.swg.Add()
		go cmd.fetch(pkg)
	}
	cmd.swg.Wait()

	if !cmd.hasResults {
		log.Fatal().Msg("no packages found to download")
	}
	return
}
type DistroListCmd struct{}
// Run writes the identifier and release of every registered distro to
// stdout, one pair per line.
func (cmd *DistroListCmd) Run() (err error) {
	all := distro.List()
	for i := range all {
		fmt.Println(all[i].ID, all[i].Release)
	}
	return
}

View File

@ -2,14 +2,15 @@
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
package cmd
import (
"fmt"
"github.com/naoina/toml"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/distro"
)
type GenCmd struct {
@ -19,27 +20,30 @@ type GenCmd struct {
func (cmd *GenCmd) Run(g *Globals) (err error) {
switch cmd.Type {
case "module":
err = genConfig(config.KernelModule)
err = genConfig(artifact.KernelModule)
case "exploit":
err = genConfig(config.KernelExploit)
err = genConfig(artifact.KernelExploit)
}
return
}
func genConfig(at config.ArtifactType) (err error) {
a := config.Artifact{
func genConfig(at artifact.ArtifactType) (err error) {
a := artifact.Artifact{
Name: "Put name here",
Type: at,
}
a.SupportedKernels = append(a.SupportedKernels, config.KernelMask{
DistroType: config.Ubuntu,
DistroRelease: "18.04",
ReleaseMask: ".*",
a.Targets = append(a.Targets, artifact.Target{
Distro: distro.Distro{ID: distro.Ubuntu, Release: "18.04"},
Kernel: artifact.Kernel{Regex: ".*"},
})
a.Preload = append(a.Preload, config.PreloadModule{
a.Targets = append(a.Targets, artifact.Target{
Distro: distro.Distro{ID: distro.Debian, Release: "8"},
Kernel: artifact.Kernel{Regex: ".*"},
})
a.Preload = append(a.Preload, artifact.PreloadModule{
Repo: "Repo name (e.g. https://github.com/openwall/lkrg)",
})
a.Patches = append(a.Patches, config.Patch{
a.Patches = append(a.Patches, artifact.Patch{
Path: "/path/to/profiling.patch",
})

18
cmd/globals.go Normal file
View File

@ -0,0 +1,18 @@
package cmd
import (
"net/url"
"code.dumpstack.io/tools/out-of-tree/config"
)
// Globals holds flags and configuration shared by every subcommand.
type Globals struct {
	Config config.OutOfTree `help:"path to out-of-tree configuration" default:"~/.out-of-tree/out-of-tree.toml"`

	WorkDir string `help:"path to work directory" default:"./" type:"path" existingdir:""`

	// CacheURL points at the mirror used for prebuilt images/containers.
	CacheURL url.URL

	// Remote switches execution to the daemon at RemoteAddr.
	Remote     bool   `help:"run at remote server"`
	RemoteAddr string `default:"localhost:63527"`
}

114
cmd/images.go Normal file
View File

@ -0,0 +1,114 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// ImageCmd is the "image" command group for managing qemu rootfs images.
type ImageCmd struct {
	List ImageListCmd `cmd:"" help:"list images"`
	Edit ImageEditCmd `cmd:"" help:"edit image"`
}
type ImageListCmd struct{}
// Run prints the name of every entry in the images directory under
// the dotfiles root (~/.out-of-tree).
func (cmd *ImageListCmd) Run(g *Globals) (err error) {
	var entries []os.DirEntry
	entries, err = os.ReadDir(dotfiles.Dir("images"))
	if err != nil {
		return
	}

	for i := range entries {
		fmt.Println(entries[i].Name())
	}
	return
}
// ImageEditCmd boots an image in a mutable qemu instance so it can be
// modified interactively over ssh.
type ImageEditCmd struct {
	Name   string `help:"image name" required:""`
	DryRun bool   `help:"do nothing, just print commands"`
}
// Run boots the named image in a mutable qemu VM — reusing the kernel
// that kernels.toml associates with the image, if any — and waits for
// the user to finish an interactive ssh session.  With --dry-run it
// only prints the qemu and ssh command lines.
//
// Fixes: a missing image now aborts instead of falling through and
// handing qemu a nonexistent path, and the error returned by
// qemu.NewSystem is no longer ignored before q is used.
func (cmd *ImageEditCmd) Run(g *Globals) (err error) {
	image := filepath.Join(dotfiles.Dir("images"), cmd.Name)
	if !fs.PathExists(image) {
		fmt.Println("image does not exist")
		return errors.New("image does not exist")
	}

	kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		return
	}

	if len(kcfg.Kernels) == 0 {
		return errors.New("no kernels found")
	}

	// Find the kernel that uses this image as its rootfs.
	// NOTE(review): if none matches, ki stays zero and ki.RootFS is
	// empty — presumably NewSystem should then receive image itself;
	// verify intended behavior.
	ki := distro.KernelInfo{}
	for _, k := range kcfg.Kernels {
		if k.RootFS == image {
			ki = k
			break
		}
	}

	kernel := qemu.Kernel{
		KernelPath: ki.KernelPath,
		InitrdPath: ki.InitrdPath,
	}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		return
	}
	q.Mutable = true

	if cmd.DryRun {
		// Quote arguments that a shell would otherwise split or
		// that contain commas (qemu option separators).
		s := q.Executable()
		for _, arg := range q.Args() {
			if strings.Contains(arg, " ") ||
				strings.Contains(arg, ",") {
				s += fmt.Sprintf(` "%s"`, arg)
			} else {
				s += fmt.Sprintf(" %s", arg)
			}
		}
		fmt.Println(s)
		fmt.Println(q.GetSSHCommand())
		return
	}

	err = q.Start()
	if err != nil {
		fmt.Println("Qemu start error:", err)
		return
	}
	defer q.Stop()

	fmt.Print("ssh command:\n\n\t")
	fmt.Println(q.GetSSHCommand())

	fmt.Print("\npress enter to stop")
	fmt.Scanln()

	q.Command("root", "poweroff")

	// Wait for the VM to shut down cleanly before returning.
	for !q.Died {
		time.Sleep(time.Second)
	}
	return
}

448
cmd/kernel.go Normal file
View File

@ -0,0 +1,448 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/cavaliergopher/grab/v3"
"github.com/naoina/toml"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/kernel"
)
// KernelCmd is the "kernel" command group: generation, installation
// and listing of target kernels built inside distro containers.
type KernelCmd struct {
	NoDownload     bool          `help:"do not download qemu image while kernel generation"`
	UseHost        bool          `help:"also use host kernels"`
	Force          bool          `help:"force reinstall kernel"`
	NoHeaders      bool          `help:"do not install kernel headers"`
	Shuffle        bool          `help:"randomize kernels installation order"`
	Retries        int           `help:"amount of tries for each kernel" default:"2"`
	Threads        int           `help:"threads for parallel installation" default:"1"`
	Update         bool          `help:"update container"`
	ContainerCache bool          `help:"try prebuilt container images first" default:"true" negatable:""`
	Max            int           `help:"maximum kernels to download" default:"100500"`
	NoPrune        bool          `help:"do not remove dangling or unused images from local storage after build"`
	NoCfgRegen     bool          `help:"do not update kernels.toml"`

	ContainerTimeout time.Duration `help:"container timeout"`

	List        KernelListCmd        `cmd:"" help:"list kernels"`
	ListRemote  KernelListRemoteCmd  `cmd:"" help:"list remote kernels"`
	Autogen     KernelAutogenCmd     `cmd:"" help:"generate kernels based on the current config"`
	Genall      KernelGenallCmd      `cmd:"" help:"generate all kernels for distro"`
	Install     KernelInstallCmd     `cmd:"" help:"install specific kernel"`
	ConfigRegen KernelConfigRegenCmd `cmd:"" help:"regenerate config"`

	// shutdown is set by the SIGINT handler to stop generation loops.
	shutdown bool
	// kcfg caches the parsed kernels.toml contents.
	kcfg config.KernelConfig

	// stats counts attempted vs successful kernel installations.
	stats struct {
		overall int
		success int
	}
}
// UpdateConfig regenerates kernels.toml from the kernels currently
// present for every known distro (plus host kernels when UseHost is
// set).  It first logs a warning if some installations failed, and is
// a no-op (apart from that warning) under --no-cfg-regen.
func (cmd KernelCmd) UpdateConfig() (err error) {
	if cmd.stats.success != cmd.stats.overall {
		log.Warn().Msgf("%d kernels failed to install",
			cmd.stats.overall-cmd.stats.success)
	}

	if cmd.NoCfgRegen {
		log.Info().Msgf("kernels.toml is not updated")
		return
	}

	log.Info().Msgf("updating kernels.toml")
	kcfg := config.KernelConfig{}

	if cmd.UseHost {
		// Get host kernels
		kcfg.Kernels, err = kernel.GenHostKernels(!cmd.NoDownload)
		if err != nil {
			return
		}
	}

	// Collect the kernels discovered for each supported distro.
	for _, dist := range distro.List() {
		var kernels []distro.KernelInfo
		kernels, err = dist.Kernels()
		if err != nil {
			return
		}

		kcfg.Kernels = append(kcfg.Kernels, kernels...)
	}

	buf, err := toml.Marshal(&kcfg)
	if err != nil {
		return
	}

	err = os.WriteFile(dotfiles.File("kernels.toml"), buf, os.ModePerm)
	if err != nil {
		return
	}

	log.Info().Msgf("kernels.toml successfully updated")
	return
}
// GenKernel installs a single kernel package pkg for target km,
// retrying up to cmd.Retries times.  Kernels already present in the
// cached config are skipped unless --force is given.
//
// NOTE(review): this method is invoked concurrently from Generate's
// worker goroutines, and cmd.stats is mutated here without any
// synchronization — likely data race; consider sync/atomic. TODO confirm.
func (cmd *KernelCmd) GenKernel(km artifact.Target, pkg string) {
	flog := log.With().
		Str("kernel", pkg).
		Str("distro", km.Distro.String()).
		Logger()

	reinstall := false
	for _, kinfo := range cmd.kcfg.Kernels {
		if !km.Distro.Equal(kinfo.Distro) {
			continue
		}

		// Distros encode the package <-> kernel relation
		// differently, so the match heuristic varies per distro.
		var found bool
		if kinfo.Distro.ID == distro.Debian { // FIXME
			found = pkg == kinfo.Package
		} else if kinfo.Distro.ID == distro.OpenSUSE {
			found = strings.Contains(pkg, kinfo.KernelRelease)
		} else {
			found = strings.Contains(pkg, kinfo.KernelVersion)
		}

		if found {
			if !cmd.Force {
				flog.Info().Msg("already installed")
				return
			}
			reinstall = true
			break
		}
	}

	if reinstall {
		flog.Info().Msg("reinstall")
	} else {
		flog.Info().Msg("install")
	}

	cmd.stats.overall += 1

	// Retry loop: sleep one second between attempts, give up after
	// cmd.Retries tries or when a shutdown was requested.
	var attempt int
	for {
		attempt++

		if cmd.shutdown {
			return
		}

		err := km.Distro.Install(pkg, !cmd.NoHeaders)
		if err == nil {
			cmd.stats.success += 1
			flog.Info().Msg("success")
			break
		} else if attempt >= cmd.Retries {
			flog.Error().Err(err).Msg("install kernel")
			flog.Debug().Msg("skip")
			break
		} else {
			flog.Warn().Err(err).Msg("install kernel")
			time.Sleep(time.Second)
			flog.Info().Msg("retry")
		}
	}
}
// fetchContainerCache tries to download a prebuilt container image for
// c from the cache mirror and load it into local storage.  All errors
// are swallowed on purpose: the caller simply falls back to building
// the container locally.
func (cmd *KernelCmd) fetchContainerCache(c container.Container) {
	if !cmd.ContainerCache {
		return
	}

	// Nothing to do if the container already exists locally.
	if c.Exist() {
		return
	}

	tmp, err := fs.TempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	resp, err := grab.Get(tmp, cache.ContainerURL(c.Name()))
	if err != nil {
		return
	}
	defer os.Remove(resp.Filename)

	err = container.Load(resp.Filename, c.Name())
	if err == nil {
		log.Info().Msgf("use prebuilt container %s", c.Name())
	}
}
// Generate prepares the rootfs and container for km's distro, matches
// the available kernel packages and installs up to cmd.Max of them
// with cmd.Threads parallel workers.  The SIGINT-driven shutdown flag
// is checked between every step so the loop can be interrupted.
func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
	// Log the outcome once on exit, at warn level only on failure.
	defer func() {
		if err != nil {
			log.Warn().Err(err).Msg("")
		} else {
			log.Debug().Err(err).Msg("")
		}
	}()

	if cmd.Update {
		container.UseCache = false
	}
	if cmd.NoPrune {
		container.Prune = false
	}

	// A missing/unreadable kernels.toml is not fatal here: it just
	// means nothing is known to be installed yet.
	cmd.kcfg, err = config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		log.Debug().Err(err).Msg("read kernels config")
	}

	container.Commands = g.Config.Docker.Commands
	container.Registry = g.Config.Docker.Registry
	container.Timeout = g.Config.Docker.Timeout.Duration
	if cmd.ContainerTimeout != 0 {
		container.Timeout = cmd.ContainerTimeout
	}

	log.Info().Msgf("Generating for target %v", km)

	_, err = kernel.GenRootfsImage(km.Distro.RootFS(), !cmd.NoDownload)
	if err != nil || cmd.shutdown {
		return
	}

	c, err := container.New(km.Distro)
	if err != nil || cmd.shutdown {
		return
	}

	cmd.fetchContainerCache(c)

	pkgs, err := kernel.MatchPackages(km)
	if err != nil || cmd.shutdown {
		return
	}

	if cmd.Shuffle {
		pkgs = kernel.ShuffleStrings(pkgs)
	}

	swg := sizedwaitgroup.New(cmd.Threads)

	for i, pkg := range pkgs {
		if cmd.shutdown {
			err = nil
			return
		}

		swg.Add()

		// Re-check after Add: it may have blocked for a while
		// waiting for a worker slot.
		if cmd.shutdown {
			err = nil
			swg.Done()
			return
		}

		if cmd.stats.success >= cmd.Max {
			log.Print("Max is reached")
			swg.Done()
			break
		}

		log.Info().Msgf("%d/%d %s", i+1, len(pkgs), pkg)

		go func(p string) {
			defer swg.Done()
			cmd.GenKernel(km, p)
		}(pkg)
	}
	swg.Wait()

	return
}
type KernelListCmd struct{}
// Run prints distro id, release and kernel release for every kernel
// listed in kernels.toml.
func (cmd *KernelListCmd) Run(g *Globals) (err error) {
	var kcfg config.KernelConfig
	// A read failure is only logged: an empty list is reported below.
	kcfg, err = config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		log.Debug().Err(err).Msg("read kernel config")
	}

	if len(kcfg.Kernels) == 0 {
		err = errors.New("no kernels found")
		return
	}

	for i := range kcfg.Kernels {
		k := kcfg.Kernels[i]
		fmt.Println(k.Distro.ID, k.Distro.Release, k.KernelRelease)
	}
	return
}
// KernelListRemoteCmd lists kernel packages available for a distro
// from inside its container, without installing anything.
type KernelListRemoteCmd struct {
	Distro string `required:"" help:"distribution"`
	Ver    string `help:"distro version"`
}
// Run builds/loads the distro container and prints every kernel
// package name that matches the target (any version of the given
// distro/release).
func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	if kernelCmd.Update {
		container.UseCache = false
	}
	if kernelCmd.NoPrune {
		container.Prune = false
	}

	distroType, err := distro.NewID(cmd.Distro)
	if err != nil {
		return
	}

	// Match every kernel of the requested distro/release.
	km := artifact.Target{
		Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
		Kernel: artifact.Kernel{Regex: ".*"},
	}

	_, err = kernel.GenRootfsImage(km.Distro.RootFS(), false)
	if err != nil {
		return
	}

	container.Registry = g.Config.Docker.Registry
	container.Commands = g.Config.Docker.Commands

	c, err := container.New(km.Distro)
	if err != nil {
		return
	}

	kernelCmd.fetchContainerCache(c)

	// Print whatever was found even if matching reported an error;
	// the error is still propagated through the named return.
	pkgs, err := kernel.MatchPackages(km)
	// error check skipped on purpose
	for _, k := range pkgs {
		fmt.Println(k)
	}

	return
}
type KernelAutogenCmd struct{}
// Run reads .out-of-tree.toml from the work directory and generates
// kernels for each of its targets, then refreshes kernels.toml.
func (cmd *KernelAutogenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	ka, err := artifact.Artifact{}.Read(g.WorkDir + "/.out-of-tree.toml")
	if err != nil {
		return
	}

	kernel.SetSigintHandler(&kernelCmd.shutdown)

	for _, sk := range ka.Targets {
		// A target without an explicit release would be ambiguous;
		// require it to be set.
		if sk.Distro.Release == "" {
			err = errors.New("please set distro_release")
			return
		}

		err = kernelCmd.Generate(g, sk)
		if err != nil {
			return
		}
		if kernelCmd.shutdown {
			break
		}
	}

	return kernelCmd.UpdateConfig()
}
// KernelGenallCmd generates kernels for all distros, optionally
// narrowed to one distribution and/or release.
type KernelGenallCmd struct {
	Distro string `help:"distribution"`
	Ver    string `help:"distro version"`
}
// Run iterates over every known distro (filtered by --distro/--ver)
// and generates all matching kernels.  Per-distro failures are logged
// inside Generate and do not abort the remaining distros.
func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	distroType, err := distro.NewID(cmd.Distro)
	if err != nil {
		return
	}

	kernel.SetSigintHandler(&kernelCmd.shutdown)

	for _, dist := range distro.List() {
		if kernelCmd.shutdown {
			break
		}

		// Apply the optional distro/release filters.
		if distroType != distro.None && distroType != dist.ID {
			continue
		}

		if cmd.Ver != "" && dist.Release != cmd.Ver {
			continue
		}

		target := artifact.Target{
			Distro: dist,
			Kernel: artifact.Kernel{Regex: ".*"},
		}

		err = kernelCmd.Generate(g, target)
		if err != nil {
			continue
		}
	}

	return kernelCmd.UpdateConfig()
}
// KernelInstallCmd installs the kernels of one distro/release that
// match a release regex.
type KernelInstallCmd struct {
	Distro string `required:"" help:"distribution"`
	Ver    string `required:"" help:"distro version"`
	Kernel string `required:"" help:"kernel release mask"`
}
// Run installs every kernel matching the distro/release/regex triple,
// then refreshes kernels.toml.
func (cmd *KernelInstallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	distroID, err := distro.NewID(cmd.Distro)
	if err != nil {
		return
	}

	kernel.SetSigintHandler(&kernelCmd.shutdown)

	target := artifact.Target{
		Distro: distro.Distro{ID: distroID, Release: cmd.Ver},
		Kernel: artifact.Kernel{Regex: cmd.Kernel},
	}

	if err = kernelCmd.Generate(g, target); err != nil {
		return
	}

	return kernelCmd.UpdateConfig()
}
type KernelConfigRegenCmd struct{}
// Run delegates to UpdateConfig to rewrite kernels.toml from the
// currently installed kernels.
func (cmd *KernelConfigRegenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	err = kernelCmd.UpdateConfig()
	return
}

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
package cmd
import (
"database/sql"
@ -15,7 +15,7 @@ import (
"github.com/rs/zerolog/log"
"gopkg.in/logrusorgru/aurora.v2"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/artifact"
)
type LogCmd struct {
@ -40,7 +40,7 @@ func (cmd *LogQueryCmd) Run(g *Globals) (err error) {
var les []logEntry
ka, kaErr := config.ReadArtifactConfig(g.WorkDir + "/.out-of-tree.toml")
ka, kaErr := artifact.Artifact{}.Read(g.WorkDir + "/.out-of-tree.toml")
if kaErr == nil {
log.Print(".out-of-tree.toml found, filter by artifact name")
les, err = getAllArtifactLogs(db, cmd.Tag, cmd.Num, ka)
@ -106,7 +106,7 @@ func (cmd *LogDumpCmd) Run(g *Globals) (err error) {
}
fmt.Println("ID:", l.ID)
fmt.Println("Date:", l.Timestamp)
fmt.Println("Date:", l.Timestamp.Format("2006-01-02 15:04"))
fmt.Println("Tag:", l.Tag)
fmt.Println()
@ -114,12 +114,12 @@ func (cmd *LogDumpCmd) Run(g *Globals) (err error) {
fmt.Println("Name:", l.Name)
fmt.Println()
fmt.Println("Distro:", l.DistroType.String(), l.DistroRelease)
fmt.Println("Distro:", l.Distro.ID.String(), l.Distro.Release)
fmt.Println("Kernel:", l.KernelRelease)
fmt.Println()
fmt.Println("Build ok:", l.Build.Ok)
if l.Type == config.KernelModule {
if l.Type == artifact.KernelModule {
fmt.Println("Insmod ok:", l.Run.Ok)
}
fmt.Println("Test ok:", l.Test.Ok)
@ -128,7 +128,7 @@ func (cmd *LogDumpCmd) Run(g *Globals) (err error) {
fmt.Printf("Build output:\n%s\n", l.Build.Output)
fmt.Println()
if l.Type == config.KernelModule {
if l.Type == artifact.KernelModule {
fmt.Printf("Insmod output:\n%s\n", l.Run.Output)
fmt.Println()
}
@ -207,24 +207,47 @@ func (cmd *LogMarkdownCmd) Run(g *Globals) (err error) {
return
}
// center pads s with spaces on both sides to total width w (when the
// padding is uneven the extra space goes on the right).  Strings
// longer than w are returned as-is.
func center(s string, w int) string {
	leftPadded := fmt.Sprintf("%[1]*s", (w+len(s))/2, s)
	return fmt.Sprintf("%[1]*s", -w, leftPadded)
}
// genOkFailCentered renders name centered in a 10-column field, with a
// green background (black text) on success or bold white-on-red on
// failure.
func genOkFailCentered(name string, ok bool) (aurv aurora.Value) {
	label := center(name, 10)
	if !ok {
		aurv = aurora.BgRed(aurora.White(aurora.Bold(label)))
		return
	}
	aurv = aurora.BgGreen(aurora.Black(label))
	return
}
func logLogEntry(l logEntry) {
distroInfo := fmt.Sprintf("%s-%s {%s}", l.DistroType,
l.DistroRelease, l.KernelRelease)
distroInfo := fmt.Sprintf("%s-%s {%s}", l.Distro.ID,
l.Distro.Release, l.KernelRelease)
artifactInfo := fmt.Sprintf("{[%s] %s}", l.Type, l.Name)
colored := ""
if l.Type == config.KernelExploit {
colored = aurora.Sprintf("[%4d %4s] [%s] %40s %40s: %s %s",
l.ID, l.Tag, l.Timestamp, artifactInfo, distroInfo,
genOkFail("BUILD", l.Build.Ok),
genOkFail("LPE", l.Test.Ok))
timestamp := l.Timestamp.Format("2006-01-02 15:04")
var status aurora.Value
if l.InternalErrorString != "" {
status = genOkFailCentered("INTERNAL", false)
} else if l.Type == artifact.KernelExploit {
if l.Build.Ok {
status = genOkFailCentered("LPE", l.Test.Ok)
} else {
status = genOkFailCentered("BUILD", l.Build.Ok)
}
} else {
colored = aurora.Sprintf("[%4d %4s] [%s] %40s %40s: %s %s %s",
l.ID, l.Tag, l.Timestamp, artifactInfo, distroInfo,
genOkFail("BUILD", l.Build.Ok),
genOkFail("INSMOD", l.Run.Ok),
genOkFail("TEST", l.Test.Ok))
if l.Build.Ok {
if l.Run.Ok {
status = genOkFailCentered("TEST", l.Test.Ok)
} else {
status = genOkFailCentered("INSMOD", l.Run.Ok)
}
} else {
status = genOkFailCentered("BUILD", l.Build.Ok)
}
}
additional := ""
@ -234,11 +257,11 @@ func logLogEntry(l logEntry) {
additional = "(timeout)"
}
if additional != "" {
fmt.Println(colored, additional)
} else {
fmt.Println(colored)
}
colored := aurora.Sprintf("[%4d %4s] [%s] %s %-70s: %s %s",
l.ID, l.Tag, timestamp, artifactInfo, distroInfo, status,
additional)
fmt.Println(colored)
}
type runstat struct {
@ -250,7 +273,7 @@ func getStats(db *sql.DB, path, tag string) (
var les []logEntry
ka, kaErr := config.ReadArtifactConfig(path + "/.out-of-tree.toml")
ka, kaErr := artifact.Artifact{}.Read(path + "/.out-of-tree.toml")
if kaErr == nil {
les, err = getAllArtifactLogs(db, tag, -1, ka)
} else {
@ -263,17 +286,17 @@ func getStats(db *sql.DB, path, tag string) (
distros = make(map[string]map[string]map[string]runstat)
for _, l := range les {
_, ok := distros[l.DistroType.String()]
_, ok := distros[l.Distro.ID.String()]
if !ok {
distros[l.DistroType.String()] = make(map[string]map[string]runstat)
distros[l.Distro.ID.String()] = make(map[string]map[string]runstat)
}
_, ok = distros[l.DistroType.String()][l.DistroRelease]
_, ok = distros[l.Distro.ID.String()][l.Distro.Release]
if !ok {
distros[l.DistroType.String()][l.DistroRelease] = make(map[string]runstat)
distros[l.Distro.ID.String()][l.Distro.Release] = make(map[string]runstat)
}
rs := distros[l.DistroType.String()][l.DistroRelease][l.KernelRelease]
rs := distros[l.Distro.ID.String()][l.Distro.Release][l.KernelRelease]
rs.All++
if l.Build.Ok {
@ -292,7 +315,7 @@ func getStats(db *sql.DB, path, tag string) (
rs.Timeout++
}
distros[l.DistroType.String()][l.DistroRelease][l.KernelRelease] = rs
distros[l.Distro.ID.String()][l.Distro.Release][l.KernelRelease] = rs
}
return

View File

@ -2,13 +2,15 @@
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
package cmd
import (
"fmt"
"io/ioutil"
"os"
"time"
"code.dumpstack.io/tools/out-of-tree/fs"
"github.com/rs/zerolog/log"
)
@ -18,7 +20,7 @@ type PackCmd struct {
NoDownload bool `help:"do not download qemu image while kernel generation"`
ExploitRuns int64 `default:"4" help:"amount of runs of each exploit"`
KernelRuns int64 `default:"1" help:"amount of runs of each kernel"`
Max int64 `help:"download random kernels from set defined by regex in release_mask, but no more than X for each of release_mask" default:"1"`
Max int `help:"download random kernels from set defined by regex in release_mask, but no more than X for each of release_mask" default:"1"`
Threads int `help:"threads" default:"4"`
@ -33,7 +35,7 @@ func (cmd *PackCmd) Run(g *Globals) (err error) {
tag := fmt.Sprintf("pack_run_%d", time.Now().Unix())
log.Print("Tag:", tag)
files, err := ioutil.ReadDir(g.WorkDir)
files, err := os.ReadDir(g.WorkDir)
if err != nil {
return
}
@ -41,15 +43,17 @@ func (cmd *PackCmd) Run(g *Globals) (err error) {
for _, f := range files {
workPath := g.WorkDir + "/" + f.Name()
if !exists(workPath + "/.out-of-tree.toml") {
if !fs.PathExists(workPath + "/.out-of-tree.toml") {
continue
}
if cmd.Autogen {
err = KernelAutogenCmd{Max: cmd.Max}.Run(
autogen := KernelAutogenCmd{}
err = autogen.Run(
&KernelCmd{
NoDownload: cmd.NoDownload,
UseHost: cmd.UseHost,
Max: cmd.Max,
},
&Globals{
Config: g.Config,

601
cmd/pew.go Normal file
View File

@ -0,0 +1,601 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"database/sql"
"errors"
"fmt"
"io"
"math/rand"
"os"
"os/exec"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/google/uuid"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"gopkg.in/logrusorgru/aurora.v2"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/client"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// pathDevNull is the default sink for build results the user does not
// ask to keep.
const pathDevNull = "/dev/null"

// LevelWriter filters log output below a minimum zerolog level.
type LevelWriter struct {
	io.Writer
	Level zerolog.Level
}
// WriteLevel forwards p to the wrapped writer when the message level
// is at or above the configured threshold; otherwise it reports the
// bytes as written without emitting anything.
func (lw *LevelWriter) WriteLevel(l zerolog.Level, p []byte) (n int, err error) {
	if l < lw.Level {
		return len(p), nil
	}
	return lw.Writer.Write(p)
}
// ConsoleWriter and FileWriter are the two level-filtered log sinks
// used by this command; LogLevel is the console verbosity threshold.
var ConsoleWriter, FileWriter LevelWriter
var LogLevel zerolog.Level
// runstate accumulates test-run statistics for the whole pew session.
type runstate struct {
	Overall, Success float64
	InternalErrors   int
}

// state is the package-level accumulator updated while tests run.
var (
	state runstate
)

// successRate reports the fraction of successful runs out of all runs
// (NaN when nothing was recorded, since 0/0 is undefined).
func successRate(state runstate) float64 {
	total := state.Overall
	wins := state.Success
	return wins / total
}
// PewCmd is the main test command: it builds and runs the artifact
// against a set of kernels, locally or via the remote daemon.
type PewCmd struct {
	Max     int64  `help:"test no more than X kernels" default:"100500"`
	Runs    int64  `help:"runs per each kernel" default:"1"`
	Kernel  string `help:"override kernel regex"`
	RootFS  string `help:"override rootfs image" type:"existingfile"`
	Guess   bool   `help:"try all defined kernels"`
	Shuffle bool   `help:"randomize kernels test order"`
	Binary  string `help:"use binary, do not build"`
	Test    string `help:"override path for test"`
	Dist    string `help:"build result path" default:"/dev/null"`
	Threads int    `help:"threads" default:"1"`
	Tag     string `help:"log tagging"`
	Timeout time.Duration `help:"timeout after tool will not spawn new tests"`

	ArtifactConfig string `help:"path to artifact config" type:"path"`

	QemuTimeout           time.Duration `help:"timeout for qemu"`
	QemuAfterStartTimeout time.Duration `help:"timeout after starting of the qemu vm before tests"`
	DockerTimeout         time.Duration `help:"timeout for docker"`

	Threshold             float64 `help:"reliablity threshold for exit code" default:"1.00"`
	IncludeInternalErrors bool    `help:"count internal errors as part of the success rate"`

	Endless        bool          `help:"endless tests"`
	EndlessTimeout time.Duration `help:"timeout between tests" default:"1m"`
	EndlessStress  string        `help:"endless stress script" type:"existingfile"`

	// Runtime state, excluded from CLI parsing and serialization.
	DB              *sql.DB             `kong:"-" json:"-"`
	Kcfg            config.KernelConfig `kong:"-" json:"-"`
	TimeoutDeadline time.Time           `kong:"-" json:"-"`

	Watch bool `help:"watch job status"`

	// repoName/commit identify the artifact repo pushed to the daemon.
	repoName string
	commit   string

	useRemote  bool
	remoteAddr string

	// UUID of the job set
	groupUUID string
}
// getRepoName derives a repository name for the remote daemon from the
// artifact name plus the first characters of the worktree's root
// commit hash.  On git failure it only logs and leaves repoName unset.
func (cmd *PewCmd) getRepoName(worktree string, ka artifact.Artifact) {
	out, err := exec.Command("git", "--work-tree="+worktree,
		"rev-list", "--max-parents=0", "HEAD").CombinedOutput()
	if err != nil {
		log.Error().Err(err).Msg(string(out))
		return
	}

	cmd.repoName = fmt.Sprintf("%s-%s", ka.Name, string(out[:7]))
}
// syncRepo makes sure the remote daemon has an up-to-date copy of the
// artifact repository at worktree: derives the repo name, records the
// current HEAD commit, registers the repo if unknown, then pushes it.
func (cmd *PewCmd) syncRepo(worktree string, ka artifact.Artifact) (err error) {
	c := client.Client{RemoteAddr: cmd.remoteAddr}

	cmd.getRepoName(worktree, ka)

	raw, err := exec.Command("git", "--work-tree="+worktree,
		"rev-parse", "HEAD").CombinedOutput()
	if err != nil {
		return
	}
	cmd.commit = strings.TrimSuffix(string(raw), "\n")

	// Repo-not-found is expected on first use; other errors abort.
	_, err = c.GetRepo(cmd.repoName)
	if err != nil && err != client.ErrRepoNotFound {
		log.Error().Err(err).Msg("GetRepo API error")
		return
	}

	if err == client.ErrRepoNotFound {
		log.Warn().Msg("repo not found")
		log.Info().Msg("add repo")
		log.Warn().Msgf("%v", spew.Sdump(ka))
		err = c.AddRepo(api.Repo{Name: cmd.repoName})
		if err != nil {
			return
		}
	}

	err = c.PushRepo(api.Repo{Name: cmd.repoName, Path: worktree})
	if err != nil {
		log.Error().Err(err).Msg("push repo error")
		return
	}

	return
}
// Run is the entry point of the pew command: it loads the kernel list
// and artifact config, applies CLI overrides, runs the test matrix
// (locally or through the remote daemon) and finally enforces the
// success-rate threshold via the returned error.
func (cmd *PewCmd) Run(g *Globals) (err error) {
	cmd.groupUUID = uuid.New().String()
	log.Info().Str("group", cmd.groupUUID).Msg("")

	cmd.useRemote = g.Remote
	cmd.remoteAddr = g.RemoteAddr

	// The kernel list comes from the daemon in remote mode, from the
	// local kernels.toml otherwise.
	if cmd.useRemote {
		c := client.Client{RemoteAddr: cmd.remoteAddr}
		cmd.Kcfg.Kernels, err = c.Kernels()
		if err != nil {
			log.Fatal().Err(err).Msg("read kernels config")
		}
	} else {
		cmd.Kcfg, err = config.ReadKernelConfig(
			g.Config.Kernels)
		if err != nil {
			log.Fatal().Err(err).Msg("read kernels config")
		}
	}

	if cmd.Timeout != 0 {
		log.Info().Msgf("Set global timeout to %s", cmd.Timeout)
		cmd.TimeoutDeadline = time.Now().Add(cmd.Timeout)
	}

	cmd.DB, err = openDatabase(g.Config.Database)
	if err != nil {
		log.Fatal().Err(err).
			Msgf("Cannot open database %s", g.Config.Database)
	}
	defer cmd.DB.Close()

	var configPath string
	if cmd.ArtifactConfig == "" {
		configPath = g.WorkDir + "/.out-of-tree.toml"
	} else {
		configPath = cmd.ArtifactConfig
	}
	ka, err := artifact.Artifact{}.Read(configPath)
	if err != nil {
		return
	}

	if cmd.useRemote {
		err = cmd.syncRepo(g.WorkDir, ka)
		if err != nil {
			return
		}
	}

	// No explicit targets (or --guess): test against everything.
	if len(ka.Targets) == 0 || cmd.Guess {
		log.Debug().Msg("will use all available targets")

		for _, dist := range distro.List() {
			ka.Targets = append(ka.Targets, artifact.Target{
				Distro: dist,
				Kernel: artifact.Kernel{
					Regex: ".*",
				},
			})
		}
	}

	if ka.SourcePath == "" {
		ka.SourcePath = g.WorkDir
	}

	// --kernel replaces all targets with a single mask.
	if cmd.Kernel != "" {
		var km artifact.Target
		km, err = kernelMask(cmd.Kernel)
		if err != nil {
			return
		}

		ka.Targets = []artifact.Target{km}
	}

	// TODO there was a lib for merge structures

	ka.Qemu.Timeout.Duration = g.Config.Qemu.Timeout.Duration
	ka.Docker.Timeout.Duration = g.Config.Docker.Timeout.Duration

	if cmd.QemuTimeout != 0 {
		log.Info().Msgf("Set qemu timeout to %s", cmd.QemuTimeout)
		g.Config.Qemu.Timeout.Duration = cmd.QemuTimeout
		ka.Qemu.Timeout.Duration = cmd.QemuTimeout
	}

	if cmd.DockerTimeout != 0 {
		log.Info().Msgf("Set docker timeout to %s", cmd.DockerTimeout)
		g.Config.Docker.Timeout.Duration = cmd.DockerTimeout
		ka.Docker.Timeout.Duration = cmd.DockerTimeout
	}

	if cmd.Tag == "" {
		cmd.Tag = fmt.Sprintf("%d", time.Now().Unix())
	}
	if !cmd.useRemote {
		log.Info().Str("tag", cmd.Tag).Msg("")
	}

	err = cmd.performCI(ka)
	if err != nil {
		return
	}

	// In remote mode results are tracked by the daemon, not here.
	if cmd.useRemote {
		return
	}

	if state.InternalErrors > 0 {
		s := "not counted towards success rate"
		if cmd.IncludeInternalErrors {
			s = "included in success rate"
		}
		log.Warn().Msgf("%d internal errors "+
			"(%s)", state.InternalErrors, s)
	}

	if cmd.IncludeInternalErrors {
		state.Overall += float64(state.InternalErrors)
	}

	msg := fmt.Sprintf("Success rate: %.02f (%d/%d), Threshold: %.02f",
		successRate(state),
		int(state.Success), int(state.Overall),
		cmd.Threshold)

	if successRate(state) < cmd.Threshold {
		log.Error().Msg(msg)
		err = errors.New("reliability threshold not met")
	} else {
		log.Info().Msg(msg)
	}

	return
}
// watchJob polls the daemon until the job identified by uuid reaches a
// terminal state (success or failure), then logs the outcome.  It
// releases one sized-wait-group slot on return, so the caller must
// hand over ownership of exactly one slot.
func (cmd PewCmd) watchJob(swg *sizedwaitgroup.SizedWaitGroup,
	slog zerolog.Logger, uuid string) {

	defer swg.Done() // releases the slot handed over by remote()

	c := client.Client{RemoteAddr: cmd.remoteAddr}

	var err error
	var st api.Status

	for {
		st, err = c.JobStatus(uuid)
		if err != nil {
			slog.Error().Err(err).Msg("")
			// Back off before retrying: the previous code
			// continued immediately, busy-looping and hammering
			// the daemon on a persistent API error.
			time.Sleep(time.Second)
			continue
		}
		if st == api.StatusSuccess || st == api.StatusFailure {
			break
		}
		time.Sleep(time.Second)
	}

	switch st {
	case api.StatusSuccess:
		slog.Info().Msg("success")
	case api.StatusFailure:
		slog.Warn().Msg("failure")
	}
}
// remote submits a single job to the daemon.  Exactly one swg.Done is
// guaranteed: normally it fires when remote returns, but with --watch
// the wait group slot is handed over to watchJob, which releases it
// once the job reaches a terminal state.  (Previously both functions
// deferred swg.Done, double-releasing the slot when watching.)
func (cmd PewCmd) remote(swg *sizedwaitgroup.SizedWaitGroup,
	ka artifact.Artifact, ki distro.KernelInfo) {

	ownSlot := true
	defer func() {
		if ownSlot {
			swg.Done()
		}
	}()

	slog := log.With().
		Str("distro_type", ki.Distro.ID.String()).
		Str("distro_release", ki.Distro.Release).
		Str("kernel", ki.KernelRelease).
		Logger()

	job := api.Job{}
	job.Group = cmd.groupUUID
	job.RepoName = cmd.repoName
	job.Commit = cmd.commit

	job.Artifact = ka
	job.Target = ki

	c := client.Client{RemoteAddr: cmd.remoteAddr}
	uuid, err := c.AddJob(job)
	slog = slog.With().Str("uuid", uuid).Logger()
	if err != nil {
		slog.Error().Err(err).Msg("cannot add job")
		return
	}
	slog.Info().Msg("add")

	if cmd.Watch {
		// Transfer slot ownership to the watcher; watchJob calls
		// swg.Done when the job finishes.
		ownSlot = false
		go cmd.watchJob(swg, slog, uuid)
	}
}
// testArtifact builds and tests the artifact against a single kernel
// locally, teeing the log output to the console writer, the global
// file writer and a per-run log file under logs/<tag>/.
func (cmd PewCmd) testArtifact(swg *sizedwaitgroup.SizedWaitGroup,
	ka artifact.Artifact, ki distro.KernelInfo) {

	defer swg.Done()

	logdir := "logs/" + cmd.Tag
	err := os.MkdirAll(logdir, os.ModePerm)
	if err != nil {
		log.Error().Err(err).Msgf("mkdir %s", logdir)
		return
	}

	// Reuse logdir instead of re-deriving "logs/<tag>" a second time,
	// so the directory and the file path cannot drift apart.
	logfile := fmt.Sprintf("%s/%s-%s-%s.log",
		logdir,
		ki.Distro.ID.String(),
		ki.Distro.Release,
		ki.KernelRelease,
	)
	f, err := os.Create(logfile)
	if err != nil {
		log.Error().Err(err).Msgf("create %s", logfile)
		return
	}
	defer f.Close()

	slog := zerolog.New(zerolog.MultiLevelWriter(
		&ConsoleWriter,
		&FileWriter,
		&zerolog.ConsoleWriter{
			Out: f,
			FieldsExclude: []string{
				"distro_release",
				"distro_type",
				"kernel",
			},
			NoColor: true,
		},
	))

	// Caller location is only useful at debug/trace verbosity.
	switch LogLevel {
	case zerolog.TraceLevel, zerolog.DebugLevel:
		slog = slog.With().Caller().Logger()
	}

	slog = slog.With().Timestamp().
		Str("distro_type", ki.Distro.ID.String()).
		Str("distro_release", ki.Distro.Release).
		Str("kernel", ki.KernelRelease).
		Logger()

	ka.Process(slog, ki,
		cmd.Endless, cmd.Binary, cmd.EndlessStress, cmd.EndlessTimeout,
		func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo, result *artifact.Result) {
			dumpResult(q, ka, ki, result, cmd.Dist, cmd.Tag, cmd.Binary, cmd.DB)
		},
	)
}
// shuffleKernels shuffles the slice in place (uniformly at random)
// and returns it for convenience.
func shuffleKernels(a []distro.KernelInfo) []distro.KernelInfo {
	// rand.Shuffle is the standard-library Fisher-Yates; it replaces
	// the previous hand-rolled loop.
	rand.Shuffle(len(a), func(i, j int) {
		a[i], a[j] = a[j], a[i]
	})
	return a
}
// process dispatches one kernel test either to the remote daemon or
// to the local qemu-based runner; the callee is responsible for
// releasing the wait group slot.
func (cmd PewCmd) process(swg *sizedwaitgroup.SizedWaitGroup,
	ka artifact.Artifact, kernel distro.KernelInfo) {

	run := cmd.testArtifact
	if cmd.useRemote {
		run = cmd.remote
	}
	go run(swg, ka, kernel)
}
// performCI walks the configured kernels, schedules up to cmd.Max
// supported (non-blocklisted) kernels for testing — cmd.Runs times
// each, cmd.Threads in parallel — and waits for completion.  It
// returns an error if no supported kernel was found.
func (cmd PewCmd) performCI(ka artifact.Artifact) (err error) {
	found := false
	// Renamed from "max": that name shadows the Go 1.21 builtin.
	remaining := cmd.Max

	threadCounter := 0

	swg := sizedwaitgroup.New(cmd.Threads)

	if cmd.Shuffle {
		cmd.Kcfg.Kernels = shuffleKernels(cmd.Kcfg.Kernels)
	}

	for _, kernel := range cmd.Kcfg.Kernels {
		if remaining <= 0 {
			break
		}

		var supported bool
		supported, err = ka.Supported(kernel)
		if err != nil {
			return
		}

		if kernel.Blocklisted {
			log.Debug().Str("kernel", kernel.KernelVersion).
				Msgf("skip (blocklisted)")
			continue
		}

		if cmd.RootFS != "" {
			kernel.RootFS = cmd.RootFS
		}

		if supported {
			found = true
			remaining--
			for i := int64(0); i < cmd.Runs; i++ {
				// Stop scheduling once the global deadline passed.
				if !cmd.TimeoutDeadline.IsZero() &&
					time.Now().After(cmd.TimeoutDeadline) {

					break
				}

				swg.Add()
				// Stagger the first cmd.Threads goroutines by a
				// second each to avoid a startup thundering herd.
				if threadCounter < cmd.Threads {
					time.Sleep(time.Second)
					threadCounter++
				}
				go cmd.process(&swg, ka, kernel)
			}
		}
	}
	swg.Wait()

	if !found {
		err = errors.New("no supported kernels found")
	}

	return
}
// kernelMask parses a "distroType:regex" specifier into a Target.
// Only the first colon separates the distro from the regex, so the
// regex itself may now contain ':' characters (previously such
// specifiers were rejected by the strict two-part split).
func kernelMask(kernel string) (km artifact.Target, err error) {
	distroName, mask, found := strings.Cut(kernel, ":")
	if !found {
		err = errors.New("kernel is not 'distroType:regex'")
		return
	}

	dt, err := distro.NewID(distroName)
	if err != nil {
		return
	}

	km = artifact.Target{
		Distro: distro.Distro{ID: dt},
		Kernel: artifact.Kernel{Regex: mask},
	}
	return
}
// genOkFail renders a colored status badge: green "<name> SUCCESS "
// when ok, bold red on white "<name> FAILURE " otherwise.  An empty
// name yields just the status word.
func genOkFail(name string, ok bool) (aurv aurora.Value) {
	var label string
	if name != "" {
		label = " " + name
	}

	if ok {
		return aurora.BgGreen(aurora.Black(label + " SUCCESS "))
	}
	return aurora.BgRed(aurora.White(aurora.Bold(label + " FAILURE ")))
}
// dumpResult logs the outcome of a single artifact run, updates the
// global success-rate counters, records the run in the database and,
// unless a prebuilt binary was supplied, copies the build artifact
// into dist.
func dumpResult(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
	res *artifact.Result, dist, tag, binary string, db *sql.DB) {

	// TODO refactor

	// Internal (infrastructure) errors are counted separately and do
	// not enter the Overall/Success totals.
	if res.InternalError != nil {
		q.Log.Warn().Err(res.InternalError).
			Str("panic", fmt.Sprintf("%v", q.KernelPanic)).
			Str("timeout", fmt.Sprintf("%v", q.KilledByTimeout)).
			Msg("internal")
		res.InternalErrorString = res.InternalError.Error()
		state.InternalErrors += 1
	} else {
		colored := ""

		state.Overall += 1
		if res.Test.Ok {
			state.Success += 1
		}

		// Render a colored stage summary depending on which stages
		// this artifact type has.
		switch ka.Type {
		case artifact.KernelExploit:
			colored = aurora.Sprintf("%s %s",
				genOkFail("BUILD", res.Build.Ok),
				genOkFail("LPE", res.Test.Ok))
		case artifact.KernelModule:
			colored = aurora.Sprintf("%s %s %s",
				genOkFail("BUILD", res.Build.Ok),
				genOkFail("INSMOD", res.Run.Ok),
				genOkFail("TEST", res.Test.Ok))
		case artifact.Script:
			colored = aurora.Sprintf("%s",
				genOkFail("", res.Test.Ok))
		}

		// Annotate with the qemu failure mode, if any.
		additional := ""
		if q.KernelPanic {
			additional = "(panic)"
		} else if q.KilledByTimeout {
			additional = "(timeout)"
		}

		if additional != "" {
			q.Log.Info().Msgf("%v %v", colored, additional)
		} else {
			q.Log.Info().Msgf("%v", colored)
		}
	}

	// Record the run regardless of outcome; a database failure is
	// only a warning.
	err := addToLog(db, q, ka, ki, res, tag)
	if err != nil {
		q.Log.Warn().Err(err).Msgf("[db] addToLog (%v)", ka)
	}

	// Copy the built artifact out (modules get a .ko suffix) unless
	// a prebuilt binary was supplied or output is sent to /dev/null.
	if binary == "" && dist != pathDevNull {
		err = os.MkdirAll(dist, os.ModePerm)
		if err != nil {
			log.Warn().Err(err).Msgf("os.MkdirAll (%v)", ka)
		}

		path := fmt.Sprintf("%s/%s-%s-%s", dist, ki.Distro.ID,
			ki.Distro.Release, ki.KernelRelease)
		if ka.Type != artifact.KernelExploit {
			path += ".ko"
		}

		err = artifact.CopyFile(res.BuildArtifact, path)
		if err != nil {
			log.Warn().Err(err).Msgf("copy file (%v)", ka)
		}
	}
}

View File

@ -5,286 +5,17 @@
package config
import (
"errors"
"fmt"
"io/ioutil"
"io"
"os"
"regexp"
"strconv"
"strings"
"time"
"code.dumpstack.io/tools/out-of-tree/distro"
"github.com/naoina/toml"
)
type kernel struct {
Version []int
Major []int
Minor []int
Patch []int
}
// KernelMask defines the kernel
type KernelMask struct {
DistroType DistroType
DistroRelease string // 18.04/7.4.1708/9.1
ReleaseMask string
// Overrides ReleaseMask
Kernel kernel
}
// DockerName is returns stable name for docker container
func (km KernelMask) DockerName() string {
distro := strings.ToLower(km.DistroType.String())
release := strings.Replace(km.DistroRelease, ".", "__", -1)
return fmt.Sprintf("out_of_tree_%s_%s", distro, release)
}
// ArtifactType is the kernel module or exploit
type ArtifactType int
const (
// KernelModule is any kind of kernel module
KernelModule ArtifactType = iota
// KernelExploit is the privilege escalation exploit
KernelExploit
// Script for information gathering or automation
Script
)
func (at ArtifactType) String() string {
return [...]string{"module", "exploit", "script"}[at]
}
// UnmarshalTOML is for support github.com/naoina/toml
func (at *ArtifactType) UnmarshalTOML(data []byte) (err error) {
stype := strings.Trim(string(data), `"`)
stypelower := strings.ToLower(stype)
if strings.Contains(stypelower, "module") {
*at = KernelModule
} else if strings.Contains(stypelower, "exploit") {
*at = KernelExploit
} else if strings.Contains(stypelower, "script") {
*at = Script
} else {
err = fmt.Errorf("Type %s is unsupported", stype)
}
return
}
// MarshalTOML is for support github.com/naoina/toml
func (at ArtifactType) MarshalTOML() (data []byte, err error) {
s := ""
switch at {
case KernelModule:
s = "module"
case KernelExploit:
s = "exploit"
case Script:
s = "script"
default:
err = fmt.Errorf("Cannot marshal %d", at)
}
data = []byte(`"` + s + `"`)
return
}
// Duration type with toml unmarshalling support
type Duration struct {
time.Duration
}
// UnmarshalTOML for Duration
func (d *Duration) UnmarshalTOML(data []byte) (err error) {
duration := strings.Replace(string(data), "\"", "", -1)
d.Duration, err = time.ParseDuration(duration)
return
}
// MarshalTOML for Duration
func (d Duration) MarshalTOML() (data []byte, err error) {
data = []byte(`"` + d.Duration.String() + `"`)
return
}
type PreloadModule struct {
Repo string
Path string
TimeoutAfterLoad Duration
}
// Extra test files to copy over
type FileTransfer struct {
User string
Local string
Remote string
}
type Patch struct {
Path string
Source string
Script string
}
// Artifact is for .out-of-tree.toml
type Artifact struct {
Name string
Type ArtifactType
TestFiles []FileTransfer
SourcePath string
SupportedKernels []KernelMask
Script string
Qemu struct {
Cpus int
Memory int
Timeout Duration
}
Docker struct {
Timeout Duration
}
Mitigations struct {
DisableSmep bool
DisableSmap bool
DisableKaslr bool
DisableKpti bool
}
Patches []Patch
Make struct {
Target string
}
StandardModules bool
Preload []PreloadModule
}
// checkSupport reports whether the kernel described by ki matches a
// single KernelMask km (distro type, optional release, release regex).
func (ka Artifact) checkSupport(ki KernelInfo, km KernelMask) (
	supported bool, err error) {

	if ki.DistroType != km.DistroType {
		return false, nil
	}

	// DistroRelease is optional; when set it must match exactly.
	if km.DistroRelease != "" && ki.DistroRelease != km.DistroRelease {
		return false, nil
	}

	return regexp.MatchString(km.ReleaseMask, ki.KernelRelease)
}
// Supported returns true if the given kernel matches at least one of
// the artifact's SupportedKernels masks.
func (ka Artifact) Supported(ki KernelInfo) (supported bool, err error) {
	for _, km := range ka.SupportedKernels {
		supported, err = ka.checkSupport(ki, km)
		// Stop on the first error too: previously an error from a
		// non-final mask was silently overwritten by later
		// iterations.
		if supported || err != nil {
			break
		}
	}
	return
}
// DistroType is enum with all supported distros
type DistroType int
const (
// Ubuntu https://ubuntu.com/
Ubuntu DistroType = iota
// CentOS https://www.centos.org/
CentOS
// Debian https://www.debian.org/
Debian
)
// DistroTypeStrings is the string version of enum DistroType
var DistroTypeStrings = [...]string{"Ubuntu", "CentOS", "Debian"}
// NewDistroType is create new Distro object
func NewDistroType(dType string) (dt DistroType, err error) {
err = dt.UnmarshalTOML([]byte(dType))
return
}
func (dt DistroType) String() string {
return DistroTypeStrings[dt]
}
// UnmarshalTOML is for support github.com/naoina/toml
func (dt *DistroType) UnmarshalTOML(data []byte) (err error) {
sDistro := strings.Trim(string(data), `"`)
if strings.EqualFold(sDistro, "Ubuntu") {
*dt = Ubuntu
} else if strings.EqualFold(sDistro, "CentOS") {
*dt = CentOS
} else if strings.EqualFold(sDistro, "Debian") {
*dt = Debian
} else {
err = fmt.Errorf("Distro %s is unsupported", sDistro)
}
return
}
// MarshalTOML is for support github.com/naoina/toml
func (dt DistroType) MarshalTOML() (data []byte, err error) {
s := ""
switch dt {
case Ubuntu:
s = "Ubuntu"
case CentOS:
s = "CentOS"
case Debian:
s = "Debian"
default:
err = fmt.Errorf("Cannot marshal %d", dt)
}
data = []byte(`"` + s + `"`)
return
}
// ByRootFS is sorting by .RootFS lexicographically
type ByRootFS []KernelInfo
func (a ByRootFS) Len() int { return len(a) }
func (a ByRootFS) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByRootFS) Less(i, j int) bool { return a[i].RootFS < a[j].RootFS }
// KernelInfo defines kernels.toml entries
type KernelInfo struct {
DistroType DistroType
DistroRelease string // 18.04/7.4.1708/9.1
// Must be *exactly* same as in `uname -r`
KernelRelease string
// Build-time information
KernelSource string // module/exploit will be build on host
ContainerName string
// Runtime information
KernelPath string
InitrdPath string
ModulesPath string
RootFS string
// Debug symbols
VmlinuxPath string
}
// KernelConfig is the ~/.out-of-tree/kernels.toml configuration description
type KernelConfig struct {
Kernels []KernelInfo
Kernels []distro.KernelInfo
}
func readFileAll(path string) (buf []byte, err error) {
@ -294,7 +25,7 @@ func readFileAll(path string) (buf []byte, err error) {
}
defer f.Close()
buf, err = ioutil.ReadAll(f)
buf, err = io.ReadAll(f)
return
}
@ -312,94 +43,3 @@ func ReadKernelConfig(path string) (kernelCfg KernelConfig, err error) {
return
}
// rangeRegexp returns a regexp alternation group matching every
// integer from start to end inclusive, e.g. rangeRegexp(1, 3) yields
// "(1|2|3)".  An empty range yields "()".
func rangeRegexp(start, end int) (s string) {
	var nums []string
	for i := start; i <= end; i++ {
		nums = append(nums, strconv.Itoa(i))
	}
	// strings.Join avoids the quadratic string concatenation of the
	// previous element-by-element += loop.
	return "(" + strings.Join(nums, "|") + ")"
}
// versionRegexp renders one version component as a regexp fragment: a
// single value matches literally, a [lo, hi] pair becomes an
// alternation of the whole range; anything else is an error.
func versionRegexp(l []int) (s string, err error) {
	switch len(l) {
	case 1:
		return strconv.Itoa(l[0]), nil
	case 2:
		return rangeRegexp(l[0], l[1]), nil
	default:
		return "", errors.New("version must contain one value or range")
	}
}
// genReleaseMask builds a kernel release regexp of the shape
// "V[.]M[.]m(-P-)?.*" from a structured kernel version spec; each
// component may be a single value or a [lo, hi] range.
func genReleaseMask(km kernel) (mask string, err error) {
	s, err := versionRegexp(km.Version)
	if err != nil {
		return
	}
	mask += s + "[.]"

	s, err = versionRegexp(km.Major)
	if err != nil {
		return
	}
	mask += s + "[.]"

	s, err = versionRegexp(km.Minor)
	if err != nil {
		return
	}
	mask += s

	// Patch level is optional; when present it is wrapped in dashes
	// ("-N-") as it appears in distro kernel release strings.
	switch len(km.Patch) {
	case 0:
		// ok
	case 1:
		mask += "-" + strconv.Itoa(km.Patch[0]) + "-"
	case 2:
		mask += "-" + rangeRegexp(km.Patch[0], km.Patch[1]) + "-"
	default:
		err = errors.New("version must contain one value or range")
		return
	}

	mask += ".*"
	return
}
// ReadArtifactConfig reads and validates the .out-of-tree.toml
// artifact definition at path.
func ReadArtifactConfig(path string) (ka Artifact, err error) {
	buf, err := readFileAll(path)
	if err != nil {
		return
	}

	err = toml.Unmarshal(buf, &ka)
	if err != nil {
		return
	}

	// Post-process each mask: the kernel must be defined either
	// structurally (Kernel) or via ReleaseMask — not both — and the
	// mask is generated from the structured form when absent.
	for i := range ka.SupportedKernels {
		km := &ka.SupportedKernels[i]
		if len(km.Kernel.Version) != 0 && km.ReleaseMask != "" {
			s := "Only one way to define kernel version is allowed"
			err = errors.New(s)
			return
		}

		if km.ReleaseMask == "" {
			km.ReleaseMask, err = genReleaseMask(km.Kernel)
			if err != nil {
				return
			}
		}
	}

	return
}

View File

@ -1,65 +0,0 @@
// Copyright 2018 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package config
import (
"testing"
"github.com/naoina/toml"
)
// TestMarshalUnmarshal checks that an Artifact with one kernel mask
// round-trips through TOML marshalling without error.
func TestMarshalUnmarshal(t *testing.T) {
	artifactCfg := Artifact{
		Name: "Put name here",
		Type: KernelModule,
	}
	artifactCfg.SupportedKernels = append(artifactCfg.SupportedKernels,
		KernelMask{Ubuntu, "18.04", ".*", kernel{}})
	buf, err := toml.Marshal(&artifactCfg)
	if err != nil {
		t.Fatal(err)
	}
	var artifactCfgNew Artifact
	err = toml.Unmarshal(buf, &artifactCfgNew)
	if err != nil {
		t.Fatal(err)
	}
}
// TestKernelRegex checks genReleaseMask output for a spec with a
// patch-level range (expands into a full alternation) and for a spec
// without any patch level (ends in a bare ".*").
func TestKernelRegex(t *testing.T) {
	mask := "4[.]4[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58|59|60|61|62|63|64|65|66|67|68|69|70|71|72|73|74|75|76|77|78|79|80|81|82|83|84|85|86|87|88|89|90|91|92|93|94|95|96|97|98|99|100|101|102|103|104|105|106|107|108|109|110|111|112|113|114|115|116)-.*"
	k := kernel{
		Version: []int{4},
		Major:   []int{4},
		Minor:   []int{0},
		Patch:   []int{1, 116},
	}
	gmask, err := genReleaseMask(k)
	if err != nil {
		t.Fatal(err)
	}
	if mask != gmask {
		t.Fatal("Got", gmask, "instead of", mask)
	}

	mask = "4[.]4[.]0.*"
	k = kernel{
		Version: []int{4},
		Major:   []int{4},
		Minor:   []int{0},
	}
	gmask, err = genReleaseMask(k)
	if err != nil {
		t.Fatal(err)
	}
	if mask != gmask {
		t.Fatal("Got", gmask, "instead of", mask)
	}
}

View File

@ -0,0 +1,47 @@
package dotfiles
import (
"os"
"os/user"
"path/filepath"
"github.com/rs/zerolog/log"
)
// Directory for config files
var Directory string
// directory resolves the dotfiles directory, defaulting to
// ~/.out-of-tree and caching the result in the Directory variable.
func directory() string {
	if Directory == "" {
		usr, err := user.Current()
		if err != nil {
			log.Fatal().Err(err).Msg("get current user")
		}
		Directory = filepath.Join(usr.HomeDir, ".out-of-tree")
	}
	return Directory
}
// Dir ensures the directory named by the path components s (relative
// to the config directory) exists, and returns its path.
func Dir(s ...string) (dir string) {
	parts := append([]string{directory()}, s...)
	dir = filepath.Join(parts...)
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		log.Fatal().Err(err).Msg("mkdir")
	}
	return
}
// File returns the path of a file (given as components s relative to
// the config directory), creating its parent directory — but not the
// file itself — if needed.
func File(s ...string) (file string) {
	parts := append([]string{directory()}, s...)
	file = filepath.Join(parts...)
	if err := os.MkdirAll(filepath.Dir(file), os.ModePerm); err != nil {
		log.Fatal().Err(err).Msg("mkdir")
	}
	return
}

View File

@ -0,0 +1,113 @@
package dotfiles
import (
"os"
"path/filepath"
"testing"
)
// TestDirectory checks that an explicitly set Directory override is
// returned as-is by directory().
func TestDirectory(t *testing.T) {
	const testdir = "test"
	Directory = testdir
	if got := directory(); got != testdir {
		t.Fatalf("%s != %s", got, testdir)
	}
}
// TestDir verifies that Dir creates the requested (possibly nested)
// directories under the configured dotfiles directory and returns the
// expected paths, for both the single-string and variadic forms.
func TestDir(t *testing.T) {
	tmpdir, err := os.MkdirTemp("", "out-of-tree_")
	if err != nil {
		// Fixed: a setup failure used to silently skip the test
		// via a bare return.
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)
	Directory = tmpdir

	for _, testdir := range []string{"a", "a/b", "a/b/c"} {
		expected := filepath.Join(tmpdir, testdir)
		t.Log(testdir, "->", expected)

		resdir := Dir(testdir)
		if resdir != expected {
			t.Fatalf("%s != %s", resdir, expected)
		}

		fi, err := os.Stat(expected)
		if err != nil {
			t.Fatal(err)
		}
		if !fi.IsDir() {
			t.Fatal("not a directory")
		}
	}

	// Variadic form: each argument is one path component.
	testdir := []string{"a", "b", "c", "d"}
	expected := filepath.Join(append([]string{tmpdir}, testdir...)...)
	t.Log(testdir, "->", expected)

	resdir := Dir(testdir...)
	if resdir != expected {
		t.Fatalf("%s != %s", resdir, expected)
	}

	fi, err := os.Stat(expected)
	if err != nil {
		t.Fatal(err)
	}
	if !fi.IsDir() {
		t.Fatal("not a directory")
	}
}
// TestFile verifies that File returns the expected path, creates the
// parent directory, and does not create the file itself.
func TestFile(t *testing.T) {
	tmpdir, err := os.MkdirTemp("", "out-of-tree_")
	if err != nil {
		// Fixed: a setup failure used to silently skip the test
		// via a bare return.
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)
	Directory = tmpdir

	for _, testfile := range []string{"a", "a/b", "a/b/c"} {
		expected := filepath.Join(tmpdir, testfile)
		t.Log(testfile, "->", expected)

		resfile := File(testfile)
		if resfile != expected {
			t.Fatalf("%s != %s", resfile, expected)
		}

		// File must not create the file itself...
		_, err := os.Stat(expected)
		if err == nil {
			t.Fatal("should not exist")
		}

		// ...but it must create the parent directory.
		fi, err := os.Stat(filepath.Dir(expected))
		if err != nil {
			t.Fatal(err)
		}
		if !fi.IsDir() {
			t.Fatal("not a directory")
		}
	}

	// NOTE(review): this tail exercises Dir, not File, duplicating
	// TestDir's variadic case — possibly meant to call File; confirm
	// the intent before changing it.
	testfile := []string{"a", "b", "c"}
	expected := filepath.Join(append([]string{tmpdir}, testfile...)...)
	t.Log(testfile, "->", expected)

	resdir := Dir(testfile...)
	if resdir != expected {
		t.Fatalf("%s != %s", resdir, expected)
	}

	fi, err := os.Stat(expected)
	if err != nil {
		t.Fatal(err)
	}
	if !fi.IsDir() {
		t.Fatal("not a directory")
	}
}

View File

@ -7,36 +7,37 @@ package config
import (
"errors"
"os"
"os/user"
"time"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"github.com/alecthomas/kong"
"github.com/mitchellh/go-homedir"
"github.com/naoina/toml"
)
type DockerCommand struct {
DistroType DistroType
Command string
}
type OutOfTree struct {
// Directory for all files if not explicitly specified
Directory string
Kernels string
UserKernels string
Database string
Qemu struct {
Timeout Duration
Timeout artifact.Duration
}
Docker struct {
Timeout Duration
Timeout artifact.Duration
Registry string
// Commands that will be executed before
// the base layer of Dockerfile
Commands []DockerCommand
Commands []distro.Command
}
}
@ -77,21 +78,22 @@ func ReadOutOfTreeConf(path string) (c OutOfTree, err error) {
err = nil
}
usr, err := user.Current()
if err != nil {
return
if c.Directory != "" {
dotfiles.Directory = c.Directory
} else {
c.Directory = dotfiles.Dir("")
}
if c.Kernels == "" {
c.Kernels = usr.HomeDir + "/.out-of-tree/kernels.toml"
c.Kernels = dotfiles.File("kernels.toml")
}
if c.UserKernels == "" {
c.UserKernels = usr.HomeDir + "/.out-of-tree/kernels.user.toml"
c.UserKernels = dotfiles.File("kernels.user.toml")
}
if c.Database == "" {
c.Database = usr.HomeDir + "/.out-of-tree/db.sqlite"
c.Database = dotfiles.File("db.sqlite")
}
if c.Qemu.Timeout.Duration == 0 {
@ -99,7 +101,7 @@ func ReadOutOfTreeConf(path string) (c OutOfTree, err error) {
}
if c.Docker.Timeout.Duration == 0 {
c.Docker.Timeout.Duration = time.Minute
c.Docker.Timeout.Duration = 8 * time.Minute
}
return

View File

@ -1,237 +0,0 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
import (
"bufio"
"errors"
"fmt"
"os"
"os/exec"
"os/user"
"regexp"
"strings"
"time"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config"
)
type ContainerCmd struct {
Filter string `help:"filter by name"`
List ContainerListCmd `cmd:"" help:"list containers"`
Cleanup ContainerCleanupCmd `cmd:"" help:"cleanup containers"`
}
func (cmd ContainerCmd) Containers() (names []string) {
images, err := listContainerImages()
if err != nil {
log.Fatal().Err(err).Msg("")
}
for _, img := range images {
if cmd.Filter != "" && !strings.Contains(img.Name, cmd.Filter) {
continue
}
names = append(names, img.Name)
}
return
}
type ContainerListCmd struct{}
func (cmd ContainerListCmd) Run(containerCmd *ContainerCmd) (err error) {
for _, name := range containerCmd.Containers() {
fmt.Println(name)
}
return
}
type ContainerCleanupCmd struct{}
func (cmd ContainerCleanupCmd) Run(containerCmd *ContainerCmd) (err error) {
var output []byte
for _, name := range containerCmd.Containers() {
output, err = exec.Command("docker", "image", "rm", name).CombinedOutput()
if err != nil {
log.Error().Err(err).Str("output", string(output)).Msg("")
return
}
}
return
}
type containerImageInfo struct {
Name string
DistroType config.DistroType
DistroRelease string // 18.04/7.4.1708/9.1
}
func listContainerImages() (diis []containerImageInfo, err error) {
cmd := exec.Command("docker", "images")
log.Debug().Msgf("%v", cmd)
rawOutput, err := cmd.CombinedOutput()
if err != nil {
return
}
r, err := regexp.Compile("out_of_tree_.*")
if err != nil {
return
}
containers := r.FindAll(rawOutput, -1)
for _, c := range containers {
container := strings.Fields(string(c))[0]
s := strings.Replace(container, "__", ".", -1)
values := strings.Split(s, "_")
distro, ver := values[3], values[4]
dii := containerImageInfo{
Name: container,
DistroRelease: ver,
}
dii.DistroType, err = config.NewDistroType(distro)
if err != nil {
return
}
diis = append(diis, dii)
}
return
}
type container struct {
name string
timeout time.Duration
Volumes struct {
LibModules string
UsrSrc string
Boot string
}
// Additional arguments
Args []string
}
func NewContainer(name string, timeout time.Duration) (c container, err error) {
c.name = name
c.timeout = timeout
usr, err := user.Current()
if err != nil {
return
}
c.Volumes.LibModules = fmt.Sprintf(
"%s/.out-of-tree/volumes/%s/lib/modules", usr.HomeDir, name)
os.MkdirAll(c.Volumes.LibModules, 0777)
c.Volumes.UsrSrc = fmt.Sprintf(
"%s/.out-of-tree/volumes/%s/usr/src", usr.HomeDir, name)
os.MkdirAll(c.Volumes.UsrSrc, 0777)
c.Volumes.Boot = fmt.Sprintf(
"%s/.out-of-tree/volumes/%s/boot", usr.HomeDir, name)
os.MkdirAll(c.Volumes.Boot, 0777)
return
}
func (c container) Build(imagePath string) (output string, err error) {
args := []string{"build"}
args = append(args, "-t", c.name, imagePath)
cmd := exec.Command("docker", args...)
flog := log.With().
Str("command", fmt.Sprintf("%v", cmd)).
Logger()
stdout, err := cmd.StdoutPipe()
if err != nil {
return
}
cmd.Stderr = cmd.Stdout
err = cmd.Start()
if err != nil {
return
}
go func() {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
output += m + "\n"
flog.Trace().Str("stdout", m).Msg("")
}
}()
err = cmd.Wait()
return
}
func (c container) Run(workdir string, command string) (output string, err error) {
flog := log.With().
Str("container", c.name).
Str("workdir", workdir).
Str("command", command).
Logger()
var args []string
args = append(args, "run", "--rm")
args = append(args, c.Args...)
args = append(args,
"-v", workdir+":/work",
"-v", c.Volumes.LibModules+":/lib/modules",
"-v", c.Volumes.UsrSrc+":/usr/src",
"-v", c.Volumes.Boot+":/boot")
args = append(args, c.name, "bash", "-c", "cd /work && "+command)
cmd := exec.Command("docker", args...)
log.Debug().Msgf("%v", cmd)
stdout, err := cmd.StdoutPipe()
if err != nil {
return
}
cmd.Stderr = cmd.Stdout
timer := time.AfterFunc(c.timeout, func() {
flog.Info().Msg("killing container by timeout")
cmd.Process.Kill()
})
defer timer.Stop()
err = cmd.Start()
if err != nil {
return
}
go func() {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
output += m + "\n"
flog.Trace().Str("stdout", m).Msg("")
}
}()
err = cmd.Wait()
if err != nil {
e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
err, command, output)
err = errors.New(e)
return
}
return
}

531
container/container.go Normal file
View File

@ -0,0 +1,531 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package container
import (
"bufio"
"errors"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// Runtime is the container engine binary used for every operation
// (e.g. "docker" or "podman").
var Runtime = "docker"

// Registry, when non-empty, is prefixed to the base image name in
// generated Dockerfiles (FROM <Registry>/<image>).
var Registry = ""

// Timeout limits a single container Run; zero disables the timer.
var Timeout time.Duration

// Commands are extra RUN directives rendered right after the FROM
// line of every generated Dockerfile.
var Commands []distro.Command

// UseCache, when true, skips rebuilding images whose Dockerfile is
// unchanged and skips re-loading/importing images that already exist.
var UseCache = true

// Prune, when true, removes dangling images after each build.
var Prune = true

// Image describes an out_of_tree container image and the distro it
// was built for, as decoded from the image name.
type Image struct {
	Name   string
	Distro distro.Distro
}
// Images lists the out_of_tree_* images known to the runtime, decoding
// distro ID and release from each image name.
func Images() (diis []Image, err error) {
	cmd := exec.Command(Runtime, "images")
	log.Debug().Msgf("%v", cmd)

	rawOutput, err := cmd.CombinedOutput()
	if err != nil {
		return
	}

	r, err := regexp.Compile("out_of_tree_.*")
	if err != nil {
		return
	}

	containers := r.FindAll(rawOutput, -1)
	for _, c := range containers {
		containerName := strings.Fields(string(c))[0]

		s := strings.Replace(containerName, "__", ".", -1)
		values := strings.Split(s, "_")
		// Expected layout: out_of_tree_<distro>_<release> (five
		// "_"-separated fields).  Skip anything that does not
		// match instead of panicking on an out-of-range index.
		if len(values) < 5 {
			continue
		}
		distroName, ver := values[3], values[4]

		dii := Image{
			Name: containerName,
		}

		dii.Distro.Release = ver
		dii.Distro.ID, err = distro.NewID(distroName)
		if err != nil {
			return
		}

		diis = append(diis, dii)
	}
	return
}
// Load loads an image archive from localpath into the runtime, unless
// caching is enabled and an image with that name already exists.
func Load(localpath string, name string) (err error) {
	cached := Container{name: name}.Exist()
	if cached && UseCache {
		// Nothing to do: image present and caching enabled.
		return
	}

	loadCmd := exec.Command(Runtime, "load", "-i", localpath)
	log.Debug().Msgf("%v", loadCmd)

	out, err := loadCmd.CombinedOutput()
	if err != nil {
		log.Debug().Err(err).Msg(string(out))
	}
	return
}
// Import creates an image with the given name from the tarball at
// path, unless caching is enabled and the image already exists.
func Import(path, name string) (err error) {
	cached := Container{name: name}.Exist()
	if cached && UseCache {
		// Nothing to do: image present and caching enabled.
		return
	}

	importCmd := exec.Command(Runtime, "import", path, name)
	log.Debug().Msgf("%v", importCmd)

	out, err := importCmd.CombinedOutput()
	if err != nil {
		log.Debug().Err(err).Msg(string(out))
	}
	return
}
// Save exports the named image into an archive at path; the image
// must already exist.
func Save(name, path string) (err error) {
	c := Container{name: name}
	if !c.Exist() {
		err = errors.New("container does not exist")
		log.Error().Err(err).Msg("")
		return
	}

	saveCmd := exec.Command(Runtime, "save", name, "-o", path)
	log.Debug().Msgf("%v", saveCmd)

	out, err := saveCmd.CombinedOutput()
	if err != nil {
		log.Error().Err(err).Msg(string(out))
	}
	return
}
// Volume maps a host path (Src) into the container at Dest.
type Volume struct {
	Src, Dest string
}

// Container is a handle to a runtime image plus the volumes and extra
// arguments used when running it.
type Container struct {
	name string
	dist distro.Distro

	Volumes []Volume

	// Additional arguments
	Args []string

	Log zerolog.Logger
}
// New constructs a Container for the given distro, using the stable
// out_of_tree_<distro>_<release> image name and the standard
// /lib/modules, /usr/src and /boot volumes backed by the dotfiles
// directory.
func New(dist distro.Distro) (c Container, err error) {
	id := strings.ToLower(dist.ID.String())
	release := strings.Replace(dist.Release, ".", "__", -1)
	c.name = fmt.Sprintf("out_of_tree_%s_%s", id, release)

	c.Log = log.With().
		Str("container", c.name).
		Logger()

	c.dist = dist

	mounts := []Volume{
		{Src: dotfiles.Dir("volumes", c.name, "lib", "modules"), Dest: "/lib/modules"},
		{Src: dotfiles.Dir("volumes", c.name, "usr", "src"), Dest: "/usr/src"},
		{Src: dotfiles.Dir("volumes", c.name, "boot"), Dest: "/boot"},
	}
	c.Volumes = append(c.Volumes, mounts...)

	return
}
// NewFromKernelInfo constructs a Container bound to an installed
// kernel, mounting the kernel's modules, sources and boot directories
// from the host paths recorded in ki.
func NewFromKernelInfo(ki distro.KernelInfo) (
	c Container, err error) {

	c.name = ki.ContainerName

	c.Log = log.With().
		Str("container", c.name).
		Logger()

	c.Volumes = append(c.Volumes,
		Volume{
			Src:  path.Dir(ki.ModulesPath),
			Dest: "/lib/modules",
		},
		Volume{
			Src:  filepath.Join(path.Dir(ki.KernelPath), "../usr/src"),
			Dest: "/usr/src",
		},
		Volume{
			Src:  path.Dir(ki.KernelPath),
			Dest: "/boot",
		},
	)

	return
}
// Name returns the container image name.
func (c Container) Name() string {
	return c.name
}
// Exist reports whether an image with the container's name is present
// in the local image store.
func (c Container) Exist() (yes bool) {
	cmd := exec.Command(Runtime, "images", "-q", c.name)
	c.Log.Debug().Msgf("run %v", cmd)

	raw, err := cmd.CombinedOutput()
	if err != nil {
		c.Log.Error().Err(err).Msg(string(raw))
		return false
	}

	// `images -q` prints image IDs; any output means the image exists.
	yes = len(raw) != 0
	msg := "does not exist"
	if yes {
		msg = "exist"
	}
	c.Log.Debug().Msg(msg)
	return
}
// Build renders a Dockerfile (base image plus the global Commands,
// then envs and runs) into the per-container dotfiles directory and
// builds the image.  The build is skipped when the rendered Dockerfile
// is unchanged, the image exists and caching is enabled.
func (c Container) Build(image string, envs, runs []string) (err error) {
	cdir := dotfiles.Dir("containers", c.name)
	cfile := filepath.Join(cdir, "Dockerfile")

	cf := "FROM "
	if Registry != "" {
		cf += Registry + "/"
	}
	cf += image + "\n"

	for _, c := range Commands {
		// TODO check for distro type
		cf += "RUN " + c.Command + "\n"
	}

	for _, e := range envs {
		cf += "ENV " + e + "\n"
	}

	for _, c := range runs {
		cf += "RUN " + c + "\n"
	}

	// Read the previously rendered Dockerfile; if it is missing,
	// write the fresh one immediately (buf stays empty, so the cache
	// comparison below falls through to a build).
	buf, err := os.ReadFile(cfile)
	if err != nil {
		err = os.WriteFile(cfile, []byte(cf), os.ModePerm)
		if err != nil {
			return
		}
	}

	// Cache hit: Dockerfile unchanged and the image already exists.
	if string(buf) == cf && c.Exist() && UseCache {
		return
	}

	err = os.WriteFile(cfile, []byte(cf), os.ModePerm)
	if err != nil {
		return
	}

	if c.Exist() {
		c.Log.Info().Msg("update")
	} else {
		c.Log.Info().Msg("build")
	}

	output, err := c.build(cdir)
	if err != nil {
		c.Log.Error().Err(err).Msg(output)
		return
	}

	c.Log.Info().Msg("success")
	return
}
// prune asks the runtime to delete dangling or unused images.
func (c Container) prune() error {
	c.Log.Debug().Msg("remove dangling or unused images from local storage")
	pruneCmd := exec.Command(Runtime, "image", "prune", "-f")
	return pruneCmd.Run()
}
// build runs `<Runtime> build` on imagePath, streaming the combined
// stdout/stderr into the returned output string and the trace log.
func (c Container) build(imagePath string) (output string, err error) {
	if Prune {
		// Best-effort cleanup of dangling images after the build;
		// the error from prune is deliberately ignored.
		defer c.prune()
	}

	args := []string{"build"}
	if !UseCache {
		args = append(args, "--pull", "--no-cache")
	}
	args = append(args, "-t", c.name, imagePath)

	cmd := exec.Command(Runtime, args...)
	flog := c.Log.With().
		Str("command", fmt.Sprintf("%v", cmd)).
		Logger()

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	// Merge stderr into the same stream as stdout.
	cmd.Stderr = cmd.Stdout

	err = cmd.Start()
	if err != nil {
		return
	}
	go func() {
		scanner := bufio.NewScanner(stdout)
		for scanner.Scan() {
			m := scanner.Text()
			output += m + "\n"
			flog.Trace().Str("stdout", m).Msg("")
		}
	}()

	err = cmd.Wait()
	return
}
// Run executes cmds (joined with &&) inside the container, mounting
// workdir at /work when non-empty plus the configured Volumes, and
// returns the combined stdout/stderr.  A non-zero exit — including a
// kill triggered by the global Timeout — yields an error embedding
// the collected output.
func (c Container) Run(workdir string, cmds []string) (out string, err error) {
	flog := c.Log.With().
		Str("workdir", workdir).
		Str("command", fmt.Sprintf("%v", cmds)).
		Logger()

	var args []string
	args = append(args, "run", "--rm")
	args = append(args, c.Args...)
	if workdir != "" {
		args = append(args, "-v", workdir+":/work")
	}

	for _, volume := range c.Volumes {
		mount := fmt.Sprintf("%s:%s", volume.Src, volume.Dest)
		args = append(args, "-v", mount)
	}

	// Chain commands so the shell stops at the first failure.
	command := "true"
	for _, c := range cmds {
		command += fmt.Sprintf(" && %s", c)
	}

	args = append(args, c.name, "bash", "-c")
	if workdir != "" {
		args = append(args, "cd /work && "+command)
	} else {
		args = append(args, command)
	}

	cmd := exec.Command(Runtime, args...)

	flog.Debug().Msgf("%v", cmd)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	// Merge stderr into the stdout stream.
	cmd.Stderr = cmd.Stdout

	if Timeout != 0 {
		// Graceful shutdown on timeout: SIGINT first, escalate to
		// SIGKILL a minute later if the process is still alive.
		timer := time.AfterFunc(Timeout, func() {
			flog.Info().Msg("killing container by timeout")

			flog.Debug().Msg("SIGINT")
			cmd.Process.Signal(os.Interrupt)

			time.Sleep(time.Minute)

			flog.Debug().Msg("SIGKILL")
			cmd.Process.Kill()
		})
		defer timer.Stop()
	}

	err = cmd.Start()
	if err != nil {
		return
	}
	go func() {
		scanner := bufio.NewScanner(stdout)
		for scanner.Scan() {
			m := scanner.Text()
			out += m + "\n"
			flog.Trace().Str("stdout", m).Msg("")
		}
	}()

	err = cmd.Wait()
	if err != nil {
		e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
			err, cmds, out)
		err = errors.New(e)
		return
	}

	return
}
func FindKernel(entries []os.DirEntry, kname string) (name string, err error) {
for _, e := range entries {
var fi os.FileInfo
fi, err = e.Info()
if err != nil {
return
}
if strings.HasPrefix(fi.Name(), "vmlinuz") {
if strings.Contains(fi.Name(), kname) {
name = fi.Name()
return
}
}
}
err = errors.New("cannot find kernel")
return
}
func FindInitrd(entries []os.DirEntry, kname string) (name string, err error) {
for _, e := range entries {
var fi os.FileInfo
fi, err = e.Info()
if err != nil {
return
}
if strings.HasPrefix(fi.Name(), "initrd") ||
strings.HasPrefix(fi.Name(), "initramfs") {
if strings.Contains(fi.Name(), kname) {
name = fi.Name()
return
}
}
}
err = errors.New("cannot find kernel")
return
}
// Kernels enumerates the kernels installed in this container: each
// directory under the host-side /lib/modules volume is paired with a
// matching vmlinuz and initrd from the host-side /boot volume.
// Entries without a matching kernel or initrd are skipped with a
// warning rather than failing the whole enumeration.
func (c Container) Kernels() (kernels []distro.KernelInfo, err error) {
	// Missing container means nothing to enumerate; not an error.
	if !c.Exist() {
		return
	}
	// Resolve host paths backing /lib/modules and /boot from the
	// container's volume mappings.
	var libmodules, boot string
	for _, volume := range c.Volumes {
		switch volume.Dest {
		case "/lib/modules":
			libmodules = volume.Src
		case "/boot":
			boot = volume.Src
		}
	}
	moddirs, err := os.ReadDir(libmodules)
	if err != nil {
		return
	}
	bootfiles, err := os.ReadDir(boot)
	if err != nil {
		return
	}
	for _, e := range moddirs {
		var krel os.FileInfo
		krel, err = e.Info()
		if err != nil {
			return
		}
		c.Log.Debug().Msgf("generate config entry for %s", krel.Name())
		var kernelFile, initrdFile string
		kernelFile, err = FindKernel(bootfiles, krel.Name())
		if err != nil {
			// No kernel image for this module dir; skip it.
			c.Log.Warn().Msgf("cannot find kernel %s", krel.Name())
			continue
		}
		initrdFile, err = FindInitrd(bootfiles, krel.Name())
		if err != nil {
			c.Log.Warn().Msgf("cannot find initrd %s", krel.Name())
			continue
		}
		ki := distro.KernelInfo{
			Distro:        c.dist,
			KernelVersion: krel.Name(),
			KernelRelease: krel.Name(),
			ContainerName: c.name,
			KernelPath:  filepath.Join(boot, kernelFile),
			InitrdPath:  filepath.Join(boot, initrdFile),
			ModulesPath: filepath.Join(libmodules, krel.Name()),
			RootFS: dotfiles.File("images", c.dist.RootFS()),
		}
		kernels = append(kernels, ki)
	}
	// Make /boot files world-readable via the container — presumably
	// so an unprivileged qemu can read them. TODO confirm intent.
	for _, cmd := range []string{
		"find /boot -type f -exec chmod a+r {} \\;",
	} {
		_, err = c.Run(dotfiles.Dir("tmp"), []string{cmd})
		if err != nil {
			return
		}
	}
	return
}

302
daemon/commands.go Normal file
View File

@ -0,0 +1,302 @@
package daemon
import (
"database/sql"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"path/filepath"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/daemon/db"
)
// cmdenv carries per-connection state handed to command handlers.
type cmdenv struct {
	// Conn is the client connection; used directly only by rawMode.
	Conn net.Conn
	// Log is pre-tagged with the response UUID and remote address.
	Log zerolog.Logger
	// DB is the shared daemon database handle.
	DB *sql.DB
	// WG is incremented for the duration of each command (see command).
	WG *sync.WaitGroup
	// KernelConfig is the path to the daemon's kernel config file.
	KernelConfig string
}
// command dispatches one API request to its handler and records the
// handler error on the response.
//
// Fix: the response trace was `defer e.Log.Trace().Msgf("%v",
// spew.Sdump(resp))`, whose arguments are evaluated at defer time —
// it always dumped the still-empty response. Wrapping the call in a
// closure defers the Sdump itself.
func command(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	e.Log.Trace().Msgf("%v", spew.Sdump(req))
	defer func() {
		e.Log.Trace().Msgf("%v", spew.Sdump(resp))
	}()

	e.WG.Add(1)
	defer e.WG.Done()

	e.Log.Debug().Msgf("%v", req.Command)
	switch req.Command {
	case api.RawMode:
		err = rawMode(req, e)
	case api.AddJob:
		err = addJob(req, resp, e)
	case api.ListJobs:
		err = listJobs(req, resp, e)
	case api.AddRepo:
		err = addRepo(req, resp, e)
	case api.ListRepos:
		err = listRepos(resp, e)
	case api.Kernels:
		err = kernels(resp, e)
	case api.JobStatus:
		err = jobStatus(req, resp, e)
	case api.JobLogs:
		err = jobLogs(req, resp, e)
	default:
		err = errors.New("unknown command")
	}

	resp.Err = err
	return
}
// logWriter is an io.Writer sink used to drain git daemon / proxy
// streams; per-chunk trace logging is currently commented out.
type logWriter struct {
	log zerolog.Logger
}

// Write discards p but reports the full length as written so callers
// (io.Copy) keep streaming.
func (lw logWriter) Write(p []byte) (n int, err error) {
	n = len(p)
	//lw.log.Trace().Msgf("%v", strconv.Quote(string(p)))
	return
}
// rawMode proxies the client connection to the local git daemon on
// :9418, tee-ing both directions through trace log writers. It blocks
// until the client-to-git direction finishes.
//
// Fix: close the upstream git connection — the original leaked it on
// every raw-mode session.
func rawMode(req *api.Req, e cmdenv) (err error) {
	uuid := uuid.New().String()

	lwsend := logWriter{log.With().Str("uuid", uuid).Str("git", "send").Logger()}
	lwrecv := logWriter{log.With().Str("uuid", uuid).Str("git", "recv").Logger()}

	conn, err := net.Dial("tcp", ":9418")
	if err != nil {
		log.Error().Err(err).Msg("dial")
		return
	}
	defer conn.Close()

	go io.Copy(e.Conn, io.TeeReader(conn, lwrecv))
	io.Copy(conn, io.TeeReader(e.Conn, lwsend))
	return
}
// listJobs sends back all jobs updated since params.UpdatedAfter that
// match the optional group/repo/commit/status filters.
func listJobs(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var params api.ListJobsParams
	if err = req.GetData(&params); err != nil {
		return
	}

	jobs, err := db.Jobs(e.DB, "updated >= ?", params.UpdatedAfter)
	if err != nil {
		return
	}

	// A job matches when every non-empty filter field agrees.
	matches := func(j api.Job) bool {
		if params.Group != "" && j.Group != params.Group {
			return false
		}
		if params.Repo != "" && j.RepoName != params.Repo {
			return false
		}
		if params.Commit != "" && j.Commit != params.Commit {
			return false
		}
		if params.Status != "" && j.Status != params.Status {
			return false
		}
		return true
	}

	var result []api.Job
	for _, j := range jobs {
		if matches(j) {
			result = append(result, j)
		}
	}

	resp.SetData(&result)
	return
}
// addJob validates and persists a new job, replying with its UUID.
//
// Fix: validate the request fields *before* the repo-existence lookup.
// The original reported "repo does not exist" for an empty repo name
// and did a needless DB query for obviously invalid requests; the
// repo scan now also breaks on the first match.
func addJob(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var job api.Job
	err = req.GetData(&job)
	if err != nil {
		return
	}

	if job.RepoName == "" {
		err = errors.New("repo name cannot be empty")
		return
	}
	if job.Commit == "" {
		err = errors.New("invalid commit")
		return
	}

	job.GenUUID()
	job.Created = time.Now()

	var repos []api.Repo
	repos, err = db.Repos(e.DB)
	if err != nil {
		return
	}

	var found bool
	for _, r := range repos {
		if job.RepoName == r.Name {
			found = true
			break
		}
	}
	if !found {
		err = errors.New("repo does not exist")
		return
	}

	err = db.AddJob(e.DB, &job)
	if err != nil {
		return
	}

	resp.SetData(&job.UUID)
	return
}
// listRepos sends back all registered repositories, with each one's
// on-disk path filled in.
func listRepos(resp *api.Resp, e cmdenv) (err error) {
	repos, err := db.Repos(e.DB)
	if err != nil {
		e.Log.Error().Err(err).Msg("")
		return
	}

	for i := range repos {
		repos[i].Path = dotfiles.Dir("daemon/repos", repos[i].Name)
	}

	log.Trace().Msgf("%v", spew.Sdump(repos))
	resp.SetData(&repos)
	return
}
// addRepo registers a new repository: checks for duplicates, creates
// a bare git repository under the daemon's repos directory and
// records it in the database.
//
// Fix: reject an empty repository name up front — the original would
// run `git init --bare` directly in the repos root directory.
func addRepo(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var repo api.Repo
	err = req.GetData(&repo)
	if err != nil {
		return
	}

	if repo.Name == "" {
		err = errors.New("repo name cannot be empty")
		return
	}

	var repos []api.Repo
	repos, err = db.Repos(e.DB)
	if err != nil {
		return
	}

	for _, r := range repos {
		log.Debug().Msgf("%v, %v", r, repo.Name)
		if repo.Name == r.Name {
			err = fmt.Errorf("repo already exist")
			return
		}
	}

	cmd := exec.Command("git", "init", "--bare")
	cmd.Dir = dotfiles.Dir("daemon/repos", repo.Name)

	var out []byte
	out, err = cmd.Output()
	e.Log.Debug().Msgf("%v -> %v\n%v", cmd, err, string(out))
	if err != nil {
		return
	}

	err = db.AddRepo(e.DB, &repo)
	return
}
// kernels replies with the kernel list parsed from the daemon's
// kernel config file.
func kernels(resp *api.Resp, e cmdenv) (err error) {
	kcfg, err := config.ReadKernelConfig(e.KernelConfig)
	if err != nil {
		e.Log.Error().Err(err).Msg("read kernels config")
		return
	}

	count := len(kcfg.Kernels)
	e.Log.Info().Msgf("send back %d kernels", count)

	resp.SetData(&kcfg.Kernels)
	return
}
// jobLogs reads every log file in the job's log directory and sends
// the collection back to the client.
func jobLogs(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var uuid string
	if err = req.GetData(&uuid); err != nil {
		return
	}

	logdir := filepath.Join(dotfiles.File("daemon/logs"), uuid)
	if _, err = os.Stat(logdir); err != nil {
		return
	}

	files, err := os.ReadDir(logdir)
	if err != nil {
		return
	}

	var logs []api.JobLog
	for _, f := range files {
		if f.IsDir() {
			continue
		}

		var raw []byte
		raw, err = os.ReadFile(filepath.Join(logdir, f.Name()))
		if err != nil {
			return
		}

		logs = append(logs, api.JobLog{
			Name: f.Name(),
			Text: string(raw),
		})
	}

	resp.SetData(&logs)
	return
}
// jobStatus looks up a job's status by UUID and sends it back.
func jobStatus(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var uuid string
	if err = req.GetData(&uuid); err != nil {
		return
	}

	st, err := db.JobStatus(e.DB, uuid)
	if err != nil {
		return
	}

	resp.SetData(&st)
	return
}

247
daemon/daemon.go Normal file
View File

@ -0,0 +1,247 @@
package daemon
import (
"crypto/tls"
"database/sql"
"io"
"net"
"os/exec"
"runtime"
"sync"
"time"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/daemon/db"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// Daemon processes queued jobs and serves the daemon TLS API.
type Daemon struct {
	// Threads caps the number of jobs processed in parallel.
	Threads int
	// Resources tracks the host CPU/RAM budget for running jobs.
	Resources *Resources
	// db is the jobs/repos database opened in Init.
	db *sql.DB
	// kernelConfig is the path to the kernel config file.
	kernelConfig string
	// shutdown is set by Kill to stop the processing loop.
	shutdown bool
	// wg tracks in-flight work so Kill can coordinate shutdown.
	wg sync.WaitGroup
}
// Init constructs a Daemon with one worker per CPU, fresh resource
// accounting and an opened jobs database.
//
// Fix: return immediately when the database cannot be opened — the
// original logged the error, then logged a misleading "database ..."
// info line and handed back a Daemon with a nil handle alongside the
// error.
func Init(kernelConfig string) (d *Daemon, err error) {
	d = &Daemon{}
	d.Threads = runtime.NumCPU()
	d.Resources = NewResources()
	d.kernelConfig = kernelConfig

	d.wg.Add(1) // matches with db.Close()
	d.db, err = db.OpenDatabase(dotfiles.File("daemon/daemon.db"))
	if err != nil {
		log.Error().Err(err).Msg("cannot open daemon.db")
		return
	}

	log.Info().Msgf("database %s", dotfiles.File("daemon/daemon.db"))
	return
}
// Kill requests daemon shutdown: stops the processing loop, closes
// the database and releases the WaitGroup slot taken in Init
// (matching the wg.Add(1) there).
func (d *Daemon) Kill() {
	d.shutdown = true
	d.db.Close()
	d.wg.Done()
}
// Daemon runs the job scheduling loop until Kill is called. Each pass
// it reads all jobs, requeues new jobs (and, on the first pass only,
// jobs left "running" by a previous daemon instance), and dispatches
// waiting jobs onto a bounded pool of d.Threads workers.
func (d *Daemon) Daemon() {
	if d.db == nil {
		log.Fatal().Msg("db is not initialized")
	}
	// swg bounds the number of concurrently processed jobs.
	swg := sizedwaitgroup.New(d.Threads)
	log.Info().Int("threads", d.Threads).Msg("start")
	first := true
	for !d.shutdown {
		d.wg.Add(1)
		jobs, err := db.Jobs(d.db, "")
		if err != nil && !d.shutdown {
			log.Error().Err(err).Msg("")
			d.wg.Done()
			// Back off on DB errors before retrying.
			time.Sleep(time.Minute)
			continue
		}
		for _, job := range jobs {
			if d.shutdown {
				break
			}
			pj := newJobProcessor(job, d.db)
			// First pass after startup: a "running" job was
			// interrupted by a previous daemon; requeue it.
			if first && job.Status == api.StatusRunning {
				pj.SetStatus(api.StatusWaiting)
				continue
			}
			// New jobs become waiting; they are picked up on a
			// later pass.
			if job.Status == api.StatusNew {
				pj.SetStatus(api.StatusWaiting)
				continue
			}
			// Only waiting jobs get scheduled.
			if job.Status != api.StatusWaiting {
				continue
			}
			swg.Add()
			go func(pj jobProcessor) {
				defer swg.Done()
				pj.Process(d.Resources)
				time.Sleep(time.Second)
			}(pj)
		}
		first = false
		d.wg.Done()
		time.Sleep(time.Second)
	}
	// Let in-flight jobs finish before returning.
	swg.Wait()
}
// handler serves one client connection: decodes a single request,
// dispatches it via command and encodes the response on exit — except
// in raw git mode, where rawMode hijacks the connection and no
// response must be written.
func handler(conn net.Conn, e cmdenv) {
	defer conn.Close()
	resp := api.NewResp()
	e.Log = log.With().
		Str("resp_uuid", resp.UUID).
		Str("remote_addr", conn.RemoteAddr().String()).
		Logger()
	e.Log.Info().Msg("")
	var req api.Req
	// Deferred so the response is sent even on decode/handler errors;
	// req.Command is inspected at defer-run time, after Decode.
	defer func() {
		if req.Command != api.RawMode {
			resp.Encode(conn)
		} else {
			log.Debug().Msg("raw mode, not encode response")
		}
	}()
	err := req.Decode(conn)
	if err != nil {
		e.Log.Error().Err(err).Msg("cannot decode")
		return
	}
	err = command(&req, &resp, e)
	if err != nil {
		e.Log.Error().Err(err).Msg("")
		return
	}
}
// Listen starts a local git daemon on :9418 serving the repos
// directory, generates a self-signed TLS certificate on first run,
// and then accepts and serves API connections on addr forever.
func (d *Daemon) Listen(addr string) {
	if d.db == nil {
		log.Fatal().Msg("db is not initialized")
	}
	// Background git daemon exporting daemon/repos over git://.
	go func() {
		repodir := dotfiles.Dir("daemon/repos")
		git := exec.Command("git", "daemon", "--port=9418", "--verbose",
			"--reuseaddr",
			"--export-all", "--base-path="+repodir,
			"--enable=receive-pack",
			"--enable=upload-pack",
			repodir)
		stdout, err := git.StdoutPipe()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}
		// Drain output through the (currently discarding) logWriter.
		go io.Copy(logWriter{log: log.Logger}, stdout)
		stderr, err := git.StderrPipe()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}
		go io.Copy(logWriter{log: log.Logger}, stderr)
		log.Debug().Msgf("start %v", git)
		git.Start()
		defer func() {
			log.Debug().Msgf("stop %v", git)
		}()
		err = git.Wait()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}
	}()
	// First run: generate a self-signed wildcard certificate that
	// clients must copy to authenticate the daemon.
	if !fs.PathExists(dotfiles.File("daemon/cert.pem")) {
		log.Info().Msg("No cert.pem, generating...")
		cmd := exec.Command("openssl",
			"req", "-batch", "-newkey", "rsa:2048",
			"-new", "-nodes", "-x509",
			"-subj", "/CN=*",
			"-addext", "subjectAltName = DNS:*",
			"-out", dotfiles.File("daemon/cert.pem"),
			"-keyout", dotfiles.File("daemon/key.pem"))
		out, err := cmd.Output()
		if err != nil {
			log.Error().Err(err).Msg(string(out))
			return
		}
	}
	log.Info().Msg("copy to client:")
	log.Info().Msgf("cert: %s, key: %s",
		dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))
	cert, err := tls.LoadX509KeyPair(dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("LoadX509KeyPair")
	}
	tlscfg := &tls.Config{Certificates: []tls.Certificate{cert}}
	l, err := tls.Listen("tcp", addr, tlscfg)
	if err != nil {
		log.Fatal().Err(err).Msg("listen")
	}
	log.Info().Str("addr", ":9418").Msg("git")
	log.Info().Str("addr", addr).Msg("daemon")
	// Accept loop: one handler goroutine per connection.
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal().Err(err).Msg("accept")
		}
		log.Info().Msgf("accept %s", conn.RemoteAddr())
		e := cmdenv{
			DB:           d.db,
			WG:           &d.wg,
			Conn:         conn,
			KernelConfig: d.kernelConfig,
		}
		go handler(conn, e)
	}
}

15
daemon/daemon_test.go Normal file
View File

@ -0,0 +1,15 @@
package daemon
import (
"os"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
// init configures plain (no-color) console logging for the daemon
// package tests.
func init() {
	log.Logger = zerolog.New(zerolog.ConsoleWriter{
		Out:     os.Stdout,
		NoColor: true,
	})
}

123
daemon/db/db.go Normal file
View File

@ -0,0 +1,123 @@
package db
import (
"database/sql"
"fmt"
"strconv"
_ "github.com/mattn/go-sqlite3"
)
// currentDatabaseVersion is the schema version this code expects.
// Change on ANY database update
const currentDatabaseVersion = 1
// versionField is the metadata key under which the schema version is
// stored.
const versionField = "db_version"
// createMetadataTable creates the key/value metadata table used for
// schema bookkeeping, if it does not exist yet.
func createMetadataTable(db *sql.DB) (err error) {
	const schema = `
	CREATE TABLE IF NOT EXISTS metadata (
		id	INTEGER PRIMARY KEY,
		key	TEXT UNIQUE,
		value	TEXT
	)`
	_, err = db.Exec(schema)
	return
}
// metaChkValue reports whether a metadata row with the given key
// exists.
//
// Fix: the query string was stored in a local named `sql`, shadowing
// the imported database/sql package; renamed to `query`.
func metaChkValue(db *sql.DB, key string) (exist bool, err error) {
	query := "SELECT EXISTS(SELECT id FROM metadata WHERE key = $1)"
	stmt, err := db.Prepare(query)
	if err != nil {
		return
	}
	defer stmt.Close()
	err = stmt.QueryRow(key).Scan(&exist)
	return
}
// metaGetValue fetches the metadata value stored under key.
func metaGetValue(db *sql.DB, key string) (value string, err error) {
	query := "SELECT value FROM metadata " +
		"WHERE key = $1"
	stmt, err := db.Prepare(query)
	if err != nil {
		return
	}
	defer stmt.Close()
	err = stmt.QueryRow(key).Scan(&value)
	return
}
// metaSetValue inserts or replaces the metadata value for key.
func metaSetValue(db *sql.DB, key, value string) (err error) {
	query := "INSERT OR REPLACE INTO metadata " +
		"(key, value) VALUES ($1, $2)"
	stmt, err := db.Prepare(query)
	if err != nil {
		return
	}
	defer stmt.Close()
	_, err = stmt.Exec(key, value)
	return
}
// getVersion reads the schema version recorded in the metadata table.
func getVersion(db *sql.DB) (version int, err error) {
	raw, err := metaGetValue(db, versionField)
	if err != nil {
		return
	}
	return strconv.Atoi(raw)
}
// createSchema creates all tables used by the daemon database,
// stopping at the first failure.
func createSchema(db *sql.DB) (err error) {
	for _, create := range []func(*sql.DB) error{
		createMetadataTable,
		createJobTable,
		createRepoTable,
	} {
		if err = create(db); err != nil {
			return
		}
	}
	return
}
// OpenDatabase opens the SQLite database at path, creating the schema
// and recording the version on first use, and rejecting databases
// whose schema version does not match currentDatabaseVersion.
func OpenDatabase(path string) (db *sql.DB, err error) {
	db, err = sql.Open("sqlite3", path)
	if err != nil {
		return
	}
	// Single connection: serializes all queries on the SQLite file.
	db.SetMaxOpenConns(1)
	// Error deliberately ignored: on a fresh database the metadata
	// table does not exist yet, which surfaces here as an error and
	// simply means "no version recorded".
	exists, _ := metaChkValue(db, versionField)
	if !exists {
		err = createSchema(db)
		if err != nil {
			return
		}
		err = metaSetValue(db, versionField,
			strconv.Itoa(currentDatabaseVersion))
		return
	}
	version, err := getVersion(db)
	if err != nil {
		return
	}
	if version != currentDatabaseVersion {
		err = fmt.Errorf("database is not supported (%d instead of %d)",
			version, currentDatabaseVersion)
		return
	}
	return
}

31
daemon/db/db_test.go Normal file
View File

@ -0,0 +1,31 @@
package db
import (
"database/sql"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// tmpdb creates a temporary on-disk SQLite database for tests.
// Callers are responsible for removing file and closing db (the
// deferred cleanups are intentionally left to the caller).
func tmpdb(t *testing.T) (file *os.File, db *sql.DB) {
	file, err := os.CreateTemp("", "temp-sqlite.db")
	assert.Nil(t, err)
	// defer os.Remove(file.Name())
	db, err = OpenDatabase(file.Name())
	assert.Nil(t, err)
	// defer db.Close()
	return
}
// TestOpenDatabase checks that a freshly created database can be
// closed and reopened without a schema-version error.
func TestOpenDatabase(t *testing.T) {
	file, db := tmpdb(t)
	defer os.Remove(file.Name())
	db.Close()
	db, err := OpenDatabase(file.Name())
	assert.Nil(t, err)
	db.Close()
}

191
daemon/db/job.go Normal file
View File

@ -0,0 +1,191 @@
package db
import (
"bytes"
"database/sql"
"encoding/gob"
"time"
"code.dumpstack.io/tools/out-of-tree/api"
)
// createJobTable creates the job table if it does not exist yet.
// config and target hold gob-encoded blobs; timestamps are Unix
// seconds.
func createJobTable(db *sql.DB) (err error) {
	const schema = `
	CREATE TABLE IF NOT EXISTS job (
		id		INTEGER PRIMARY KEY,
		updated		INT,
		uuid		TEXT,
		group_uuid	TEXT,
		repo		TEXT,
		"commit"	TEXT,
		config		TEXT,
		target		TEXT,
		created		INT,
		started		INT,
		finished	INT,
		status		TEXT DEFAULT "new"
	)`
	_, err = db.Exec(schema)
	return
}
// AddJob inserts a new job row. Artifact and Target are stored as
// gob-encoded blobs in the config and target columns; time fields are
// stored as Unix seconds. On success job.ID receives the new row id.
func AddJob(db *sql.DB, job *api.Job) (err error) {
	stmt, err := db.Prepare(`INSERT INTO job (updated, uuid, group_uuid, repo, "commit", ` +
		`config, target, created, started, finished) ` +
		`VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10);`)
	if err != nil {
		return
	}
	defer stmt.Close()
	// gob-encode the artifact description for the config column.
	var abuf bytes.Buffer
	err = gob.NewEncoder(&abuf).Encode(job.Artifact)
	if err != nil {
		return
	}
	config := abuf.Bytes()
	// gob-encode the target for the target column.
	var tbuf bytes.Buffer
	err = gob.NewEncoder(&tbuf).Encode(job.Target)
	if err != nil {
		return
	}
	target := tbuf.Bytes()
	res, err := stmt.Exec(time.Now().Unix(), job.UUID, job.Group,
		job.RepoName, job.Commit, config, target,
		job.Created.Unix(), job.Started.Unix(),
		job.Finished.Unix(),
	)
	if err != nil {
		return
	}
	job.ID, err = res.LastInsertId()
	return
}
// UpdateJob rewrites the row identified by job.ID with the job's
// current state, refreshing the updated timestamp. Artifact/Target
// are re-encoded with gob, mirroring AddJob.
func UpdateJob(db *sql.DB, job *api.Job) (err error) {
	stmt, err := db.Prepare(`UPDATE job ` +
		`SET updated=$1, uuid=$2, group_uuid=$3, repo=$4, ` +
		`"commit"=$5, config=$6, target=$7, ` +
		`created=$8, started=$9, finished=$10, ` +
		`status=$11 ` +
		`WHERE id=$12`)
	if err != nil {
		return
	}
	defer stmt.Close()
	var abuf bytes.Buffer
	err = gob.NewEncoder(&abuf).Encode(job.Artifact)
	if err != nil {
		return
	}
	config := abuf.Bytes()
	var tbuf bytes.Buffer
	err = gob.NewEncoder(&tbuf).Encode(job.Target)
	if err != nil {
		return
	}
	target := tbuf.Bytes()
	_, err = stmt.Exec(time.Now().Unix(), job.UUID, job.Group,
		job.RepoName, job.Commit,
		config, target,
		job.Created.Unix(), job.Started.Unix(),
		job.Finished.Unix(), job.Status, job.ID)
	return
}
// scanJob decodes one job row via the supplied Scan function (works
// for both *sql.Row and *sql.Rows). The destination order must match
// the SELECT column lists used in Jobs and Job.
func scanJob(scan func(dest ...any) error) (job api.Job, err error) {
	var config, target []byte
	var updated, created, started, finished int64
	err = scan(&job.ID, &updated, &job.UUID, &job.Group,
		&job.RepoName, &job.Commit, &config, &target,
		&created, &started, &finished, &job.Status)
	if err != nil {
		return
	}
	// Decode the gob blobs written by AddJob/UpdateJob.
	abuf := bytes.NewBuffer(config)
	err = gob.NewDecoder(abuf).Decode(&job.Artifact)
	if err != nil {
		return
	}
	tbuf := bytes.NewBuffer(target)
	err = gob.NewDecoder(tbuf).Decode(&job.Target)
	if err != nil {
		return
	}
	// Timestamps are stored as Unix seconds.
	job.UpdatedAt = time.Unix(updated, 0)
	job.Created = time.Unix(created, 0)
	job.Started = time.Unix(started, 0)
	job.Finished = time.Unix(finished, 0)
	return
}
// Jobs returns all jobs, optionally filtered by a caller-supplied
// WHERE clause with positional args.
//
// Fix: check rows.Err() after iteration — the original silently
// dropped errors that terminated the row cursor early.
func Jobs(db *sql.DB, where string, args ...any) (jobs []api.Job, err error) {
	q := `SELECT id, updated, uuid, group_uuid, ` +
		`repo, "commit", config, target, created, ` +
		`started, finished, status FROM job`
	if len(where) != 0 {
		q += ` WHERE ` + where
	}
	stmt, err := db.Prepare(q)
	if err != nil {
		return
	}
	defer stmt.Close()

	rows, err := stmt.Query(args...)
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var job api.Job
		job, err = scanJob(rows.Scan)
		if err != nil {
			return
		}
		jobs = append(jobs, job)
	}
	err = rows.Err()
	return
}
// Job fetches a single job by its UUID.
func Job(db *sql.DB, uuid string) (job api.Job, err error) {
	query := `SELECT id, updated, uuid, ` +
		`group_uuid, ` +
		`repo, "commit", config, target, ` +
		`created, started, finished, status ` +
		`FROM job WHERE uuid=$1`
	stmt, err := db.Prepare(query)
	if err != nil {
		return
	}
	defer stmt.Close()
	return scanJob(stmt.QueryRow(uuid).Scan)
}
// JobStatus returns only the status column of the job with the given
// UUID.
func JobStatus(db *sql.DB, uuid string) (st api.Status, err error) {
	stmt, err := db.Prepare(`SELECT status FROM job ` +
		`WHERE uuid=$1`)
	if err != nil {
		return
	}
	defer stmt.Close()
	err = stmt.QueryRow(uuid).Scan(&st)
	return
}

50
daemon/db/job_test.go Normal file
View File

@ -0,0 +1,50 @@
package db
import (
"os"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/api"
)
// TestJobTable exercises the job CRUD round trip: insert, update,
// list, fetch by UUID and status lookup.
func TestJobTable(t *testing.T) {
	file, db := tmpdb(t)
	defer os.Remove(file.Name())
	defer db.Close()
	job := api.Job{
		RepoName: "testname",
		Commit:   "test",
		Group:    uuid.New().String(),
	}
	err := AddJob(db, &job)
	assert.Nil(t, err)
	// Mutate and persist; the single row must reflect the changes.
	job.Group = uuid.New().String()
	job.Status = api.StatusSuccess
	err = UpdateJob(db, &job)
	assert.Nil(t, err)
	jobs, err := Jobs(db, "")
	assert.Nil(t, err)
	assert.Equal(t, 1, len(jobs))
	assert.Equal(t, job.Group, jobs[0].Group)
	job, err = Job(db, job.UUID)
	assert.Nil(t, err)
	assert.Equal(t, api.StatusSuccess, job.Status)
	st, err := JobStatus(db, job.UUID)
	assert.Nil(t, err)
	assert.Equal(t, job.Status, st)
}

61
daemon/db/repo.go Normal file
View File

@ -0,0 +1,61 @@
package db
import (
"database/sql"
"code.dumpstack.io/tools/out-of-tree/api"
)
// createRepoTable creates the repo table (unique names) if needed.
func createRepoTable(db *sql.DB) (err error) {
	const schema = `
	CREATE TABLE IF NOT EXISTS repo (
		id	INTEGER PRIMARY KEY,
		name	TEXT UNIQUE
	)`
	_, err = db.Exec(schema)
	return
}
// AddRepo inserts a repository row and stores the new row id in
// repo.ID.
func AddRepo(db *sql.DB, repo *api.Repo) (err error) {
	stmt, err := db.Prepare(`INSERT INTO repo (name) ` +
		`VALUES ($1);`)
	if err != nil {
		return
	}
	defer stmt.Close()

	var res sql.Result
	res, err = stmt.Exec(repo.Name)
	if err != nil {
		return
	}

	repo.ID, err = res.LastInsertId()
	return
}
// Repos returns all registered repositories.
//
// Fix: check rows.Err() after iteration — the original silently
// dropped errors that terminated the row cursor early.
func Repos(db *sql.DB) (repos []api.Repo, err error) {
	stmt, err := db.Prepare(`SELECT id, name FROM repo`)
	if err != nil {
		return
	}
	defer stmt.Close()

	rows, err := stmt.Query()
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var repo api.Repo
		err = rows.Scan(&repo.ID, &repo.Name)
		if err != nil {
			return
		}
		repos = append(repos, repo)
	}
	err = rows.Err()
	return
}

46
daemon/db/repo_test.go Normal file
View File

@ -0,0 +1,46 @@
package db
import (
"database/sql"
"os"
"testing"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/api"
)
// testCreateRepoTable opens a bare temporary SQLite database with
// only the repo table created (no metadata/version bookkeeping).
// Callers clean up file and db.
func testCreateRepoTable(t *testing.T) (file *os.File, db *sql.DB) {
	file, err := os.CreateTemp("", "temp-sqlite.db")
	assert.Nil(t, err)
	// defer os.Remove(tempDB.Name())
	db, err = sql.Open("sqlite3", file.Name())
	assert.Nil(t, err)
	// defer db.Close()
	db.SetMaxOpenConns(1)
	err = createRepoTable(db)
	assert.Nil(t, err)
	return
}
// TestRepoTable checks the AddRepo/Repos round trip on a fresh table.
func TestRepoTable(t *testing.T) {
	file, db := testCreateRepoTable(t)
	defer db.Close()
	defer os.Remove(file.Name())
	repo := api.Repo{Name: "testname"}
	err := AddRepo(db, &repo)
	assert.Nil(t, err)
	repos, err := Repos(db)
	assert.Nil(t, err)
	assert.Equal(t, 1, len(repos))
	assert.Equal(t, repo, repos[0])
}

177
daemon/process.go Normal file
View File

@ -0,0 +1,177 @@
package daemon
import (
"database/sql"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/daemon/db"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// jobProcessor bundles a job with the database handle and a logger
// tagged with the job and group UUIDs.
type jobProcessor struct {
	job api.Job
	log zerolog.Logger
	db  *sql.DB
}
// newJobProcessor builds a processor for job with a logger tagged
// with the job and group UUIDs.
func newJobProcessor(job api.Job, db *sql.DB) (pj jobProcessor) {
	pj = jobProcessor{
		job: job,
		db:  db,
		log: log.With().
			Str("uuid", job.UUID).
			Str("group", job.Group).
			Logger(),
	}
	return
}
// Update persists the current in-memory job state to the database,
// logging (and returning) any failure.
func (pj jobProcessor) Update() (err error) {
	if err = db.UpdateJob(pj.db, &pj.job); err != nil {
		pj.log.Error().Err(err).Msgf("update job %v", pj.job)
	}
	return
}
// SetStatus transitions the job to status and persists the change.
func (pj jobProcessor) SetStatus(status api.Status) (err error) {
	pj.log.Info().Msgf(`%v -> %v`, pj.job.Status, status)
	pj.job.Status = status
	return pj.Update()
}
// Process runs a waiting job to completion: reserves CPU/RAM, clones
// the repo at the requested commit, builds/runs/tests the artifact in
// qemu, writes the collected logs and records success or failure.
//
// Fix: guard against the artifact callback never firing — the
// original dereferenced result/dq unconditionally and could panic.
func (pj *jobProcessor) Process(res *Resources) (err error) {
	if pj.job.Status != api.StatusWaiting {
		err = errors.New("job is not available to process")
		return
	}

	// Fall back to qemu defaults so resource accounting has real
	// numbers to work with.
	if pj.job.Artifact.Qemu.Cpus == 0 {
		pj.job.Artifact.Qemu.Cpus = qemu.DefaultCPUs
	}
	if pj.job.Artifact.Qemu.Memory == 0 {
		pj.job.Artifact.Qemu.Memory = qemu.DefaultMemory
	}

	err = res.Allocate(pj.job)
	if err != nil {
		return
	}
	defer func() {
		res.Release(pj.job)
	}()

	log.Info().Msgf("process job %v", pj.job.UUID)

	pj.SetStatus(api.StatusRunning)
	pj.job.Started = time.Now()

	// Final status depends on err at return time.
	defer func() {
		pj.job.Finished = time.Now()
		if err != nil {
			pj.SetStatus(api.StatusFailure)
		} else {
			pj.SetStatus(api.StatusSuccess)
		}
	}()

	var tmp string
	tmp, err = os.MkdirTemp(dotfiles.Dir("tmp"), "")
	if err != nil {
		pj.log.Error().Err(err).Msg("mktemp")
		return
	}
	defer os.RemoveAll(tmp)

	tmprepo := filepath.Join(tmp, "repo")
	pj.log.Debug().Msgf("temp repo: %v", tmprepo)

	// Clone via the daemon's local git server.
	remote := fmt.Sprintf("git://localhost:9418/%s", pj.job.RepoName)
	pj.log.Debug().Msgf("remote: %v", remote)

	var raw []byte

	cmd := exec.Command("git", "clone", remote, tmprepo)
	raw, err = cmd.CombinedOutput()
	pj.log.Trace().Msgf("%v\n%v", cmd, string(raw))
	if err != nil {
		pj.log.Error().Msgf("%v\n%v", cmd, string(raw))
		return
	}

	cmd = exec.Command("git", "checkout", pj.job.Commit)
	cmd.Dir = tmprepo
	raw, err = cmd.CombinedOutput()
	pj.log.Trace().Msgf("%v\n%v", cmd, string(raw))
	if err != nil {
		pj.log.Error().Msgf("%v\n%v", cmd, string(raw))
		return
	}

	pj.job.Artifact.SourcePath = tmprepo

	var result *artifact.Result
	var dq *qemu.System

	pj.job.Artifact.Process(pj.log, pj.job.Target, false, "", "", 0,
		func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
			res *artifact.Result) {

			result = res
			dq = q
		},
	)

	if result == nil || dq == nil {
		err = errors.New("artifact processing returned no result")
		return
	}

	logdir := dotfiles.Dir("daemon/logs", pj.job.UUID)

	// Log write failures are logged but do not abort the job; note
	// that a failure on the last write leaves err set and marks the
	// job failed (pre-existing behavior).
	err = os.WriteFile(filepath.Join(logdir, "build.log"),
		[]byte(result.Build.Output), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	err = os.WriteFile(filepath.Join(logdir, "run.log"),
		[]byte(result.Run.Output), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	err = os.WriteFile(filepath.Join(logdir, "test.log"),
		[]byte(result.Test.Output), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	err = os.WriteFile(filepath.Join(logdir, "qemu.log"),
		[]byte(dq.Stdout), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	pj.log.Info().Msgf("build %v, run %v, test %v",
		result.Build.Ok, result.Run.Ok, result.Test.Ok)

	if !result.Test.Ok {
		err = errors.New("tests failed")
	}
	return
}

206
daemon/resources.go Normal file
View File

@ -0,0 +1,206 @@
package daemon
import (
"errors"
"runtime"
"sync"
"syscall"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
)
// Resources aggregates the host CPU and RAM budgets used to admit
// jobs for processing.
type Resources struct {
	// initialized guards against use of a zero-value Resources.
	initialized bool
	CPU *CPUResource
	RAM *RAMResources
}
// NewResources builds a resource tracker sized to the host's CPU
// count and total RAM.
func NewResources() (r *Resources) {
	r = &Resources{
		CPU: NewCPUResources(),
		RAM: NewRAMResources(),
	}
	r.initialized = true
	return
}
// Allocate reserves the CPU and memory a job's qemu instance needs,
// rolling back the CPU reservation if memory cannot be satisfied.
func (r *Resources) Allocate(job api.Job) (err error) {
	if !r.initialized {
		return errors.New("resources not initialized")
	}

	cpus := job.Artifact.Qemu.Cpus
	mem := job.Artifact.Qemu.Memory
	if cpus == 0 {
		return errors.New("no cpus requested")
	}
	if mem == 0 {
		return errors.New("no memory requested")
	}

	origRam := r.RAM.GetSpent()
	origCPU := r.CPU.GetSpent()

	if err = r.CPU.Allocate(cpus); err != nil {
		return
	}
	if err = r.RAM.Allocate(mem); err != nil {
		// Undo the CPU reservation on partial failure.
		r.CPU.Release(cpus)
		return
	}

	log.Debug().Msgf("allocated %d cpus, %d MB ram",
		r.CPU.GetSpent()-origCPU,
		r.RAM.GetSpent()-origRam)
	return
}
// Release frees the CPU and memory previously reserved for job.
func (r *Resources) Release(job api.Job) {
	if !r.initialized {
		log.Error().Msg("resources not initialized")
		return
	}

	cpus := job.Artifact.Qemu.Cpus
	mem := job.Artifact.Qemu.Memory

	r.CPU.Release(cpus)
	r.RAM.Release(mem)

	log.Debug().Msgf("released %d cpus, %d MB ram", cpus, mem)
}
// CPUResource tracks how many CPU cores are allocated to running
// jobs, with an overcommit multiplier on the physical core count.
type CPUResource struct {
	// num is the host core count.
	num int
	// overcommit scales num to form the allocatable budget.
	overcommit float64
	mu *sync.Mutex
	// spent is the number of cores currently allocated (guarded by mu).
	spent int
}
// Allocation and Release label resource operations.
// NOTE(review): these constants appear unused in this file — confirm
// before removing.
const (
	Allocation = iota
	Release
)
// NewCPUResources builds a CPU budget sized to the host's core count
// with no overcommit.
func NewCPUResources() (cpur *CPUResource) {
	cpur = &CPUResource{
		mu:         &sync.Mutex{},
		num:        runtime.NumCPU(),
		overcommit: 1,
	}
	log.Debug().Msgf("total cpus: %d", cpur.num)
	return
}
// SetOvercommit changes the CPU overcommit factor and logs the
// resulting effective budget.
func (cpur *CPUResource) SetOvercommit(oc float64) {
	effective := int(float64(cpur.num) * oc)
	log.Info().Int("cpus", cpur.num).
		Int("result", effective).
		Msgf("%.02f", oc)
	cpur.overcommit = oc
}
// GetSpent returns the number of CPU cores currently allocated.
func (cpur *CPUResource) GetSpent() (spent int) {
	cpur.mu.Lock()
	spent = cpur.spent
	cpur.mu.Unlock()
	return
}
// ErrNotEnoughCpu is returned when a request exceeds the CPU budget.
var ErrNotEnoughCpu = errors.New("not enough cpu")
// Allocate reserves cpu cores, honoring the overcommit factor.
func (cpur *CPUResource) Allocate(cpu int) (err error) {
	cpur.mu.Lock()
	defer cpur.mu.Unlock()

	limit := int(float64(cpur.num) * cpur.overcommit)
	if cpur.spent+cpu > limit {
		return ErrNotEnoughCpu
	}

	cpur.spent += cpu
	return
}
// Release returns cpu cores to the pool, refusing to free more than
// is currently allocated.
func (cpur *CPUResource) Release(cpu int) (err error) {
	cpur.mu.Lock()
	defer cpur.mu.Unlock()

	if cpur.spent < cpu {
		return ErrFreeingMoreThanAllocated
	}

	cpur.spent -= cpu
	return
}
// RAMResources tracks how many megabytes of memory are allocated to
// running jobs, with an overcommit multiplier on total host RAM.
type RAMResources struct {
	// mb is the total host memory in megabytes.
	mb int
	// overcommit scales mb to form the allocatable budget.
	overcommit float64
	mu *sync.Mutex
	// spent is the memory currently allocated in MB (guarded by mu).
	spent int
}
// NewRAMResources builds a memory budget sized to total host RAM with
// no overcommit.
//
// Fix: log a Sysinfo failure instead of ignoring it silently — on
// failure mb stays 0 and every later allocation fails with "not
// enough ram" with no hint why.
func NewRAMResources() (ramr *RAMResources) {
	ramr = &RAMResources{}
	ramr.mu = &sync.Mutex{}
	ramr.overcommit = 1

	var info syscall.Sysinfo_t
	if err := syscall.Sysinfo(&info); err != nil {
		log.Error().Err(err).Msg("sysinfo")
	}
	ramr.mb = int(info.Totalram / 1024 / 1024)
	log.Debug().Msgf("total ram: %d MB", ramr.mb)
	return
}
// SetOvercommit changes the RAM overcommit factor and logs the
// resulting effective budget.
func (ramr *RAMResources) SetOvercommit(oc float64) {
	effective := int(float64(ramr.mb) * oc)
	log.Info().Int("ram", ramr.mb).
		Int("result", effective).
		Msgf("%.02f", oc)
	ramr.overcommit = oc
}
// GetSpent returns the amount of memory currently allocated in MB.
//
// Fix: use a pointer receiver. The value receiver copied the struct
// (including spent) before the shared mutex was taken, so the lock
// did not actually protect the value being read; it also made the
// receiver style inconsistent with the other RAMResources methods.
func (ramr *RAMResources) GetSpent() int {
	ramr.mu.Lock()
	defer ramr.mu.Unlock()
	return ramr.spent
}
// ErrNotEnoughRam is returned when a request exceeds the RAM budget.
var ErrNotEnoughRam = errors.New("not enough ram")
// Allocate reserves mb megabytes of memory, honoring the overcommit
// factor.
func (ramr *RAMResources) Allocate(mb int) (err error) {
	ramr.mu.Lock()
	defer ramr.mu.Unlock()

	limit := int(float64(ramr.mb) * ramr.overcommit)
	if mb > limit-ramr.spent {
		return ErrNotEnoughRam
	}

	ramr.spent += mb
	return
}
// ErrFreeingMoreThanAllocated is returned when a release would drop
// the spent counter below zero.
var ErrFreeingMoreThanAllocated = errors.New("freeing more than allocated")
// Release returns mb megabytes to the pool, refusing to free more
// than is currently allocated.
func (ramr *RAMResources) Release(mb int) (err error) {
	ramr.mu.Lock()
	defer ramr.mu.Unlock()

	if ramr.spent < mb {
		return ErrFreeingMoreThanAllocated
	}

	ramr.spent -= mb
	return
}

39
default.nix Normal file
View File

@ -0,0 +1,39 @@
# Build out-of-tree with gomod2nix. When pkgs is not supplied, it is
# pinned to the nixpkgs revision recorded in flake.lock, with the
# gomod2nix overlay (from the same lock file) applied.
{ pkgs ? (
    let
      inherit (builtins) fetchTree fromJSON readFile;
      inherit ((fromJSON (readFile ./flake.lock)).nodes) nixpkgs gomod2nix;
    in
    import (fetchTree nixpkgs.locked) {
      overlays = [
        (import "${fetchTree gomod2nix.locked}/overlay.nix")
      ];
    }
  )
, lib
, version
}:

pkgs.buildGoApplication rec {
  pname = "out-of-tree";
  inherit version;

  nativeBuildInputs = [ pkgs.makeWrapper ];

  src = ./.;
  pwd = ./.;

  doCheck = false;

  # The binary shells out to qemu, podman and openssl at runtime, so
  # prepend them to its PATH.
  postFixup = ''
    wrapProgram $out/bin/out-of-tree \
      --prefix PATH : "${lib.makeBinPath [ pkgs.qemu pkgs.podman pkgs.openssl ]}"
  '';

  meta = with lib; {
    description = "kernel {module, exploit} development tool";
    homepage = "https://out-of-tree.io";
    maintainers = [ maintainers.dump_stack ];
    license = licenses.agpl3Plus;
  };
}

217
distro/centos/centos.go Normal file
View File

@ -0,0 +1,217 @@
package centos
import (
"fmt"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// init registers the supported CentOS releases with the distro
// registry at package load time.
func init() {
	releases := []string{"6", "7", "8"}
	for _, release := range releases {
		distro.Register(CentOS{release: release})
	}
}
// CentOS implements distro support for a single CentOS release
// ("6", "7" or "8").
type CentOS struct {
	release string
}
// Equal reports whether d denotes the same CentOS release.
func (centos CentOS) Equal(d distro.Distro) bool {
	return centos.release == d.Release && distro.CentOS == d.ID
}
// Distro returns the generic distro descriptor for this release.
func (centos CentOS) Distro() distro.Distro {
	return distro.Distro{ID: distro.CentOS, Release: centos.release}
}
// Packages lists the kernel package names available for this release
// by querying yum inside the distro container, building the container
// first if it does not exist.
func (centos CentOS) Packages() (pkgs []string, err error) {
	c, err := container.New(centos.Distro())
	if err != nil {
		return
	}
	if !c.Exist() {
		err = c.Build("centos:"+centos.release,
			centos.envs(), centos.runs())
		if err != nil {
			return
		}
	}
	// kernel-* package names, excluding source packages.
	cmd := "yum search kernel --showduplicates 2>/dev/null " +
		"| grep '^kernel-[0-9]' " +
		"| grep -v src " +
		"| cut -d ' ' -f 1"
	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}
	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
// Kernels returns the kernels installed in this release's container.
func (centos CentOS) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(centos.Distro())
	if err != nil {
		return
	}
	return c.Kernels()
}
// envs returns extra ENV lines for the container build; CentOS needs
// none.
func (centos CentOS) envs() (envs []string) {
	return
}
// runs returns the RUN commands baked into the container image:
// vault/mirror repo configuration for the release, development tools
// and the package cache warm-up needed to install historic kernels.
func (centos CentOS) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}
	var repos []string
	// Point yum at vault.centos.org, which keeps every point release
	// after mirrors drop EOL versions.
	// TODO refactor
	switch centos.release {
	case "6":
		repofmt := "[6.%d-%s]\\nbaseurl=https://vault.centos.org/6.%d/%s/$basearch/\\ngpgcheck=0"
		for i := 0; i <= 10; i++ {
			repos = append(repos, fmt.Sprintf(repofmt, i, "os", i, "os"))
			repos = append(repos, fmt.Sprintf(repofmt, i, "updates", i, "updates"))
		}
		cmdf("rm /etc/yum.repos.d/*")
	case "7":
		repofmt := "[%s-%s]\\nbaseurl=https://vault.centos.org/%s/%s/$basearch/\\ngpgcheck=0"
		for _, ver := range []string{
			"7.0.1406", "7.1.1503", "7.2.1511",
			"7.3.1611", "7.4.1708", "7.5.1804",
			"7.6.1810", "7.7.1908", "7.8.2003",
		} {
			repos = append(repos, fmt.Sprintf(repofmt, ver, "os", ver, "os"))
			repos = append(repos, fmt.Sprintf(repofmt, ver, "updates", ver, "updates"))
		}
		// 7.9 is still on the live mirror, not on vault.
		// FIXME http/gpgcheck=0
		repofmt = "[%s-%s]\\nbaseurl=http://mirror.centos.org/centos-7/%s/%s/$basearch/\\ngpgcheck=0"
		repos = append(repos, fmt.Sprintf(repofmt, "7.9.2009", "os", "7.9.2009", "os"))
		repos = append(repos, fmt.Sprintf(repofmt, "7.9.2009", "updates", "7.9.2009", "updates"))
	case "8":
		repofmt := "[%s-%s]\\nbaseurl=https://vault.centos.org/%s/%s/$basearch/os/\\ngpgcheck=0"
		for _, ver := range []string{
			"8.0.1905", "8.1.1911", "8.2.2004",
			"8.3.2011", "8.4.2105", "8.5.2111",
		} {
			repos = append(repos, fmt.Sprintf(repofmt, ver, "baseos", ver, "BaseOS"))
			repos = append(repos, fmt.Sprintf(repofmt, ver, "appstream", ver, "AppStream"))
		}
	default:
		log.Fatal().Msgf("no support for centos %s", centos.release)
		return
	}
	// Disable the stock repos; only the ones written below are used.
	cmdf("sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/* || true")
	for _, repo := range repos {
		cmdf("echo -e '%s' >> /etc/yum.repos.d/oot.repo\n", repo)
	}
	// do not remove old kernels
	cmdf("sed -i 's;installonly_limit=;installonly_limit=100500;' /etc/yum.conf")
	cmdf("yum -y update")
	cmdf("yum -y groupinstall 'Development Tools'")
	// TODO do not use lexicographical comparison, change to parse int
	if centos.release <= "6" {
		cmdf("yum -y install kernel-firmware")
	} else {
		cmdf("yum -y install linux-firmware")
	}
	if centos.release < "8" {
		cmdf("yum -y install deltarpm")
	} else {
		cmdf("yum -y install grub2-tools-minimal elfutils-libelf-devel")
	}
	var flags string
	if centos.release >= "8" {
		flags = "--noautoremove"
	}
	// Install and remove a single kernel and headers.
	// This ensures that all dependencies are cached.
	cmd := "export HEADERS=$(yum search kernel-devel --showduplicates " +
		"| grep '^kernel-devel' | cut -d ' ' -f 1 | head -n 1)"
	cmd += " KERNEL=$(echo $HEADERS | sed 's/-devel//')"
	cmd += " MODULES=$(echo $HEADERS | sed 's/-devel/-modules/')"
	cmd += " CORE=$(echo $HEADERS | sed 's/-devel/-core/')"
	cmd += " && yum -y install $KERNEL $HEADERS"
	cmd += " && yum -y remove %s $KERNEL $HEADERS $MODULES $CORE"
	cmdf(cmd, flags)
	return
}
// Install installs the given kernel package (and, when headers is
// true, the matching kernel-devel package) inside the container,
// regenerates the initramfs, and copies the results to the host
// volumes mounted under /target.
func (centos CentOS) Install(pkgname string, headers bool) (err error) {
	var headerspkg string
	if headers {
		headerspkg = strings.Replace(pkgname, "kernel", "kernel-devel", -1)
	}
	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}
	cmdf("yum -y install %s %s", pkgname, headerspkg)
	version := strings.Replace(pkgname, "kernel-", "", -1)
	// Rebuild the initramfs with extra drivers; release 8+ needs
	// additional --force-drivers flags.
	// TODO do not use lexicographical comparison (see runs())
	if centos.release <= "7" {
		cmdf("dracut -v --add-drivers 'e1000 ext4' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	} else {
		cmdf("dracut -v --add-drivers 'ata_piix libata' "+
			"--force-drivers 'e1000 ext4 sd_mod' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	}
	// Export boot artifacts, modules and sources to the host.
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")
	c, err := container.New(centos.Distro())
	if err != nil {
		return
	}
	// Remap volume destinations so the copies above land in the
	// host-side volume directories.
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}
	_, err = c.Run("", commands)
	if err != nil {
		return
	}
	return
}
// RootFS returns the filename of the root filesystem image used for
// this CentOS release.
func (centos CentOS) RootFS() string {
	return "out_of_tree_centos_" + centos.release + ".img"
}

View File

@ -0,0 +1,19 @@
package centos
import (
"testing"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// TestCentOS checks Equal against the distro descriptor and then
// lists packages (which builds the container image, so it is slow).
func TestCentOS(t *testing.T) {
	assert := assert.New(t)
	u := CentOS{release: "7"}
	assert.True(u.Equal(distro.Distro{Release: "7", ID: distro.CentOS}))
	assert.NotEmpty(u.Packages())
}

53
distro/debian/cache.go Normal file
View File

@ -0,0 +1,53 @@
package debian
import (
"errors"
"sync"
"github.com/rapidloop/skv"
)
// Cache persists fetched Debian kernel metadata in a local skv
// key-value store.
type Cache struct {
	store *skv.KVStore
}
// The underlying store is not thread-safe, so a package-level mutex
// serializes users: it is locked in NewCache and released in Close.
var mu sync.Mutex
// NewCache opens (or creates) the skv store at path. The store is
// not thread-safe, so the package mutex is taken here and released
// only in Close; callers must Close the returned cache.
func NewCache(path string) (c *Cache, err error) {
	mu.Lock()
	c = &Cache{}
	c.store, err = skv.Open(path)
	if err != nil {
		// Nothing was opened: release the lock so the next
		// NewCache call does not deadlock, and do not hand a
		// half-initialized cache to the caller.
		mu.Unlock()
		c = nil
	}
	return
}
// Put stores the kernel slice keyed by its source package version.
func (c Cache) Put(p []DebianKernel) error {
	if len(p) == 0 {
		return errors.New("empty slice")
	}
	// All entries in the slice share one source package version,
	// which serves as the store key.
	key := p[0].Version.Package
	return c.store.Put(key, p)
}
// Get returns the cached kernels for a source package version; an
// entry that decodes to nothing is reported as skv.ErrNotFound.
func (c Cache) Get(version string) ([]DebianKernel, error) {
	var kernels []DebianKernel
	err := c.store.Get(version, &kernels)
	if len(kernels) == 0 {
		return kernels, skv.ErrNotFound
	}
	return kernels, err
}
// PutVersions stores the full list of known source package versions
// under the fixed "versions" key.
func (c Cache) PutVersions(versions []string) error {
	return c.store.Put("versions", versions)
}
// GetVersions returns the list previously stored by PutVersions.
func (c Cache) GetVersions() (versions []string, err error) {
	err = c.store.Get("versions", &versions)
	return
}
// Close closes the underlying store and releases the package mutex
// taken by NewCache, allowing the next NewCache call to proceed.
func (c Cache) Close() (err error) {
	err = c.store.Close()
	mu.Unlock()
	return
}

106
distro/debian/cache_test.go Normal file
View File

@ -0,0 +1,106 @@
package debian
import (
"os"
"path/filepath"
"testing"
"github.com/rapidloop/skv"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
)
// TestCache round-trips one DebianKernel through the cache, reopens
// the store to check persistence, and verifies that missing keys
// report skv.ErrNotFound.
func TestCache(t *testing.T) {
	dir, err := os.MkdirTemp("", "out-of-tree_cache_test_")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)
	path := filepath.Join(dir, "debian.cache")
	c, err := NewCache(path)
	if err != nil {
		t.Fatal(err)
	}
	image := snapshot.Package{}
	image.Deb.Hash = "12345"
	version := "4.17.14-1"
	dk := DebianKernel{
		Version: DebianKernelVersion{Package: version},
		Image:   image,
	}
	err = c.Put([]DebianKernel{dk})
	if err != nil {
		t.Fatal(err)
	}
	dk2s, err := c.Get(version)
	if err != nil {
		t.Fatal(err)
	}
	dk2 := dk2s[0]
	if dk.Image.Deb.Hash != dk2.Image.Deb.Hash {
		t.Fatalf("mismatch")
	}
	// Reopen the store: the entry must survive on disk.
	c.Close()
	c, err = NewCache(path)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	dk3s, err := c.Get(version)
	if err != nil {
		t.Fatal(err)
	}
	dk3 := dk3s[0]
	if dk.Image.Deb.Hash != dk3.Image.Deb.Hash {
		t.Fatalf("mismatch")
	}
	// A missing key must surface as skv.ErrNotFound.
	_, err = c.Get("key not exist")
	if err == nil || err != skv.ErrNotFound {
		t.Fatal(err)
	}
}
// TestVersionsCache verifies that the list of source package versions
// survives a PutVersions/GetVersions round trip unchanged.
func TestVersionsCache(t *testing.T) {
	dir, err := os.MkdirTemp("", "out-of-tree_cache_test_")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)
	path := filepath.Join(dir, "debian.cache")
	c, err := NewCache(path)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	versions := []string{"a", "b", "c"}
	err = c.PutVersions(versions)
	if err != nil {
		t.Fatal(err)
	}
	result, err := c.GetVersions()
	if err != nil {
		t.Fatal(err)
	}
	// Compare contents, not just lengths: a round trip that
	// reorders or corrupts entries must fail the test.
	if len(versions) != len(result) {
		t.Fatalf("length mismatch: %v vs. %v", versions, result)
	}
	for i := range versions {
		if versions[i] != result[i] {
			t.Fatalf("element %d mismatch: %v vs. %v",
				i, versions, result)
		}
	}
}

535
distro/debian/debian.go Normal file
View File

@ -0,0 +1,535 @@
package debian
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// init registers every supported Debian release with the distro
// registry at package load time.
func init() {
	for _, release := range []Release{
		Wheezy, Jessie, Stretch,
		Buster, Bullseye, Bookworm,
	} {
		distro.Register(Debian{release: release})
	}
}
// Debian implements distro support for a single Debian release.
type Debian struct {
	release Release
}
// Equal reports whether dd describes this exact Debian release;
// the release string may be a codename or a number.
func (d Debian) Equal(dd distro.Distro) bool {
	return dd.ID == distro.Debian &&
		ReleaseFromString(dd.Release) == d.release
}
// Distro returns the generic distro.Distro descriptor for this
// Debian release.
func (d Debian) Distro() distro.Distro {
	// Keyed fields keep the literal vet-clean and valid even if
	// distro.Distro gains or reorders fields.
	return distro.Distro{ID: distro.Debian, Release: d.release.String()}
}
// Packages returns the linux-image package names (without the .deb
// suffix) available for this Debian release, building the container
// image first if it does not exist yet.
func (d Debian) Packages() (packages []string, err error) {
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}
	if !c.Exist() {
		err = c.Build(d.image(), d.envs(), d.runs())
		if err != nil {
			return
		}
	}
	kernels, err := GetKernels()
	if err != nil {
		log.Error().Err(err).Msg("get kernels")
		return
	}
	for _, dk := range kernels {
		// Keep only kernels belonging to this release.
		if d.release != dk.Release {
			continue
		}
		version := kver(dk.Version.Package)
		// filter out pre-release kernels
		switch dk.Release {
		case Wheezy:
			if version.LessThan(kver("3.2-rc0")) {
				continue
			}
		case Jessie:
			if version.LessThan(kver("3.16-rc0")) {
				continue
			}
		case Stretch:
			if version.LessThan(kver("4.9-rc0")) {
				continue
			}
		case Buster:
			if version.LessThan(kver("4.19-rc0")) {
				continue
			}
		case Bullseye:
			if version.LessThan(kver("5.10-rc0")) {
				continue
			}
		case Bookworm:
			if version.LessThan(kver("6.1-rc0")) {
				continue
			}
		}
		p := dk.Image.Deb.Name[:len(dk.Image.Deb.Name)-4] // w/o .deb
		packages = append(packages, p)
	}
	return
}
// Release enumerates Debian releases in chronological order, so
// values compare meaningfully (Wheezy < Jessie < ...).
type Release int

const (
	// None marks an unknown or unresolved release.
	None Release = iota
	Buzz
	Hamm
	Woody
	Etch
	Lenny
	Squeeze
	Wheezy
	Jessie
	Stretch
	Buster
	Bullseye
	Bookworm
)
// ReleaseStrings maps Release values to their codenames; index 0
// (the empty string) corresponds to None.
var ReleaseStrings = [...]string{
	"",
	"buzz",
	"hamm",
	"woody",
	"etch",
	"lenny",
	"squeeze",
	"wheezy",
	"jessie",
	"stretch",
	"buster",
	"bullseye",
	"bookworm",
}
// Name returns the codename of the release (e.g. "wheezy"). It
// returns the empty string for None and for out-of-range values
// instead of panicking.
func (cn Release) Name() string {
	if cn < 0 || int(cn) >= len(ReleaseStrings) {
		return ""
	}
	return ReleaseStrings[cn]
}
// String returns the numeric form of the release (e.g. "7" for
// Wheezy), one of the forms accepted by ReleaseFromString.
func (cn Release) String() string {
	return fmt.Sprintf("%d", cn)
}
// ReleaseFromString resolves a release from either its number ("7")
// or its codename ("wheezy"), case-insensitively. Unknown input
// yields None.
func ReleaseFromString(s string) (r Release) {
	needle := strings.ToLower(s)
	// ReleaseStrings is indexed by Release value, so walking it
	// covers both the codename and the numeric spelling.
	for i := 1; i < len(ReleaseStrings); i++ {
		rel := Release(i)
		if needle == ReleaseStrings[i] || needle == rel.String() {
			return rel
		}
	}
	return None
}
// envs returns the environment for the container build; apt must run
// non-interactively.
func (d Debian) envs() []string {
	return []string{"DEBIAN_FRONTEND=noninteractive"}
}
// image returns the base container image for this release. A few
// releases are pinned to dated tags; the rest use the codename tag.
func (d Debian) image() string {
	tag := d.release.Name()
	switch d.release {
	case Wheezy:
		tag = "wheezy-20190228"
	case Jessie:
		tag = "jessie-20210326"
	case Stretch:
		tag = "stretch-20220622"
	}
	return "debian:" + tag
}
// repositories returns snapshot.debian.org apt source lines for EOL
// releases that need them; for current releases it returns nil and
// the stock sources are used.
func repositories(release Release) (repos []string) {
	var snapshot string
	switch release {
	// Latest snapshots that include release
	case Wheezy:
		// doesn't include snapshot repos in /etc/apt/source.list
		snapshot = "20190321T212815Z"
	case Jessie:
		snapshot = "20230322T152120Z"
	case Stretch:
		snapshot = "20230423T032533Z"
	default:
		return
	}
	repo := func(archive, s string) {
		format := "deb [check-valid-until=no trusted=yes] " +
			"http://snapshot.debian.org/archive/%s/%s " +
			"%s%s main"
		r := fmt.Sprintf(format, archive, snapshot, release.Name(), s)
		repos = append(repos, r)
	}
	repo("debian", "")
	repo("debian", "-updates")
	// backports only for Wheezy and older (Release values <= 7)
	if release <= 7 {
		repo("debian", "-backports")
	}
	repo("debian-security", "/updates")
	return
}
// runs returns the shell commands executed while building the Debian
// container image: apt sources setup (snapshot repos for EOL
// releases), then the toolchain package install.
func (d Debian) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}
	repos := repositories(d.release)
	if len(repos) != 0 {
		// Replace the stock sources with the snapshot repos.
		cmdf("rm /etc/apt/sources.list")
		for _, repo := range repos {
			cmdf("echo '%s' >> /etc/apt/sources.list", repo)
		}
	} else {
		// If the stock sources fail, keep only snapshot lines
		// and enable them.
		cmdf("apt-get update || sed -i " +
			"-e '/snapshot/!d' " +
			"-e 's/# deb/deb [check-valid-until=no trusted=yes]/' " +
			"/etc/apt/sources.list")
	}
	// snapshot.debian.org is flaky; retry the update.
	cmdf("apt-get update || apt-get update || apt-get update")
	pkglist := []string{
		"wget", "build-essential", "libelf-dev", "git",
		"kmod", "linux-base", "libssl-dev",
		"firmware-linux-free",
		"libxml2", "libglib2.0.0", "irqbalance", "libcap-ng0",
		"libnuma1", "sgml-base", "shared-mime-info", "xdg-user-dirs",
		"xml-core", "python3",
	}
	// Match every available gcc major version package.
	gccs := "'^(gcc-[0-9].[0-9]|gcc-[0-9]|gcc-[1-9][0-9])$'"
	pkglist = append(pkglist, gccs)
	if d.release >= 8 {
		pkglist = append(pkglist, "initramfs-tools")
	} else {
		// by default Debian backports repositories have a lower
		// priority than stable, so we should specify it manually
		cmdf("apt-get -y install -t %s-backports "+
			"initramfs-tools", d.release.Name())
	}
	if d.release >= 9 {
		pkglist = append(pkglist, "apparmor")
	}
	if d.release < 9 {
		pkglist = append(pkglist, "module-init-tools")
	}
	var packages string
	for _, pkg := range pkglist {
		packages += fmt.Sprintf("%s ", pkg)
	}
	// Retry with growing timeouts; the mirrors rate-limit.
	cmdf("timeout 5m apt-get install -y %s "+
		"|| timeout 10m apt-get install -y %s "+
		"|| apt-get install -y %s", packages, packages, packages)
	if d.release == Wheezy {
		// We need newer libc for deb8*~bpo70+1
		format := "deb [check-valid-until=no trusted=yes] " +
			"http://snapshot.debian.org/archive/debian/%s " +
			"jessie main"
		// Keep it here not in repos to have apt-priority close
		repo := fmt.Sprintf(format, "20190321T212815Z")
		cmdf("echo '%s' >> /etc/apt/sources.list", repo)
		cmdf("echo 'Package: *' >> /etc/apt/preferences.d/jessie")
		cmdf("echo 'Pin: release a=jessie' >> /etc/apt/preferences.d/jessie")
		cmdf("echo 'Pin-Priority: 10' >> /etc/apt/preferences.d/jessie")
		cmdf("apt-get -y update")
		// glibc guarantee backwards compatibility, so should be no problem
		cmdf("apt-get -y install -t jessie libc6-dev")
	}
	cmdf("mkdir -p /lib/modules")
	return
}
// Kernels scans the container's volume directory for installed
// linux-image packages and returns a KernelInfo for each one that
// has a vmlinuz, an initrd and a modules directory.
func (d Debian) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}
	// Without a built container there is nothing installed.
	if !c.Exist() {
		return
	}
	cpath := dotfiles.Dir("volumes", c.Name())
	rootfs := dotfiles.File("images", c.Name()+".img")
	files, err := os.ReadDir(cpath)
	if err != nil {
		return
	}
	for _, file := range files {
		if !strings.Contains(file.Name(), "linux-image") {
			continue
		}
		pkgname := file.Name()
		kpkgdir := filepath.Join(cpath, pkgname)
		bootdir := filepath.Join(kpkgdir, "boot")
		vmlinuz, err := fs.FindBySubstring(bootdir, "vmlinuz")
		if err != nil {
			log.Warn().Msgf("cannot find vmlinuz for %s", pkgname)
			continue
		}
		initrd, err := fs.FindBySubstring(bootdir, "initrd")
		if err != nil {
			log.Warn().Msgf("cannot find initrd for %s", pkgname)
			continue
		}
		modulesdir := filepath.Join(kpkgdir, "lib/modules")
		modules, err := fs.FindBySubstring(modulesdir, "")
		if err != nil {
			log.Warn().Msgf("cannot find modules for %s", pkgname)
			continue
		}
		log.Debug().Msgf("%s %s %s", vmlinuz, initrd, modules)
		release := strings.Replace(pkgname, "linux-image-", "", -1)
		ki := distro.KernelInfo{
			Distro:        d.Distro(),
			KernelVersion: path.Base(modules),
			KernelRelease: release,
			ContainerName: c.Name(),
			KernelPath:  vmlinuz,
			InitrdPath:  initrd,
			ModulesPath: modules,
			RootFS:      rootfs,
			Package: pkgname,
		}
		// These kernel versions are started with smap disabled.
		smapBlocklist := []string{
			"3.10.5-1~bpo70+1",
			"3.10.11-1~bpo70+1",
			"3.9.6-1~bpo70+1",
		}
		for _, ver := range smapBlocklist {
			if strings.Contains(release, ver) {
				ki.CPU.Flags = append(ki.CPU.Flags, "smap=off")
			}
		}
		kernels = append(kernels, ki)
	}
	return
}
// volumes returns the per-package host volume mapping (modules,
// sources, boot) for the given kernel package.
//
// NOTE(review): the error from container.New is silently discarded;
// on failure an empty volume list is returned — confirm callers can
// cope with that.
func (d Debian) volumes(pkgname string) (volumes []container.Volume) {
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}
	pkgdir := filepath.Join("volumes", c.Name(), pkgname)
	volumes = append(volumes, container.Volume{
		Src:  dotfiles.Dir(pkgdir, "/lib/modules"),
		Dest: "/lib/modules",
	})
	volumes = append(volumes, container.Volume{
		Src:  dotfiles.Dir(pkgdir, "/usr/src"),
		Dest: "/usr/src",
	})
	volumes = append(volumes, container.Volume{
		Src:  dotfiles.Dir(pkgdir, "/boot"),
		Dest: "/boot",
	})
	return
}
// Install downloads the deb files for the kernel package (plus
// headers and dependencies when headers is true) from the snapshot
// archive, installs them from a local apt repository inside the
// container, and copies the results to the host volumes under
// /target. On failure the per-package volume directory is removed.
func (d Debian) Install(pkgname string, headers bool) (err error) {
	defer func() {
		if err != nil {
			d.cleanup(pkgname)
		}
	}()
	dk, err := getCachedKernel(pkgname + ".deb")
	if err != nil {
		return
	}
	var pkgs []snapshot.Package
	if headers {
		pkgs = dk.Packages()
	} else {
		pkgs = []snapshot.Package{dk.Image}
	}
	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}
	for _, pkg := range pkgs {
		// Prefer a mirror of the deb if one is cached.
		found, newurl := cache.PackageURL(
			distro.Debian,
			pkg.Deb.URL,
		)
		if found {
			log.Debug().Msgf("cached deb found %s", newurl)
			pkg.Deb.URL = newurl
		}
		// TODO use faketime on old releases?
		pkg.Deb.URL = strings.Replace(pkg.Deb.URL, "https", "http", -1)
		cmdf("wget --no-verbose " +
			"--timeout=10 --waitretry=1 --tries=10 " +
			"--no-check-certificate " + pkg.Deb.URL)
	}
	// prepare local repository
	cmdf("mkdir debs && mv *.deb debs/")
	cmdf("dpkg-scanpackages debs /dev/null | gzip > debs/Packages.gz")
	cmdf(`echo "deb [trusted=yes] file:$(pwd) debs/" >> /etc/apt/sources.list.d/local.list`)
	cmdf("apt-get update -o Dir::Etc::sourcelist='sources.list.d/local.list' -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0'")
	// make sure apt-get will not download the repo version
	cmdf("echo 'Package: *' >> /etc/apt/preferences.d/pin")
	cmdf(`echo 'Pin: origin "*.debian.org"' >> /etc/apt/preferences.d/pin`)
	cmdf("echo 'Pin-Priority: 100' >> /etc/apt/preferences.d/pin")
	// cut package names and install
	cmdf("ls debs | grep deb | cut -d '_' -f 1 | " +
		"xargs apt-get -y --force-yes install")
	// for debug
	cmdf("ls debs | grep deb | cut -d '_' -f 1 | xargs apt-cache policy")
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}
	c.Volumes = d.volumes(pkgname)
	// Remap volume destinations so the copies below land in the
	// host-side volume directories.
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -rL /usr/src /target/usr/")
	_, err = c.Run("", commands)
	if err != nil {
		return
	}
	return
}
// cleanup removes the per-package volume directory; failures are
// only logged.
func (d Debian) cleanup(pkgname string) {
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}
	dir := dotfiles.Dir(filepath.Join("volumes", c.Name(), pkgname))
	log.Debug().Msgf("cleanup %s", dir)
	if err := os.RemoveAll(dir); err != nil {
		log.Warn().Err(err).Msg("cleanup")
	}
}
// RootFS returns the filename of the root filesystem image used for
// this Debian release.
func (d Debian) RootFS() string {
	return "out_of_tree_debian_" + d.release.String() + ".img"
}

View File

@ -0,0 +1,24 @@
package debian
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// TestDebian checks Equal against the distro descriptor; the slow
// Packages call (container build + snapshot queries) is skipped in CI.
func TestDebian(t *testing.T) {
	assert := assert.New(t)
	u := Debian{release: Wheezy}
	assert.True(u.Equal(distro.Distro{Release: "wheezy", ID: distro.Debian}))
	if os.Getenv("CI") != "" {
		t.Skip("skip testing in CI")
	}
	assert.NotEmpty(u.Packages())
}

467
distro/debian/kernel.go Normal file
View File

@ -0,0 +1,467 @@
package debian
import (
"errors"
"math"
"strings"
"time"
"github.com/Masterminds/semver"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot/metasnap"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// DebianKernelVersion ties together the two version identifiers of a
// Debian kernel, as seen in a deb name like
// linux-headers-4.17.0-2-amd64_4.17.14-1_amd64.deb.
type DebianKernelVersion struct {
	// Package version, e.g. "4.17.14-1"
	// See tags in https://salsa.debian.org/kernel-team/linux
	Package string
	// ABI version, e.g. "4.17.0-2"
	ABI string
}

// ParseKernelVersion extracts the ABI and package versions from a
// linux-image deb file name such as
// "linux-image-4.11.0-trunk-amd64_4.11-1~exp2_amd64.deb".
func ParseKernelVersion(pkg string) (dkv DebianKernelVersion, err error) {
	// "linux-image-4.11.0-trunk-amd64_4.11-1~exp2_amd64.deb"
	//   -> "4.11.0-trunk-amd64_4.11-1~exp2_amd64.deb"
	trimmed := strings.Replace(pkg, "linux-image-", "", -1)
	// -> ["4.11.0-trunk-amd64", "4.11-1~exp2", "amd64.deb"]
	parts := strings.Split(trimmed, "_")
	if len(parts) != 3 {
		err = errors.New("incorrect input format")
		return
	}
	// "4.11.0-trunk-amd64" -> "4.11.0-trunk"
	// TODO other archs?
	dkv.ABI, _, _ = strings.Cut(parts[0], "-amd64")
	if dkv.ABI == "" {
		err = errors.New("incorrect input format")
		return
	}
	dkv.Package = parts[1]
	if dkv.Package == "" {
		err = errors.New("incorrect input format")
		return
	}
	return
}
// DebianKernel bundles everything needed to install one Debian
// kernel: its version, the image package, the header packages, and
// extra dependency packages (e.g. kbuild).
type DebianKernel struct {
	Version      DebianKernelVersion
	Image        snapshot.Package
	Headers      []snapshot.Package
	Dependencies []snapshot.Package
	// FIXME There is a better way
	// Internal carries cache bookkeeping, not kernel data.
	Internal struct {
		// Invalid marks entries that cannot be used; they are
		// kept in the cache to avoid refetching too often.
		Invalid bool
		// LastFetch is when this entry was last fetched from
		// the snapshot service.
		LastFetch time.Time
	}
	Release Release
}
// HasDependency reports whether any dependency package name contains
// the given substring.
func (dk DebianKernel) HasDependency(pkgname string) bool {
	for _, dep := range dk.Dependencies {
		if strings.Contains(dep.Name, pkgname) {
			return true
		}
	}
	return false
}
// Packages returns the image, header and dependency packages as one
// flat list, in that order.
func (dk DebianKernel) Packages() []snapshot.Package {
	all := make([]snapshot.Package, 0,
		1+len(dk.Headers)+len(dk.Dependencies))
	all = append(all, dk.Image)
	all = append(all, dk.Headers...)
	return append(all, dk.Dependencies...)
}
// kver parses a Debian package version into a semver value for
// inline comparisons only; '~' and '+' are folded into '-' so that
// semver accepts them. Panics on unparsable input (MustParse).
func kver(ver string) *semver.Version {
	normalized := strings.NewReplacer("~", "-", "+", "-").Replace(ver)
	return semver.MustParse(normalized)
}
var (
	// ErrNoBinaryPackages: the snapshot query returned no binary
	// packages at all for the requested version.
	ErrNoBinaryPackages = errors.New("no binary packages found")
	// ErrNoHeadersPackage: no linux-headers package among the results.
	ErrNoHeadersPackage = errors.New("no headers package found")
	// ErrNoImagePackage: no linux-image package among the results.
	ErrNoImagePackage = errors.New("no image package found")
)
// getDebianKernel fetches the binary packages (image, headers,
// dependencies) for one linux source package version from
// snapshot.debian.org and resolves the Debian release they belong to.
func getDebianKernel(version string) (dk DebianKernel, err error) {
	flog := log.With().
		Str("version", version).
		Logger()
	dk.Version.Package = version
	// amd64-only selection; rt/cloud/dbg variants are filtered out.
	regex := `^(linux-(image|headers)-[a-z+~0-9\.\-]*-(common|amd64|amd64-unsigned)|linux-kbuild-.*|linux-compiler-.*-x86)$`
	filter := []string{
		"rt-amd64",
		"cloud-amd64",
		"all-amd64",
		"dbg",
	}
	packages, err := snapshot.Packages("linux", version, regex,
		[]string{"amd64", "all"}, filter)
	if err != nil {
		return
	}
	if len(packages) == 0 {
		err = ErrNoBinaryPackages
		return
	}
	// Sort the results into image / headers / everything else.
	var imageFound, headersFound bool
	for _, p := range packages {
		if strings.Contains(p.Name, "image") {
			imageFound = true
			dk.Image = p
		} else if strings.Contains(p.Name, "headers") {
			headersFound = true
			dk.Headers = append(dk.Headers, p)
		} else {
			dk.Dependencies = append(dk.Dependencies, p)
		}
	}
	if !imageFound {
		err = ErrNoImagePackage
		return
	}
	if !headersFound {
		err = ErrNoHeadersPackage
		return
	}
	// linux-image-<abi>-amd64 -> <abi>
	s := strings.Replace(dk.Image.Name, "linux-image-", "", -1)
	dk.Version.ABI = strings.Replace(s, "-amd64", "", -1)
	dk.Release = getRelease(dk.Image)
	if dk.Release == None {
		flog.Warn().Msg("release not found")
	} else {
		flog.Debug().Msgf("release is %s", dk.Release.Name())
	}
	return
}
// getRelease resolves which Debian release a package belongs to by
// asking metasnap which suites ever carried it; backports, updates
// and proposed-updates suites map back to their base release.
// Returns None when nothing matches or the query fails.
func getRelease(p snapshot.Package) Release {
	repos, err := metasnap.GetRepos(p.Repo.Archive, p.Name, p.Arch, p.Version)
	if err != nil {
		log.Debug().Err(err).Msg("metasnap")
		return None
	}
	for _, repo := range repos {
		suite := repo.Suite
		for _, name := range ReleaseStrings[1:] {
			if suite == name ||
				suite == name+"-backports" ||
				suite == name+"-updates" ||
				suite == name+"-proposed-updates" {
				return ReleaseFromString(name)
			}
		}
	}
	return None
}
// getCachedKernel looks up a kernel in the cache by deb package name,
// matching either the image package or any of its header packages.
//
// NOTE(review): when no entry matches, the zero-value DebianKernel is
// returned with a nil error — confirm callers handle that case.
func getCachedKernel(deb string) (dk DebianKernel, err error) {
	c, err := NewCache(CachePath)
	if err != nil {
		log.Error().Err(err).Msg("cache")
		return
	}
	defer c.Close()
	versions, err := c.GetVersions()
	if err != nil {
		log.Error().Err(err).Msg("get source package versions from cache")
		return
	}
	for _, version := range versions {
		var tmpdks []DebianKernel
		tmpdks, err = c.Get(version)
		if err != nil {
			continue
		}
		tmpdk := tmpdks[0]
		if deb == tmpdk.Image.Deb.Name {
			dk = tmpdk
			return
		}
		for _, h := range tmpdk.Headers {
			if deb == h.Deb.Name {
				dk = tmpdk
				return
			}
		}
	}
	return
}
// kbuildVersion picks the linux-tools version matching a kernel
// package version: an exact match if available, otherwise the first
// candidate with the same major and minor numbers, otherwise "".
func kbuildVersion(versions []string, kpkgver string) string {
	// Exact match first.
	for _, candidate := range versions {
		if candidate == kpkgver {
			return candidate
		}
	}
	want := kver(kpkgver)
	// Not able to find the exact version, try similar: anything
	// with a different major or minor version is not fit for
	// purpose.
	for _, candidate := range versions {
		have := kver(candidate)
		if want.Major() != have.Major() {
			continue
		}
		if want.Minor() != have.Minor() {
			continue
		}
		return candidate
	}
	return ""
}
// findKbuild finds the linux-kbuild binary package from the
// linux-tools source package that matches the kernel package version.
func findKbuild(versions []string, kpkgver string) (
	pkg snapshot.Package, err error) {

	version := kbuildVersion(versions, kpkgver)
	if version == "" {
		err = errors.New("cannot find kbuild version")
		return
	}
	packages, err := snapshot.Packages("linux-tools", version,
		`^linux-kbuild`, []string{"amd64"}, []string{"dbg"})
	if err != nil {
		return
	}
	if len(packages) == 0 {
		// Must return here: falling through to packages[0]
		// would panic with index out of range.
		err = errors.New("cannot find kbuild package")
		return
	}
	pkg = packages[0]
	return
}
// updateKbuild replaces the kbuild dependency of pre-4.5 kernels
// with the matching package from the linux-tools source; if none is
// found the kernel is marked invalid.
func updateKbuild(toolsVersions []string, dk *DebianKernel) {
	// Kernels >= 4.5 are left untouched.
	if !kver(dk.Version.Package).LessThan(kver("4.5-rc0")) {
		return
	}
	// Drop any kbuild packages currently listed as dependencies.
	var filtered []snapshot.Package
	for _, pkg := range dk.Dependencies {
		if strings.Contains(pkg.Name, "kbuild") {
			continue
		}
		filtered = append(filtered, pkg)
	}
	dk.Dependencies = filtered
	kbuildpkg, err := findKbuild(toolsVersions, dk.Version.Package)
	if err != nil {
		dk.Internal.Invalid = true
		return
	}
	dk.Dependencies = append(dk.Dependencies, kbuildpkg)
}
// getKernelsByVersion returns the kernels for one source package
// version, preferring the cache. Valid cached entries may be
// refreshed per mode (release and/or kbuild) and re-stored; invalid
// entries are refetched only after RefetchDays. Fresh results —
// including invalid ones — are written back to the cache. fromcache
// reports whether the result was served from the cache.
func getKernelsByVersion(slog zerolog.Logger, c *Cache, toolsVersions []string,
	version string, mode GetKernelsMode) (kernels []DebianKernel,
	fromcache bool) {
	var dk DebianKernel
	dks, err := c.Get(version)
	if err == nil {
		dk = dks[0]
		if !dk.Internal.Invalid {
			// TODO refactor
			slog.Trace().Msgf("found in cache")
			if dk.Release == None && mode&UpdateRelease != 0 {
				slog.Debug().Msg("update release")
				dk.Release = getRelease(dk.Image)
				if dk.Release != None {
					slog.Debug().Msg("update cache")
					err = c.Put([]DebianKernel{dk})
					if err != nil {
						slog.Error().Err(err).Msg("")
						return
					}
				}
			}
			if mode&UpdateKbuild != 0 {
				slog.Debug().Msg("update kbuild")
				updateKbuild(toolsVersions, &dk)
				slog.Debug().Msg("update cache")
				err = c.Put([]DebianKernel{dk})
				if err != nil {
					slog.Error().Err(err).Msg("")
					return
				}
			}
			kernels = append(kernels, dk)
			fromcache = true
			return
		}
	}
	// Cached-but-invalid entries are only refetched after the
	// cool-down period.
	if dk.Internal.Invalid {
		refetch := dk.Internal.LastFetch.AddDate(0, 0, RefetchDays)
		if refetch.After(time.Now()) {
			slog.Trace().Msgf("refetch at %v", refetch)
			return
		}
	}
	dk, err = getDebianKernel(version)
	if err != nil {
		if err == ErrNoBinaryPackages {
			slog.Warn().Err(err).Msg("")
		} else {
			slog.Error().Err(err).Msg("get debian kernel")
		}
		dk.Internal.Invalid = true
	}
	if !dk.HasDependency("kbuild") {
		// Debian kernels prior to the 4.5 package
		// version did not have a kbuild built from
		// the linux source itself, but used the
		// linux-tools source package.
		if !kver(dk.Version.Package).LessThan(kver("4.5-rc0")) {
			dk.Internal.Invalid = true
		} else {
			updateKbuild(toolsVersions, &dk)
		}
	}
	dk.Internal.LastFetch = time.Now()
	if !dk.Internal.Invalid {
		kernels = append(kernels, dk)
	}
	// Store even invalid entries so the refetch cool-down applies.
	err = c.Put([]DebianKernel{dk})
	if err != nil {
		slog.Error().Err(err).Msg("put to cache")
		return
	}
	slog.Debug().Msgf("%s cached", version)
	return
}
var (
	// CachePath overrides the kernels cache location; when empty,
	// GetKernelsWithLimit falls back to the dotfiles default and
	// tries to download a prebuilt cache.
	CachePath string
	// RefetchDays is how many days to wait before refetching a
	// kernel previously marked invalid.
	RefetchDays int = 14
)
// GetKernelsMode is a bit mask telling GetKernelsWithLimit which
// fields of cached kernels to refresh; flags may be OR-ed together.
type GetKernelsMode int

const (
	// NoMode performs no cache refresh.
	NoMode GetKernelsMode = 0
	// UpdateRelease re-resolves the Debian release of cached kernels.
	UpdateRelease GetKernelsMode = 1 << (iota - 1)
	// UpdateKbuild re-resolves the kbuild dependency of cached kernels.
	UpdateKbuild
)
// GetKernelsWithLimit is workaround for testing and building the
// first cache, which is heavily rate limited by snapshot.debian.org
//
// limit caps how many versions are fetched remotely (cache hits are
// free); mode selects which cached fields to refresh.
func GetKernelsWithLimit(limit int, mode GetKernelsMode) (kernels []DebianKernel,
	err error) {
	if CachePath == "" {
		CachePath = dotfiles.File("debian.cache")
		log.Debug().Msgf("Use default kernels cache path: %s", CachePath)
		if !fs.PathExists(CachePath) {
			log.Debug().Msgf("No cache, download")
			err = cache.DownloadDebianCache(CachePath)
			if err != nil {
				// Not fatal: fall through and build the
				// cache from scratch.
				log.Debug().Err(err).Msg(
					"No remote cache, will take some time")
			}
		}
	} else {
		log.Debug().Msgf("Debian kernels cache path: %s", CachePath)
	}
	c, err := NewCache(CachePath)
	if err != nil {
		log.Error().Err(err).Msg("cache")
		return
	}
	defer c.Close()
	toolsVersions, err := snapshot.SourcePackageVersions("linux-tools")
	if err != nil {
		log.Error().Err(err).Msg("get linux-tools source pkg versions")
		return
	}
	versions, err := snapshot.SourcePackageVersions("linux")
	if err != nil {
		log.Error().Err(err).Msg("get linux source package versions")
		return
	}
	err = c.PutVersions(versions)
	if err != nil {
		log.Error().Err(err).Msg("put source package versions to cache")
		return
	}
	for i, version := range versions {
		slog := log.With().Str("version", version).Logger()
		slog.Trace().Msgf("%03d/%03d", i, len(versions))
		vkernels, fromcache := getKernelsByVersion(slog, c, toolsVersions, version, mode)
		kernels = append(kernels, vkernels...)
		// Only remote fetches count against the limit.
		if !fromcache {
			limit--
		}
		if limit <= 0 {
			return
		}
	}
	return
}
// GetKernels returns all available Debian kernels with no fetch
// limit and no cache-refresh modes.
func GetKernels() (kernels []DebianKernel, err error) {
	return GetKernelsWithLimit(math.MaxInt32, NoMode)
}

View File

@ -0,0 +1,67 @@
package debian
import (
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
)
// TestGetDebianKernel fetches one known version from the snapshot
// service and checks its resolved release (network required).
func TestGetDebianKernel(t *testing.T) {
	assert := assert.New(t)
	dk, err := getDebianKernel("4.6.4-1")
	assert.Nil(err)
	assert.Equal(getRelease(dk.Image), Stretch)
	t.Logf("%s", spew.Sdump(dk))
}
// TestParseKernelVersion checks that every fetched kernel's deb name
// parses and that package versions are unique across kernels.
func TestParseKernelVersion(t *testing.T) {
	assert := assert.New(t)
	kernels, err := GetKernelsWithLimit(16, NoMode)
	assert.Nil(err)
	assert.NotEmpty(kernels)
	versions := make(map[string]bool)
	for _, dk := range kernels {
		dkv, err := ParseKernelVersion(dk.Image.Deb.Name)
		assert.Nil(err)
		_, found := versions[dkv.Package]
		assert.True(!found)
		versions[dkv.Package] = true
	}
}
// TestKbuildVersion checks that every pre-4.5 kernel maps to some
// linux-tools kbuild version (network required).
func TestKbuildVersion(t *testing.T) {
	assert := assert.New(t)
	kernels, err := GetKernelsWithLimit(16, NoMode)
	assert.Nil(err)
	assert.NotEmpty(kernels)
	toolsVersions, err := snapshot.SourcePackageVersions("linux-tools")
	assert.Nil(err)
	for _, dk := range kernels {
		// Only pre-4.5 kernels use the linux-tools kbuild.
		if !kver(dk.Version.Package).LessThan(kver("4.5-rc0")) {
			continue
		}
		version := kbuildVersion(
			toolsVersions,
			dk.Version.Package,
		)
		assert.Nil(err)
		assert.NotEmpty(version)
		t.Log(dk.Version.Package, "->", version)
	}
}

View File

@ -0,0 +1,158 @@
package metasnap
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/rs/zerolog/log"
"golang.org/x/time/rate"
)
// Note: Metasnap does not have all the packages, and its API is
// rather buggy.
const apiURL = "http://metasnap.debian.net/cgi-bin/api?"

var (
	// limiterTimeout is the current minimum interval between API
	// requests; lowerLimit doubles it up to limiterMaxTimeout when
	// the server drops connections.
	limiterTimeout     time.Duration = time.Second / 20
	limiterMaxTimeout  time.Duration = time.Second * 2
	limiterBurst       int           = 1
	limiterUpdateDelay time.Duration = time.Second

	// Limiter throttles all requests to the metasnap API.
	Limiter = rate.NewLimiter(rate.Every(limiterTimeout), limiterBurst)
)
// lowerLimit slows down the API rate limiter after the server drops
// a connection: the interval between requests is doubled (capped at
// limiterMaxTimeout), applied after limiterUpdateDelay, and the
// caller is paused for that delay.
func lowerLimit() {
	next := limiterTimeout * 2
	if next > limiterMaxTimeout {
		next = limiterMaxTimeout
	}
	limiterTimeout = next
	log.Info().Msgf("limiter timeout set to %v", limiterTimeout)
	Limiter.SetLimitAt(
		time.Now().Add(limiterUpdateDelay),
		rate.Every(limiterTimeout),
	)
	log.Info().Msgf("wait %v", limiterUpdateDelay)
	time.Sleep(limiterUpdateDelay)
}
// Retries in case of 5xx errors
var Retries = 10

// ErrNotFound is returned for HTTP 404 responses and for queries
// that yield no results.
var ErrNotFound = errors.New("404 not found")
// query performs a rate-limited GET against the metasnap API and
// returns the response body. 5xx responses are retried up to Retries
// times; "reset by peer" transport errors slow the limiter down and
// retry. A 404 maps to ErrNotFound; any other >=400 status becomes a
// formatted error.
func query(q string) (result string, err error) {
	flog := log.With().Str("url", q).Logger()

	var resp *http.Response
	for i := Retries; i > 0; i-- {
		flog.Trace().Msg("wait")
		Limiter.Wait(context.Background())
		flog.Trace().Msg("start")
		resp, err = http.Get(q)
		if err != nil {
			if strings.Contains(err.Error(), "reset by peer") {
				flog.Debug().Err(err).Msg("")
				lowerLimit()
				continue
			}
			flog.Error().Err(err).Msg("")
			return
		}
		// Deferred until query returns; bounded by Retries.
		defer resp.Body.Close()
		flog.Debug().Msgf("%s", resp.Status)
		if resp.StatusCode == 404 {
			err = ErrNotFound
			return
		}
		if resp.StatusCode < 500 {
			break
		}
		flog.Debug().Msgf("retry (%d left)", i)
	}
	if resp == nil {
		// Every attempt ended with "reset by peer"; err still
		// holds the last transport error. Dereferencing resp
		// below would panic.
		return
	}
	if resp.StatusCode >= 400 {
		// Return before reading the body so the status error is
		// not overwritten by a successful io.ReadAll.
		err = fmt.Errorf("%d (%s)", resp.StatusCode, q)
		return
	}
	buf, err := io.ReadAll(resp.Body)
	if err != nil {
		return
	}
	result = string(buf)
	return
}
// queryAPIf formats the arguments into a query string and performs
// it against the metasnap API endpoint.
func queryAPIf(f string, s ...interface{}) (result string, err error) {
	return query(apiURL + fmt.Sprintf(f, s...))
}
// Snapshot is the first/last snapshot timestamp range reported by
// metasnap for a package in a repository.
type Snapshot struct {
	First string
	Last  string
}

// Repo identifies one archive/suite/component that carried a package
// version, together with the snapshot range it was present in.
type Repo struct {
	Archive   string
	Suite     string
	Component string
	Snapshot  Snapshot
}
// GetRepos queries metasnap for every repository that carried the
// given package version. Returns ErrNotFound when the API has no
// data or no line matches the requested version.
func GetRepos(archive, pkg, arch, ver string) (repos []Repo, err error) {
	result, err := queryAPIf("archive=%s&pkg=%s&arch=%s",
		archive, pkg, arch)
	if err != nil {
		return
	}
	if result == "" {
		err = ErrNotFound
		return
	}
	for _, line := range strings.Split(result, "\n") {
		// A blank line terminates the listing.
		if line == "" {
			break
		}
		// Expected format: version suite component first last
		fields := strings.Split(line, " ")
		if len(fields) != 5 {
			err = fmt.Errorf("metasnap api returned %s", result)
			return
		}
		repo := Repo{
			Archive:   archive,
			Suite:     fields[1],
			Component: fields[2],
			Snapshot: Snapshot{
				First: fields[3],
				Last:  fields[4],
			},
		}
		// The API is queried without a version; keep only lines
		// matching the requested one.
		if fields[0] == ver {
			repos = append(repos, repo)
		}
	}
	if len(repos) == 0 {
		err = ErrNotFound
		return
	}
	return
}

View File

@ -0,0 +1,28 @@
package metasnap
import (
"testing"
"github.com/davecgh/go-spew/spew"
)
// TestGetRepos checks an existing package resolves and a bogus one
// yields ErrNotFound (network required).
func TestGetRepos(t *testing.T) {
	// existing
	infos, err := GetRepos("debian", "linux-image-3.8-trunk-amd64",
		"amd64", "3.8.2-1~experimental.1")
	if err != nil {
		t.Fatal(err)
	}
	t.Log(spew.Sdump(infos))
	// non-existing
	infos, err = GetRepos("debian", "meh", "amd64", "meh")
	if err == nil {
		t.Fatalf("should not be ok, result: %s", spew.Sdump(infos))
	}
	if err != ErrNotFound {
		t.Fatal("wrong error type")
	}
}

View File

@ -0,0 +1,186 @@
package mr
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
"github.com/rs/zerolog/log"
"golang.org/x/time/rate"
)
const apiURL = "https://snapshot.debian.org/mr"

var (
	// limiterTimeout is the current minimum interval between API
	// requests; lowerLimit doubles it up to limiterMaxTimeout when
	// the server drops connections.
	limiterTimeout     time.Duration = time.Second / 20
	limiterMaxTimeout  time.Duration = time.Second * 2
	limiterBurst       int           = 1
	limiterUpdateDelay time.Duration = time.Second

	// Limiter throttles all requests to the snapshot "mr" API.
	Limiter = rate.NewLimiter(rate.Every(limiterTimeout), limiterBurst)
)
// lowerLimit slows down the API rate limiter after the server drops
// a connection: the interval between requests is doubled (capped at
// limiterMaxTimeout), applied after limiterUpdateDelay, and the
// caller is paused for that delay.
func lowerLimit() {
	next := limiterTimeout * 2
	if next > limiterMaxTimeout {
		next = limiterMaxTimeout
	}
	limiterTimeout = next
	log.Info().Msgf("limiter timeout set to %v", limiterTimeout)
	Limiter.SetLimitAt(
		time.Now().Add(limiterUpdateDelay),
		rate.Every(limiterTimeout),
	)
	log.Info().Msgf("wait %v", limiterUpdateDelay)
	time.Sleep(limiterUpdateDelay)
}
// Retries in case of 5xx errors
var Retries = 10
// The structures below mirror the JSON responses of the
// snapshot.debian.org machine-readable API:
// https://salsa.debian.org/snapshot-team/snapshot/blob/master/API

// Package is the response of /mr/package/<package>/: all known
// source versions of a package.
type Package struct {
	Comment string `json:"_comment"`
	Package string `json:"package"`
	Result  []struct {
		Version string `json:"version"`
	} `json:"result"`
}

// Binpackages is the response of
// /mr/package/<package>/<version>/binpackages.
type Binpackages struct {
	Comment string `json:"_comment"`
	Package string `json:"package"`
	Result  []struct {
		Name    string `json:"name"`
		Version string `json:"version"`
	} `json:"result"`
	Version string `json:"version"`
}

// Binary is the response of /mr/binary/<binary>/.
type Binary struct {
	Comment string `json:"_comment"`
	Binary  string `json:"binary"`
	Result  []struct {
		BinaryVersion string `json:"binary_version"`
		Name          string `json:"name"`
		Source        string `json:"source"`
		Version       string `json:"version"`
	} `json:"result"`
}

// Binfiles is the response of
// /mr/binary/<binpkg>/<binversion>/binfiles.
type Binfiles struct {
	Comment       string `json:"_comment"`
	Binary        string `json:"binary"`
	BinaryVersion string `json:"binary_version"`
	Result        []struct {
		Architecture string `json:"architecture"`
		Hash         string `json:"hash"`
	} `json:"result"`
}

// Fileinfo describes one occurrence of a file in the archive.
type Fileinfo struct {
	ArchiveName string `json:"archive_name"`
	FirstSeen   string `json:"first_seen"`
	Name        string `json:"name"`
	Path        string `json:"path"`
	Size        int    `json:"size"`
}

// Info is the response of /mr/file/<hash>/info.
type Info struct {
	Comment string     `json:"_comment"`
	Hash    string     `json:"hash"`
	Result  []Fileinfo `json:"result"`
}
var ErrNotFound = errors.New("404 not found")
// getJson performs a rate-limited GET of query and decodes the JSON
// body into target. 5xx responses are retried up to Retries times;
// connection resets/refusals lower the rate limit and retry; a 404
// yields ErrNotFound; any other >= 400 status is returned as an error.
func getJson(query string, target interface{}) (err error) {
	flog := log.With().Str("url", query).Logger()

	var resp *http.Response
	for i := Retries; i > 0; i-- {
		flog.Trace().Msg("wait")
		Limiter.Wait(context.Background())

		flog.Trace().Msg("start")
		resp, err = http.Get(query)
		if err != nil {
			if strings.Contains(err.Error(), "reset by peer") ||
				strings.Contains(err.Error(), "connection refused") {

				flog.Debug().Err(err).Msg("")
				lowerLimit()
				continue
			}
			flog.Error().Err(err).Msg("")
			return
		}

		flog.Debug().Msgf("%s", resp.Status)

		if resp.StatusCode == 404 {
			resp.Body.Close()
			err = ErrNotFound
			return
		}
		if resp.StatusCode < 500 {
			break
		}

		// 5xx: close this attempt's body before retrying so the
		// connection can be reused (the original deferred all
		// closes to function exit, leaking a body per retry).
		resp.Body.Close()
		resp = nil
		flog.Debug().Msgf("retry (%d left)", i)
	}

	// All attempts failed before a usable response was obtained
	// (the original dereferenced a nil resp here and panicked).
	if resp == nil {
		if err == nil {
			err = fmt.Errorf("no response after %d retries (%s)",
				Retries, query)
		}
		return
	}
	defer resp.Body.Close()

	// Do not fall through to decoding on client errors (the original
	// set err here but immediately overwrote it with the decode
	// result, silently dropping the HTTP error).
	if resp.StatusCode >= 400 {
		err = fmt.Errorf("%d (%s)", resp.StatusCode, query)
		return
	}

	return json.NewDecoder(resp.Body).Decode(target)
}
// GetPackage queries /mr/package/<name>/ for all known source versions.
func GetPackage(name string) (pkg Package, err error) {
	err = getJson(fmt.Sprintf("%s/package/%s/", apiURL, name), &pkg)
	return
}
// GetBinpackages queries /mr/package/<name>/<version>/binpackages for
// the binary packages built from one source version.
func GetBinpackages(name, version string) (binpkgs Binpackages, err error) {
	query := apiURL + "/package/" + name + "/" + version + "/binpackages"
	err = getJson(query, &binpkgs)
	return
}
// GetBinary queries /mr/binary/<pkg>/ for the source packages that
// provide the given binary package.
func GetBinary(pkg string) (binary Binary, err error) {
	err = getJson(apiURL+"/binary/"+pkg+"/", &binary)
	return
}
// GetBinfiles queries /mr/binary/<binpkg>/<binversion>/binfiles for the
// per-architecture file hashes of a binary package version.
func GetBinfiles(binpkg, binversion string) (binfiles Binfiles, err error) {
	query := apiURL + "/binary/" + binpkg + "/" + binversion + "/binfiles"
	err = getJson(query, &binfiles)
	return
}
// GetInfo queries /mr/file/<hash>/info; a successful call is guaranteed
// to carry at least one result entry.
func GetInfo(hash string) (info Info, err error) {
	query := fmt.Sprintf("%s/file/%s/info", apiURL, hash)
	if err = getJson(query, &info); err != nil {
		return
	}
	if len(info.Result) == 0 {
		err = errors.New("empty response")
	}
	return
}

View File

@ -0,0 +1,50 @@
package mr
import (
"testing"
)
// TestMR is an integration test walking the whole live-API chain:
// package -> binpackages -> binary -> binfiles -> file info.
func TestMR(t *testing.T) {
	name := "linux"
	t.Log(name)
	pkg, err := GetPackage(name)
	if err != nil {
		t.Fatal(err)
	}
	version := pkg.Result[0].Version
	t.Log(version)
	binpkgs, err := GetBinpackages(name, version)
	if err != nil {
		t.Fatal(err)
	}
	binpkgName := binpkgs.Result[0].Name
	t.Log(binpkgName)
	binary, err := GetBinary(binpkgName)
	if err != nil {
		t.Fatal(err)
	}
	binaryName := binary.Result[0].Name
	binaryVersion := binary.Result[0].BinaryVersion
	t.Log(binaryName, binaryVersion)
	binfiles, err := GetBinfiles(binaryName, binaryVersion)
	if err != nil {
		t.Fatal(err)
	}
	hash := binfiles.Result[0].Hash
	t.Log(hash)
	info, err := GetInfo(hash)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(info)
}

View File

@ -0,0 +1,166 @@
package snapshot
import (
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot/mr"
)
const URL = "https://snapshot.debian.org"
// SourcePackageVersions lists every version of a source package known
// to snapshot.debian.org.
func SourcePackageVersions(name string) (versions []string, err error) {
	pkg, err := mr.GetPackage(name)
	if err != nil {
		return
	}
	for i := range pkg.Result {
		versions = append(versions, pkg.Result[i].Version)
	}
	return
}
// Package describes one binary package resolved against the
// snapshot.debian.org archive.
type Package struct {
	Name    string
	Source  string
	Version string
	Arch    string

	// Deb is the concrete .deb file: its filename, snapshot API
	// hash, and direct download URL.
	Deb struct {
		Name string
		Hash string
		URL  string
	}

	// Repo locates the package in the archive: snapshot timestamp,
	// archive name, and pool component.
	Repo struct {
		Snapshot  string
		Archive   string
		Component string
	}
}
// NewPackage resolves a binary package (name, source package, version)
// to a concrete .deb: its architecture, hash, download URL and
// repository location. archs restricts acceptable architectures.
func NewPackage(name, srcname, version string, archs []string) (
	p Package, err error) {

	p.Name = name
	p.Source = srcname
	p.Version = version

	p.Arch, p.Deb.Hash, err = p.getHash(archs)
	if err != nil {
		return
	}

	info, err := mr.GetInfo(p.Deb.Hash)
	if err != nil {
		return
	}
	// mr.GetInfo guarantees at least one result on success.
	p.Deb.Name = info.Result[0].Name
	p.Repo.Archive = info.Result[0].ArchiveName
	p.Repo.Snapshot = info.Result[0].FirstSeen

	p.Deb.URL, err = url.JoinPath(URL, "archive", p.Repo.Archive,
		p.Repo.Snapshot, info.Result[0].Path, p.Deb.Name)
	if err != nil {
		return
	}

	// Expect a path of the form "/pool/<component>/..." (index 0 is
	// the empty string before the leading slash). Check the length
	// BEFORE indexing: the original evaluated split[1] first, which
	// panics with index-out-of-range on short paths.
	split := strings.Split(info.Result[0].Path, "/")
	if len(split) < 3 || split[1] != "pool" {
		err = fmt.Errorf("incorrect path: %s", info.Result[0].Path)
		return
	}
	p.Repo.Component = split[2]
	return
}
// getHash returns the architecture and file hash of the first binfile
// of p.Name/p.Version whose architecture appears in archs.
func (p Package) getHash(archs []string) (arch, hash string, err error) {
	binfiles, err := mr.GetBinfiles(p.Name, p.Version)
	if err != nil {
		return
	}
	for _, res := range binfiles.Result {
		for _, allowed := range archs {
			if res.Architecture != allowed {
				continue
			}
			arch, hash = res.Architecture, res.Hash
			return
		}
	}
	err = errors.New("hash not found")
	return
}
// contains reports whether pkgs already holds a package with the same
// name as pkg.
func contains(pkgs []Package, pkg Package) bool {
	for i := range pkgs {
		if pkgs[i].Name == pkg.Name {
			return true
		}
	}
	return false
}
// filtered reports whether s contains any of the filter substrings.
func filtered(s string, filter []string) bool {
	matched := false
	for i := 0; i < len(filter) && !matched; i++ {
		matched = strings.Contains(s, filter[i])
	}
	return matched
}
// Packages returns all binary packages of srcname/version whose name
// matches regex and is not excluded by filter, restricted to archs.
// An unknown source version yields an empty result, not an error.
func Packages(srcname, version, regex string, archs, filter []string) (
	pkgs []Package, err error) {

	binpkgs, err := mr.GetBinpackages(srcname, version)
	if err == mr.ErrNotFound {
		// not an error: there is simply nothing to return
		err = nil
		return
	}
	if err != nil {
		return
	}

	r := regexp.MustCompile(regex)

	for _, res := range binpkgs.Result {
		// skip results that belong to a different version
		if res.Version != version {
			continue
		}
		if !r.MatchString(res.Name) || filtered(res.Name, filter) {
			continue
		}
		log.Trace().Msgf("matched %v", res.Name)

		var pkg Package
		pkg, err = NewPackage(res.Name, srcname, version, archs)
		if err != nil {
			return
		}

		// deduplicate by package name
		if contains(pkgs, pkg) {
			log.Trace().Msgf("%v already in slice O_o", pkg.Name)
			continue
		}

		log.Trace().Msgf("append %v", pkg.Name)
		pkgs = append(pkgs, pkg)
	}
	return
}

View File

@ -0,0 +1,37 @@
package snapshot
import (
"errors"
"testing"
)
// TestSourcePackageVersions queries the live snapshot service for the
// "linux" source package and expects a non-empty version list.
func TestSourcePackageVersions(t *testing.T) {
	versions, err := SourcePackageVersions("linux")
	if err != nil {
		t.Fatal(err)
	}
	if len(versions) == 0 {
		t.Fatal(errors.New("empty response"))
	}
	t.Logf("found %d package versions", len(versions))
}
// TestPackages resolves the kernel image/headers packages for one known
// Debian kernel version against the live snapshot service.
func TestPackages(t *testing.T) {
	rx := `^(linux-(image|headers)-[a-z+~0-9\.\-]*-(common|amd64|amd64-unsigned)|linux-kbuild-.*)$`
	packages, err := Packages("linux", "5.10.179-1", rx,
		[]string{"amd64", "all"}, []string{})
	if err != nil {
		t.Fatal(err)
	}
	if len(packages) == 0 {
		t.Fatal(errors.New("empty response"))
	}
	for _, pkg := range packages {
		t.Logf("%#v", pkg)
	}
}

106
distro/distro.go Normal file
View File

@ -0,0 +1,106 @@
package distro
import (
"errors"
"sync"
)
// mu guards the distros registry.
var mu sync.Mutex

// distros holds every registered distribution backend.
var distros []distribution

// distribution is the interface each distro backend implements and
// registers via Register from its package init().
type distribution interface {
	Distro() Distro
	Equal(Distro) bool
	Packages() (packages []string, err error)
	Install(pkg string, headers bool) (err error)
	Kernels() (kernels []KernelInfo, err error)
	RootFS() string
}
// Register adds a distribution backend to the global registry.
func Register(d distribution) {
	mu.Lock()
	defer mu.Unlock()
	distros = append(distros, d)
}
// List returns the Distro descriptors of every registered backend.
func List() (dds []Distro) {
	for i := range distros {
		dds = append(dds, distros[i].Distro())
	}
	return
}
// Distro identifies a distribution by ID and release string.
type Distro struct {
	ID      ID
	Release string
}
// String returns "<ID> <Release>", e.g. "Ubuntu 22.04".
func (d Distro) String() string {
	return d.ID.String() + " " + d.Release
}
// Packages collects kernel package lists from registered backends
// matching d. Zero-valued fields act as wildcards: ID None matches any
// distro, empty Release matches any release of the matching ID.
func (d Distro) Packages() (packages []string, err error) {
	for _, dd := range distros {
		if d.ID != None && d.ID != dd.Distro().ID {
			continue
		}
		if d.Release != "" && !dd.Equal(d) {
			continue
		}
		var pkgs []string
		pkgs, err = dd.Packages()
		if err != nil {
			return
		}
		packages = append(packages, pkgs...)
	}
	return
}
// Install delegates to the backend matching d; errors when no
// registered backend matches.
func (d Distro) Install(pkg string, headers bool) (err error) {
	for i := range distros {
		if distros[i].Equal(d) {
			return distros[i].Install(pkg, headers)
		}
	}
	return errors.New("not found")
}
// Kernels returns the kernels of the backend matching d; no match
// yields an empty result without error.
func (d Distro) Kernels() (kernels []KernelInfo, err error) {
	for i := range distros {
		if distros[i].Equal(d) {
			kernels, err = distros[i].Kernels()
			return
		}
	}
	return
}
// Equal reports whether d and to resolve to the same registered
// backend: it finds a backend equal to d, then asks that backend
// whether it also equals to. False when d matches no backend.
func (d Distro) Equal(to Distro) bool {
	for _, dd := range distros {
		if dd.Equal(d) {
			return dd.Equal(to)
		}
	}
	return false
}
// RootFS returns the rootfs image filename from the backend matching d,
// or the empty string when no backend matches.
func (d Distro) RootFS() string {
	for i := range distros {
		if distros[i].Equal(d) {
			return distros[i].RootFS()
		}
	}
	return ""
}
// Command pairs a shell command with the distro it targets.
type Command struct {
	Distro  Distro
	Command string
}

72
distro/id.go Normal file
View File

@ -0,0 +1,72 @@
package distro
import (
"fmt"
"strings"
)
// ID of the distro
type ID int

const (
	None ID = iota
	// Ubuntu https://ubuntu.com/
	Ubuntu
	// CentOS https://www.centos.org/
	CentOS
	// Debian https://www.debian.org/
	Debian
	// OracleLinux https://www.oracle.com/linux/
	OracleLinux
	// OpenSUSE https://opensuse.org/
	OpenSUSE
)

// IDs lists every valid ID value, including None.
var IDs = []ID{
	None, Ubuntu, CentOS, Debian, OracleLinux, OpenSUSE,
}

// nameStrings maps ID values (used as indexes) to display names; it
// must stay in sync with the const block above.
var nameStrings = [...]string{
	"",
	"Ubuntu",
	"CentOS",
	"Debian",
	"OracleLinux",
	"openSUSE",
}
// NewID parses a distro name (case-insensitive) into an ID; the empty
// string yields None, unknown names yield an error.
func NewID(name string) (id ID, err error) {
	err = id.UnmarshalTOML([]byte(name))
	return
}
// String returns the display name of the ID. Note: an id outside the
// declared const range would index past nameStrings and panic.
func (id ID) String() string {
	return nameStrings[id]
}
// UnmarshalTOML is for support github.com/naoina/toml; it parses an
// (optionally quoted) distro name case-insensitively. The empty string
// maps to None; anything unrecognized is an error.
func (id *ID) UnmarshalTOML(data []byte) (err error) {
	name := strings.Trim(string(data), `"`)
	switch {
	case strings.EqualFold(name, "Ubuntu"):
		*id = Ubuntu
	case strings.EqualFold(name, "CentOS"):
		*id = CentOS
	case strings.EqualFold(name, "Debian"):
		*id = Debian
	case strings.EqualFold(name, "OracleLinux"):
		*id = OracleLinux
	case strings.EqualFold(name, "openSUSE"):
		*id = OpenSUSE
	case name == "":
		*id = None
	default:
		err = fmt.Errorf("distro %s is not supported", name)
	}
	return
}
// MarshalTOML is for support github.com/naoina/toml; it emits the
// quoted display name.
func (id ID) MarshalTOML() (data []byte, err error) {
	data = []byte(`"` + id.String() + `"`)
	return
}

41
distro/kernel.go Normal file
View File

@ -0,0 +1,41 @@
package distro
import "code.dumpstack.io/tools/out-of-tree/qemu"
// ByRootFS implements sort.Interface, ordering KernelInfo entries by
// RootFS lexicographically.
type ByRootFS []KernelInfo

func (a ByRootFS) Len() int           { return len(a) }
func (a ByRootFS) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByRootFS) Less(i, j int) bool { return a[i].RootFS < a[j].RootFS }
// KernelInfo defines kernels.toml entries
type KernelInfo struct {
	Distro Distro

	// KernelVersion must be *exactly* same as in `uname -r`
	KernelVersion string
	KernelRelease string

	// Build-time information
	KernelSource  string // module/exploit will be build on host
	ContainerName string

	// Runtime information
	KernelPath  string
	InitrdPath  string
	ModulesPath string

	CPU    qemu.CPU
	RootFS string

	// Debug symbols
	VmlinuxPath string

	// Package name, not mandatory (yet)
	Package string

	// Blocklisted marks kernels known not to work.
	Blocklisted bool
}

301
distro/opensuse/opensuse.go Normal file
View File

@ -0,0 +1,301 @@
package opensuse
import (
"fmt"
"strings"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// init registers every supported openSUSE release with the distro
// registry.
func init() {
	releases := []string{
		"12.1", "12.2", "12.3",
		"13.1", "13.2",
		"42.1", "42.2", "42.3",
		"15.0", "15.1", "15.2", "15.3", "15.4", "15.5",
	}
	for _, release := range releases {
		distro.Register(OpenSUSE{release: release})
	}
}
// OpenSUSE is the distro backend for openSUSE releases.
type OpenSUSE struct {
	release string
}
// Equal reports whether d refers to this openSUSE release.
func (suse OpenSUSE) Equal(d distro.Distro) bool {
	return d.ID == distro.OpenSUSE && d.Release == suse.release
}
// Distro returns the generic descriptor of this backend.
func (suse OpenSUSE) Distro() distro.Distro {
	return distro.Distro{ID: distro.OpenSUSE, Release: suse.release}
}
// Packages builds (or imports) the container image for this release and
// lists the kernel-default package versions available in it.
func (suse OpenSUSE) Packages() (pkgs []string, err error) {
	c, err := container.New(suse.Distro())
	if err != nil {
		return
	}

	var name string
	if strings.HasPrefix(suse.release, "12") {
		// 12.x images are imported from archived tarballs in the
		// project cache (12.2 reuses the 12.1 image).
		var cnt string
		switch suse.release {
		case "12.1", "12.2":
			name = "opensuse:12.1"
			cnt = "openSUSE-12.1"
		case "12.3":
			name = "opensuse:12.3"
			cnt = "openSUSE-12.3"
		}
		cnturl := cache.ContainerURL(cnt)
		err = container.Import(cnturl, name)
		if err != nil {
			return
		}
	} else if strings.HasPrefix(suse.release, "13") {
		// all 13.x use the cached 13.2 image
		name = "opensuse:13"
		cnturl := cache.ContainerURL("openSUSE-13.2")
		err = container.Import(cnturl, name)
		if err != nil {
			return
		}
	} else if strings.HasPrefix(suse.release, "42") {
		name = "opensuse/leap:42"
	} else if strings.HasPrefix(suse.release, "15") {
		name = "opensuse/leap:" + suse.release
	}

	if !c.Exist() {
		err = c.Build(name, suse.envs(), suse.runs())
		if err != nil {
			return
		}
	}

	// extract x86_64 kernel-default versions from zypper search
	cmd := "zypper search -s --match-exact kernel-default | grep x86_64 " +
		"| cut -d '|' -f 4 | sed 's/ //g'"

	output, err := c.Run("", []string{cmd})
	if err != nil {
		return
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
// Kernels lists the kernels of this release's container, stripping the
// "-default" flavour suffix so releases match `uname -r`.
func (suse OpenSUSE) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(suse.Distro())
	if err != nil {
		return
	}
	if kernels, err = c.Kernels(); err != nil {
		return
	}
	for i := range kernels {
		release := kernels[i].KernelRelease
		kernels[i].KernelRelease = strings.Replace(
			release, "-default", "", -1)
	}
	return
}
// envs returns extra container build environment (none for openSUSE).
func (suse OpenSUSE) envs() (envs []string) {
	return
}
// runs returns the shell commands that build the container image:
// repository setup (live or discontinued mirrors depending on release),
// base update, and kernel build dependencies.
func (suse OpenSUSE) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	main := "http://download.opensuse.org/"
	discontinued := "http://ftp.gwdg.de/pub/opensuse/discontinued/"

	var repourls []string

	if strings.HasPrefix(suse.release, "12") ||
		strings.HasPrefix(suse.release, "13") {
		dist := discontinued + "distribution/%s/repo/oss/"
		update := discontinued + "update/%s/"
		repourls = append(repourls,
			fmt.Sprintf(dist, suse.release),
			fmt.Sprintf(update, suse.release),
		)
	} else if strings.HasPrefix(suse.release, "42") {
		dist := discontinued + "distribution/leap/%s/repo/oss/suse/"
		update := discontinued + "update/leap/%s/oss/"
		repourls = append(repourls,
			fmt.Sprintf(dist, suse.release),
			fmt.Sprintf(update, suse.release),
		)
	} else if strings.HasPrefix(suse.release, "15") {
		dist := main + "distribution/leap/%s/repo/oss/"
		update := main + "update/leap/%s/oss/"
		repourls = append(repourls,
			fmt.Sprintf(dist, suse.release),
			fmt.Sprintf(update, suse.release),
		)

		// 15.3+ additionally uses the SLE update channel
		switch suse.release {
		case "15.3", "15.4", "15.5":
			sle := main + "update/leap/%s/sle/"
			repourls = append(repourls,
				fmt.Sprintf(sle, suse.release),
			)
		}
	}

	// replace the stock repo definitions with the ones above
	cmdf("rm /etc/zypp/repos.d/*")

	switch suse.release {
	case "12.1", "12.2":
		// dracut comes from the 12.3 repo, added at low priority
		repourl := discontinued + "distribution/12.3/repo/oss/"
		cmdf(`echo -e `+
			`"[dracut]\n`+
			`name=dracut\n`+
			`enabled=1\n`+
			`autorefresh=0\n`+
			`gpgcheck=0\n`+
			// higher number is lower priority
			// default is 99
			`priority=100\n`+
			`baseurl=%s" > /etc/zypp/repos.d/dracut.repo`,
			repourl,
		)
	}

	for i, repourl := range repourls {
		cmdf(`echo -e `+
			`"[%d]\n`+
			`name=%d\n`+
			`enabled=1\n`+
			`autorefresh=0\n`+
			`gpgcheck=0\n`+
			`baseurl=%s" > /etc/zypp/repos.d/%d.repo`,
			i, i, repourl, i,
		)
	}

	cmdf("zypper -n refresh")

	params := "--no-recommends --force-resolution"
	if !strings.HasPrefix(suse.release, "12") {
		params += " --replacefiles"
	}

	cmdf("zypper -n update %s", params)

	cmdf("zypper --no-refresh -n install %s -t pattern devel_kernel", params)

	// Cache dependencies
	cmdf("zypper -n install %s kernel-default kernel-default-devel "+
		"&& zypper -n remove -U kernel-default kernel-default-devel",
		params)

	switch suse.release {
	case "12.1", "12.2":
		cmdf("zypper -n install %s -r dracut dracut", params)
		cmdf("rm /etc/zypp/repos.d/dracut.repo")
	case "12.3":
		cmdf("zypper -n install %s dracut", params)
	}

	if !strings.HasPrefix(suse.release, "12") {
		cmdf("zypper --no-refresh -n install %s kmod which", params)
	}

	if strings.HasPrefix(suse.release, "13") {
		cmdf("zypper --no-refresh -n install %s kernel-firmware", params)
	}

	// kernels are installed per-job later; keep the image clean
	cmdf("rm -rf /boot/*")
	cmdf("rm -rf /lib/modules/*")

	return
}
// Install installs the requested kernel-default version (and devel
// headers when asked) in the container, rebuilds the initrd with a
// custom dracut module that force-loads af_packet/e1000, and copies
// /boot, modules and sources out through the /target volume mounts.
func (suse OpenSUSE) Install(version string, headers bool) (err error) {
	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	installcmd := "zypper --no-refresh -n install "
	if !strings.HasPrefix(suse.release, "12") {
		installcmd += " --replacefiles"
	}
	installcmd += " --no-recommends --force-resolution --capability"

	cmdf("%s kernel-default=%s", installcmd, version)
	if headers {
		cmdf("%s kernel-default-devel=%s", installcmd, version)
	}

	// custom dracut module "42workaround": pre-mount hook that
	// modprobes af_packet and e1000
	cmdf("mkdir /usr/lib/dracut/modules.d/42workaround")
	wsetuppath := "/usr/lib/dracut/modules.d/42workaround/module-setup.sh"

	cmdf("echo 'check() { return 0; }' >> %s", wsetuppath)
	cmdf("echo 'depends() { return 0; }' >> %s", wsetuppath)
	cmdf(`echo 'install() { `+
		`inst_hook pre-mount 91 "$moddir/workaround.sh"; `+
		`}' >> %s`, wsetuppath)
	cmdf("echo 'installkernel() { "+
		"instmods af_packet e1000; "+
		"}' >> %s", wsetuppath)

	wpath := "/usr/lib/dracut/modules.d/42workaround/workaround.sh"

	cmdf("echo '#!/bin/sh' >> %s", wpath)
	cmdf("echo 'modprobe af_packet' >> %s", wpath)
	cmdf("echo 'modprobe e1000' >> %s", wpath)

	modules := "ata_piix e1000 rfkill af_packet"
	if suse.release != "15.2" {
		modules += " libata ext4 sd_mod"
	}

	// 12.x uses --no-hostonly/--add-drivers; newer releases use
	// --force-drivers
	format := "dracut "
	format += "-a workaround "
	if strings.HasPrefix(suse.release, "12") {
		format += "--no-hostonly --add-drivers '%s' "
	} else {
		format += "--force-drivers '%s' "
	}
	format += "-f /boot/initrd-$(ls /lib/modules) $(ls /lib/modules)"
	cmdf(format, modules)

	// export results to the host via the /target volume mounts
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(suse.Distro())
	if err != nil {
		return
	}
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
// RootFS returns the rootfs image filename; only the major part of the
// release is used (e.g. "15.4" -> "out_of_tree_opensuse_15.img").
func (suse OpenSUSE) RootFS() string {
	major := strings.Split(suse.release, ".")[0]
	return "out_of_tree_opensuse_" + major + ".img"
}

View File

@ -0,0 +1,222 @@
package oraclelinux
import (
"fmt"
"regexp"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// init registers every supported Oracle Linux release with the distro
// registry.
func init() {
	releases := []string{"6", "7", "8", "9"}
	for _, release := range releases {
		distro.Register(OracleLinux{release: release})
	}
}
// OracleLinux is the distro backend for Oracle Linux releases.
type OracleLinux struct {
	release string
}
// Equal reports whether d refers to this Oracle Linux release.
func (ol OracleLinux) Equal(d distro.Distro) bool {
	return d.ID == distro.OracleLinux && d.Release == ol.release
}
// Distro returns the generic descriptor of this backend.
func (ol OracleLinux) Distro() distro.Distro {
	return distro.Distro{ID: distro.OracleLinux, Release: ol.release}
}
// Packages builds (if needed) the container for this release and lists
// all available kernel and kernel-uek package versions via yum search.
func (ol OracleLinux) Packages() (pkgs []string, err error) {
	c, err := container.New(ol.Distro())
	if err != nil {
		return
	}

	if !c.Exist() {
		err = c.Build("oraclelinux:"+ol.release,
			ol.envs(), ol.runs())
		if err != nil {
			return
		}
	}

	if ol.release == "8" {
		// Image for ol9 is required for some kernels
		// See notes in OracleLinux.Kernels()
		_, err = OracleLinux{release: "9"}.Packages()
		if err != nil {
			return
		}
	}

	cmd := "yum search kernel --showduplicates 2>/dev/null " +
		"| grep '^kernel-[0-9]\\|^kernel-uek-[0-9]' " +
		"| grep -v src " +
		"| cut -d ' ' -f 1"

	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
// Kernels lists the kernels of this release's container, applying
// per-version quirks: building some 5.15 uek kernels in the next
// release's container, disabling smap for known-broken versions, and
// blocklisting versions that soft-lockup.
func (ol OracleLinux) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(ol.Distro())
	if err != nil {
		return
	}

	kernels, err = c.Kernels()
	if err != nil {
		return
	}

	// Some kernels do not work with the smap enabled
	//
	// BUG: unable to handle kernel paging request at 00007fffc64b2fda
	// IP: [<ffffffff8127a9ed>] strnlen+0xd/0x40"
	// ...
	// Call Trace:
	// [<ffffffff81123bf8>] dtrace_psinfo_alloc+0x138/0x390
	// [<ffffffff8118b143>] do_execve_common.isra.24+0x3c3/0x460
	// [<ffffffff81554d70>] ? rest_init+0x80/0x80
	// [<ffffffff8118b1f8>] do_execve+0x18/0x20
	// [<ffffffff81554dc2>] kernel_init+0x52/0x180
	// [<ffffffff8157cd2c>] ret_from_fork+0x7c/0xb0
	//
	smapBlocklist := []string{
		"3.8.13-16",
		"3.8.13-26",
		"3.8.13-35",
		"3.8.13-44",
		"3.8.13-55",
		"3.8.13-68",
		"3.8.13-98",
	}

	// BUG: soft lockup - CPU#0 stuck for 61s!
	blocklistr := regexp.MustCompile(
		`2[.]6[.]32-300[.]3(2[.][2-3]|[3-9][.][0-9])`)

	for i, k := range kernels {
		// The latest uek kernels require gcc-11, which is
		// only present in el8 with scl load, so not so
		// convinient. It is possible to just build from
		// the next release container.
		if strings.Contains(k.KernelVersion, "5.15.0") {
			cnt := strings.Replace(k.ContainerName, "8", "9", -1)
			kernels[i].ContainerName = cnt
		}

		for _, ver := range smapBlocklist {
			if strings.Contains(k.KernelVersion, ver) {
				kernels[i].CPU.Flags = append(
					kernels[i].CPU.Flags, "smap=off",
				)
			}
		}

		if blocklistr.MatchString(k.KernelVersion) {
			kernels[i].Blocklisted = true
		}
	}

	return
}
// envs returns extra container build environment (none for Oracle
// Linux).
func (ol OracleLinux) envs() (envs []string) {
	return
}
// runs returns the container build commands: enable all repos, raise
// the installonly limit so many kernels can coexist, update, and
// install the module build toolchain.
func (ol OracleLinux) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	// releases registered in init() are the single digits "6".."9",
	// so this lexicographic comparison is safe here
	if ol.release < "6" {
		log.Fatal().Msgf("no support for pre-EL6")
	}

	cmdf("sed -i 's/enabled=0/enabled=1/' /etc/yum.repos.d/*")
	cmdf("sed -i 's;installonly_limit=;installonly_limit=100500;' /etc/yum.conf /etc/dnf/dnf.conf || true")
	cmdf("yum -y update")

	cmdf("yum -y groupinstall 'Development Tools'")

	packages := "linux-firmware grubby"
	if ol.release <= "7" {
		packages += " libdtrace-ctf"
	}

	cmdf("yum -y install %s", packages)
	return
}
// Install installs the named kernel package (and matching devel
// package when headers is set), regenerates the initramfs with the
// drivers needed to boot under qemu, and copies /boot, modules and
// sources out through the /target volume mounts.
func (ol OracleLinux) Install(pkgname string, headers bool) (err error) {
	var headerspkg string
	if headers {
		// uek kernels use the kernel-uek-devel naming
		if strings.Contains(pkgname, "uek") {
			headerspkg = strings.Replace(pkgname,
				"kernel-uek", "kernel-uek-devel", -1)
		} else {
			headerspkg = strings.Replace(pkgname,
				"kernel", "kernel-devel", -1)
		}
	}

	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	cmdf("yum -y install %s %s", pkgname, headerspkg)

	// derive the version string from the package name
	var version string
	if strings.Contains(pkgname, "uek") {
		version = strings.Replace(pkgname, "kernel-uek-", "", -1)
	} else {
		version = strings.Replace(pkgname, "kernel-", "", -1)
	}

	if ol.release <= "7" {
		cmdf("dracut -v --add-drivers 'e1000 ext4' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	} else {
		cmdf("dracut -v --add-drivers 'ata_piix libata' "+
			"--force-drivers 'e1000 ext4 sd_mod' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	}

	// export results to the host via the /target volume mounts
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(ol.Distro())
	if err != nil {
		return
	}
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
// RootFS returns the rootfs image filename for this release.
func (ol OracleLinux) RootFS() string {
	return "out_of_tree_oraclelinux_" + ol.release + ".img"
}

View File

@ -0,0 +1,19 @@
package oraclelinux
import (
"testing"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// TestOracleLinux is an integration test: it requires a container
// runtime and network access to build the ol9 image and list packages.
func TestOracleLinux(t *testing.T) {
	assert := assert.New(t)

	u := OracleLinux{release: "9"}

	assert.True(u.Equal(distro.Distro{Release: "9", ID: distro.OracleLinux}))

	assert.NotEmpty(u.Packages())
}

165
distro/ubuntu/ubuntu.go Normal file
View File

@ -0,0 +1,165 @@
package ubuntu
import (
"fmt"
"strings"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// init registers every supported Ubuntu LTS release with the distro
// registry.
func init() {
	releases := []string{
		"12.04",
		"14.04",
		"16.04",
		"18.04",
		"20.04",
		"22.04",
	}
	for _, release := range releases {
		distro.Register(Ubuntu{release: release})
	}
}
// Ubuntu is the distro backend for Ubuntu releases.
type Ubuntu struct {
	release string
}
// Equal reports whether d refers to this Ubuntu release.
func (u Ubuntu) Equal(d distro.Distro) bool {
	return d.ID == distro.Ubuntu && d.Release == u.release
}
// Distro returns the generic descriptor of this backend.
func (u Ubuntu) Distro() distro.Distro {
	return distro.Distro{ID: distro.Ubuntu, Release: u.release}
}
// Packages builds (if needed) the container for this release and lists
// all generic linux-image package names from the apt cache.
func (u Ubuntu) Packages() (pkgs []string, err error) {
	c, err := container.New(u.Distro())
	if err != nil {
		return
	}

	if !c.Exist() {
		err = c.Build("ubuntu:"+u.release, u.envs(), u.runs())
		if err != nil {
			return
		}
	}

	cmd := "apt-cache search " +
		"--names-only '^linux-image-[0-9\\.\\-]*-generic$' " +
		"| awk '{ print $1 }'"

	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
// Kernels returns the kernels installed in this release's container.
func (u Ubuntu) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(u.Distro())
	if err == nil {
		kernels, err = c.Kernels()
	}
	return
}
// envs returns the container build environment; noninteractive apt
// disables interactive prompts during package installation.
func (u Ubuntu) envs() (envs []string) {
	envs = append(envs, "DEBIAN_FRONTEND=noninteractive")
	return
}
// runs returns the container build commands: mirror fixups for EOL
// releases, the build toolchain, release-specific quirks, and a
// one-shot kernel install/remove to warm the dependency cache.
func (u Ubuntu) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	// lexicographic comparison is valid for the NN.04 releases
	// registered in init()
	if u.release < "14.04" {
		cmdf("sed -i 's/archive.ubuntu.com/old-releases.ubuntu.com/' " +
			"/etc/apt/sources.list")
	}

	cmdf("apt-get update")
	cmdf("apt-get install -y build-essential libelf-dev")
	cmdf("apt-get install -y wget git")

	if u.release == "12.04" {
		// kernel packages expect grub machinery to be present
		cmdf("apt-get install -y grub")
		cmdf("cp /bin/true /usr/sbin/grub-probe")
		cmdf("mkdir -p /boot/grub")
		cmdf("touch /boot/grub/menu.lst")
	}

	if u.release < "14.04" {
		return
	}

	if u.release == "22.04" {
		cmdf("apt-get install -y gcc-12")
		return
	}

	cmdf("apt-get install -y libseccomp-dev")

	// Install and remove a single kernel and headers.
	// This ensures that all dependencies are cached.
	cmd := "export HEADERS=$(apt-cache search " +
		"--names-only '^linux-headers-[0-9\\.\\-]*-generic' " +
		"| awk '{ print $1 }' | head -n 1)"
	cmd += " KERNEL=$(echo $HEADERS | sed 's/headers/image/')"
	cmd += " MODULES=$(echo $HEADERS | sed 's/headers/modules/')"
	cmd += " && apt-get install -y $HEADERS $KERNEL $MODULES"
	cmd += " && apt-get remove -y $HEADERS $KERNEL $MODULES"
	cmdf(cmd)

	return
}
// Install installs the named linux-image package (and matching headers
// when asked) in the container and copies /boot, modules and sources
// out through the /target volume mounts.
func (u Ubuntu) Install(pkgname string, headers bool) (err error) {
	var headerspkg string
	if headers {
		headerspkg = strings.Replace(pkgname, "image", "headers", -1)
	}

	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	cmdf("apt-get install -y %s %s", pkgname, headerspkg)
	// export results to the host via the /target volume mounts
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(u.Distro())
	if err != nil {
		return
	}
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
// RootFS returns the rootfs image filename, with dots in the release
// replaced by double underscores (e.g. "22.04" -> "22__04").
func (u Ubuntu) RootFS() string {
	release := strings.Replace(u.release, ".", "__", -1)
	return fmt.Sprintf("out_of_tree_ubuntu_%s.img", release)
}

View File

@ -0,0 +1,19 @@
package ubuntu
import (
"testing"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// TestUbuntu is an integration test: it requires a container runtime
// and network access to build the 22.04 image and list packages.
func TestUbuntu(t *testing.T) {
	assert := assert.New(t)

	u := Ubuntu{release: "22.04"}

	assert.True(u.Equal(distro.Distro{Release: "22.04", ID: distro.Ubuntu}))

	assert.NotEmpty(u.Packages())
}

1
docs/conf.py Normal file
View File

@ -0,0 +1 @@
project = "out-of-tree"

1
examples/kernel-exploit/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
logs

View File

@ -1,38 +1,28 @@
# out-of-tree configuration file
# docs at https://out-of-tree.io
name = "out-of-tree exploit example"
name = "exploit_example"
type = "exploit"
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "16.04"
release_mask = "4[.]4[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58|59|60|61|62|63|64|65|66|67|68|69|70|71|72|73|74|75|76|77|78|79|80|81|82|83|84|85|86|87|88|89|90|91|92|93|94|95|96|97|98|99|100|101|102|103|104|105|106|107|108|109|110|111|112|113|114|115|116)-.*"
[[targets]]
distro = { id = "Ubuntu", release = "16.04" }
kernel = { regex = "4[.]4[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58|59|60|61|62|63|64|65|66|67|68|69|70|71|72|73|74|75|76|77|78|79|80|81|82|83|84|85|86|87|88|89|90|91|92|93|94|95|96|97|98|99|100|101|102|103|104|105|106|107|108|109|110|111|112|113|114|115|116)-.*" }
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "16.04"
release_mask = "4[.]8[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58)-.*"
[[targets]]
distro = { id = "Ubuntu", release = "16.04" }
kernel = { regex = "4[.]8[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58)-.*" }
[[supported_kernels]]
[[targets]]
# Can be Ubuntu/CentOS/Debian/etc.
distro_type = "Ubuntu"
distro_release = "16.04"
distro = { id = "Ubuntu", release = "16.04" }
# regex for `uname -r`
# See also: regex-golang.appspot.com
# stupid way to generate: $ echo '4.4.0-('$(seq 44 | xargs echo | sed 's/ /|/g')')-.*'
release_mask = "4[.]10[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42)-.*"
kernel = { regex = "4[.]10[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42)-.*" }
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "16.04"
release_mask = "4[.]11[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14)-.*"
[[targets]]
distro = { id = "Ubuntu", release = "16.04" }
kernel = { regex = "4[.]11[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14)-.*" }
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "16.04"
# equivalent for "4[.]13[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21)-.*"
[supported_kernels.kernel]
version = [ 4 ]
major = [ 13 ]
minor = [ 0 ]
patch = [ 1, 21 ]
[[targets]]
distro = { id = "Ubuntu", release = "16.04" }
kernel = { regex = "4[.]13[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21)-.*" }

View File

@ -12,3 +12,4 @@ GPATH
GRTAGS
GTAGS
.cache.mk
logs

View File

@ -1,30 +1,26 @@
# out-of-tree configuration file
# docs at https://out-of-tree.io
name = "out-of-tree module example"
name = "module_example"
type = "module"
[[supported_kernels]]
[[targets]]
# Can be Ubuntu/CentOS/Debian/etc.
distro_type = "Ubuntu"
distro_release = "16.04"
distro = { id = "Ubuntu", release = "16.04" }
# regex for `uname -r`
# See also: regex-golang.appspot.com
release_mask = "4[.]4[.]0-70-.*"
kernel = { regex = "4[.]4[.]0-70-.*" }
# [[supported_kernels]] may be defined unlimited number of times
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "18.04"
# [[targets]] may be defined unlimited number of times
[[targets]]
distro = { id = "Ubuntu", release = "18.04" }
# Also you can use only one kernel
release_mask = "4[.]15[.]0-(24|29)-generic"
kernel = { regex = "4[.]15[.]0-(24|29)-generic" }
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "18.04"
[[targets]]
distro = { id = "Ubuntu", release = "18.04" }
# Also you can use only one kernel
release_mask = "4[.]15[.]0-23-generic"
kernel = { regex = "4[.]15[.]0-23-generic" }
[[supported_kernels]]
distro_type = "CentOS"
distro_release = "7"
release_mask = "3[.]10[.]0-862.el7.x86_64"
[[targets]]
distro = { id = "CentOS", release = "7" }
kernel = { regex = "3[.]10[.]0-862.el7.x86_64" }

1
examples/preload/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
logs

View File

@ -1,10 +1,9 @@
name = "out-of-tree preload"
name = "preload_example"
type = "module"
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "18.04"
release_mask = ".*"
[[targets]]
distro = { id = "Ubuntu", release = "18.04" }
kernel = { regex = ".*" }
[[preload]]
repo = "https://github.com/openwall/lkrg"

3
examples/preload/test.sh Normal file
View File

@ -0,0 +1,3 @@
#!/bin/sh
dmesg | grep BLOCK

1
examples/script/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
logs

View File

@ -1,11 +1,10 @@
# out-of-tree configuration file
# docs at https://out-of-tree.io
name = "out-of-tree script example"
name = "script_example"
type = "script"
script = "script.sh"
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "22.04"
release_mask = ".*"
[[targets]]
distro = { id = "Ubuntu", release = "22.04" }
kernel = { regex = ".*" }

130
flake.lock Normal file
View File

@ -0,0 +1,130 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1705309234,
"narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1694529238,
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gomod2nix": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1705314449,
"narHash": "sha256-yfQQ67dLejP0FLK76LKHbkzcQqNIrux6MFe32MMFGNQ=",
"owner": "nix-community",
"repo": "gomod2nix",
"rev": "30e3c3a9ec4ac8453282ca7f67fca9e1da12c3e6",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "gomod2nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1658285632,
"narHash": "sha256-zRS5S/hoeDGUbO+L95wXG9vJNwsSYcl93XiD0HQBXLk=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5342fc6fb59d0595d26883c3cadff16ce58e44f3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "master",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1708296515,
"narHash": "sha256-FyF489fYNAUy7b6dkYV6rGPyzp+4tThhr80KNAaF/yY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b98a4e1746acceb92c509bc496ef3d0e5ad8d4aa",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"gomod2nix": "gomod2nix",
"nixpkgs": "nixpkgs_2"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

24
flake.nix Normal file
View File

@ -0,0 +1,24 @@
{
description = "kernel {module, exploit} development tool";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
inputs.flake-utils.url = "github:numtide/flake-utils";
inputs.gomod2nix.url = "github:nix-community/gomod2nix";
outputs = { self, nixpkgs, flake-utils, gomod2nix }:
(flake-utils.lib.eachDefaultSystem
(system:
let
pkgs = import nixpkgs {
inherit system;
overlays = [ gomod2nix.overlays.default ];
};
version = self.lastModifiedDate;
in
{
packages.default = pkgs.callPackage ./. { inherit version; };
devShells.default = import ./shell.nix { inherit pkgs; };
})
);
}

72
fs/fs.go Normal file
View File

@ -0,0 +1,72 @@
package fs
import (
"errors"
"os"
"path/filepath"
"strings"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
)
// CaseInsensitive check
func CaseInsensitive(dir string) (yes bool, err error) {
pathLowercase := filepath.Join(dir, "file")
fLowercase, err := os.Create(pathLowercase)
if err != nil {
return
}
defer fLowercase.Close()
defer os.Remove(pathLowercase)
pathUppercase := filepath.Join(dir, "FILE")
fUppercase, err := os.Create(pathUppercase)
if err != nil {
return
}
defer fUppercase.Close()
defer os.Remove(pathUppercase)
statLowercase, err := fLowercase.Stat()
if err != nil {
return
}
statUppercase, err := fUppercase.Stat()
if err != nil {
return
}
yes = os.SameFile(statLowercase, statUppercase)
return
}
// PathExists check
func PathExists(path string) bool {
if _, err := os.Stat(path); err != nil {
return false
}
return true
}
// TempDir creates a fresh, uniquely-named temporary directory under the
// "tmp" subdirectory of the out-of-tree dotfiles directory and returns
// its path. The caller is responsible for removing it when done.
func TempDir() (string, error) {
	return os.MkdirTemp(dotfiles.Dir("tmp"), "")
}
func FindBySubstring(dir, substring string) (k string, err error) {
files, err := os.ReadDir(dir)
if err != nil {
return
}
for _, file := range files {
if strings.Contains(file.Name(), substring) {
k = filepath.Join(dir, file.Name())
return
}
}
err = errors.New("not found")
return
}

28
go.mod
View File

@ -1,24 +1,28 @@
module code.dumpstack.io/tools/out-of-tree
go 1.17
replace code.dumpstack.io/tools/out-of-tree/qemu => ./qemu
replace code.dumpstack.io/tools/out-of-tree/config => ./config
go 1.21
require (
github.com/Masterminds/semver v1.5.0
github.com/alecthomas/kong v0.7.1
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/davecgh/go-spew v1.1.1
github.com/go-git/go-git/v5 v5.6.1
github.com/google/uuid v1.6.0
github.com/mattn/go-sqlite3 v1.14.16
github.com/mitchellh/go-homedir v1.1.0
github.com/naoina/toml v0.1.1
github.com/natefinch/lumberjack v2.0.0+incompatible
github.com/olekukonko/tablewriter v0.0.5
github.com/otiai10/copy v1.10.0
github.com/otiai10/copy v1.11.0
github.com/povsister/scp v0.0.0-20210427074412-33febfd9f13e
github.com/rapidloop/skv v0.0.0-20180909015525-9def2caac4cc
github.com/remeh/sizedwaitgroup v1.0.0
github.com/rs/zerolog v1.29.0
github.com/rs/zerolog v1.29.1
github.com/stretchr/testify v1.7.0
github.com/zcalusic/sysinfo v0.9.5
golang.org/x/crypto v0.7.0
golang.org/x/crypto v0.9.0
golang.org/x/time v0.3.0
gopkg.in/logrusorgru/aurora.v2 v2.0.3
)
@ -27,6 +31,7 @@ require (
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
github.com/acomagu/bufpipe v1.0.4 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/cloudflare/circl v1.1.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
@ -40,11 +45,14 @@ require (
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/sergi/go-diff v1.1.0 // indirect
github.com/skeema/knownhosts v1.1.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sys v0.8.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.3.0 // indirect
gopkg.in/yaml.v3 v3.0.0 // indirect
)

51
go.sum
View File

@ -1,5 +1,7 @@
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA=
@ -16,10 +18,14 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4=
github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4=
github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY=
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@ -40,6 +46,8 @@ github.com/go-git/go-git/v5 v5.6.1/go.mod h1:mvyoL6Unz0PiTQrGQfSfiLFhBH1c1e84ylC
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
@ -80,8 +88,8 @@ github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6Yf
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/otiai10/copy v1.10.0 h1:znyI7l134wNg/wDktoVQPxPkgvhDfGCYUasey+h0rDQ=
github.com/otiai10/copy v1.10.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww=
github.com/otiai10/copy v1.11.0 h1:OKBD80J/mLBrwnzXqGtFCzprFSGioo30JcmR4APsNwc=
github.com/otiai10/copy v1.11.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
@ -90,11 +98,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/povsister/scp v0.0.0-20210427074412-33febfd9f13e h1:VtsDti2SgX7M7jy0QAyGgb162PeHLrOaNxmcYOtaGsY=
github.com/povsister/scp v0.0.0-20210427074412-33febfd9f13e/go.mod h1:i1Au86ZXK0ZalQNyBp2njCcyhSCR/QP/AMfILip+zNI=
github.com/rapidloop/skv v0.0.0-20180909015525-9def2caac4cc h1:eXQoy66wUI9meNnIdKYJ+EV/Tq3LvXeUe95AB2dPk8g=
github.com/rapidloop/skv v0.0.0-20180909015525-9def2caac4cc/go.mod h1:V5hvlcTzUJ3MOo0fEolWR25CZBBsb7q3wWVAmBYwr54=
github.com/remeh/sizedwaitgroup v1.0.0 h1:VNGGFwNo/R5+MJBf6yrsr110p0m4/OX4S3DCy7Kyl5E=
github.com/remeh/sizedwaitgroup v1.0.0/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNCJ1V+9+NVNYlo=
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w=
github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc=
github.com/rs/zerolog v1.29.1/go.mod h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@ -112,17 +124,17 @@ github.com/zcalusic/sysinfo v0.9.5 h1:ivoHyj9aIAYkwzo1+8QgJ5s4oeE6Etx9FmZtqa4wJj
github.com/zcalusic/sysinfo v0.9.5/go.mod h1:Z/gPVufBrFc8X5sef3m6kkw3r3nlNFp+I6bvASfvBZQ=
golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@ -131,11 +143,10 @@ golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfS
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -155,28 +166,29 @@ golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -190,8 +202,9 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYs
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

135
gomod2nix.toml Normal file
View File

@ -0,0 +1,135 @@
schema = 3
[mod]
[mod."github.com/BurntSushi/toml"]
version = "v1.2.1"
hash = "sha256-Z1dlsUTjF8SJZCknYKt7ufJz8NPGg9P9+W17DQn+LO0="
[mod."github.com/Masterminds/semver"]
version = "v1.5.0"
hash = "sha256-3fEInOXFdzCiGdDZ1s9otEes7VXiL8Q1RVB3zXRPJsQ="
[mod."github.com/Microsoft/go-winio"]
version = "v0.5.2"
hash = "sha256-g+kEivzu+sIaO5fDGR4RCpm3LmJSzmsAO16wAzBnP6c="
[mod."github.com/ProtonMail/go-crypto"]
version = "v0.0.0-20230217124315-7d5c6f04bbb8"
hash = "sha256-QWS55wWNCrgx6BbIrroWpc1s08FeSqf2ehNTXkhbDJQ="
[mod."github.com/acomagu/bufpipe"]
version = "v1.0.4"
hash = "sha256-gO76ADEf7bzVUhmZbRU/LNA+L9qCdb/aaAAavvj26mA="
[mod."github.com/alecthomas/kong"]
version = "v0.7.1"
hash = "sha256-Wyg4T/TX2Le7SsbA3YFX1LVRsc8+7e8JPf1elBs/jdo="
[mod."github.com/boltdb/bolt"]
version = "v1.3.1"
hash = "sha256-eSxMiPaicRFOVsgwU8XOWrgvprJfuPfA8CQ6GakB8nw="
[mod."github.com/cavaliergopher/grab/v3"]
version = "v3.0.1"
hash = "sha256-7yixBq4kPAp+NqHvEC4xCKFwI5bqSbZfzdVVLwvMvl4="
[mod."github.com/cloudflare/circl"]
version = "v1.1.0"
hash = "sha256-3FxALC6ZXwhv+MwZsh3iHusx0E4Mh/SoxyfXWIwD3MU="
[mod."github.com/davecgh/go-spew"]
version = "v1.1.1"
hash = "sha256-nhzSUrE1fCkN0+RL04N4h8jWmRFPPPWbCuDc7Ss0akI="
[mod."github.com/emirpasic/gods"]
version = "v1.18.1"
hash = "sha256-hGDKddjLj+5dn2woHtXKUdd49/3xdsqnhx7VEdCu1m4="
[mod."github.com/go-git/gcfg"]
version = "v1.5.0"
hash = "sha256-A62eSLI/0y4sfwCsZhe/uoSC9Z3TryyW+PyWIcknYdE="
[mod."github.com/go-git/go-billy/v5"]
version = "v5.4.1"
hash = "sha256-BGkU8ryX1czUc6s30qt4vjt2NTtkWdwDjQxZ3LxGC2k="
[mod."github.com/go-git/go-git/v5"]
version = "v5.6.1"
hash = "sha256-8HbSt4yX7B2ozSFj8Beoo05GcHb8/rBt/6ILkoTEtd8="
[mod."github.com/imdario/mergo"]
version = "v0.3.13"
hash = "sha256-03LKAZXgR5YUMeK5IRh2ds2GyV6uSbdq5QCPbAzlTus="
[mod."github.com/jbenet/go-context"]
version = "v0.0.0-20150711004518-d14ea06fba99"
hash = "sha256-VANNCWNNpARH/ILQV9sCQsBWgyL2iFT+4AHZREpxIWE="
[mod."github.com/kevinburke/ssh_config"]
version = "v1.2.0"
hash = "sha256-Ta7ZOmyX8gG5tzWbY2oES70EJPfI90U7CIJS9EAce0s="
[mod."github.com/kylelemons/godebug"]
version = "v1.1.0"
hash = "sha256-DJ0re9mGqZb6PROQI8NPC0JVyDHdZ/y4uehNH7MbczY="
[mod."github.com/mattn/go-colorable"]
version = "v0.1.12"
hash = "sha256-Y1vCt0ShrCz4wSmwsppCfeLPLKrWusc2zM2lUFwDMyI="
[mod."github.com/mattn/go-isatty"]
version = "v0.0.14"
hash = "sha256-e8zn5eCVh/B1HOP1PGXeXH0bGkIV0vKYP9KLwZni5as="
[mod."github.com/mattn/go-runewidth"]
version = "v0.0.9"
hash = "sha256-dK/kIPe1tcxEubwI4CWfov/HWRBgD/fqlPC3d5i30CY="
[mod."github.com/mattn/go-sqlite3"]
version = "v1.14.16"
hash = "sha256-Ky0kas72AY0lpuRiC/fQk9rw9aJ6dvL9y1Ikw5PFzlA="
[mod."github.com/mitchellh/go-homedir"]
version = "v1.1.0"
hash = "sha256-oduBKXHAQG8X6aqLEpqZHs5DOKe84u6WkBwi4W6cv3k="
[mod."github.com/naoina/go-stringutil"]
version = "v0.1.0"
hash = "sha256-htVZGTbH2kFO56UrWfZUwc6DDhgU/TCXrzEPy8MNAwE="
[mod."github.com/naoina/toml"]
version = "v0.1.1"
hash = "sha256-Tq9NDUJSye1staRAuT32AqI3qLfxDQH1nAkZPpntB04="
[mod."github.com/natefinch/lumberjack"]
version = "v2.0.0+incompatible"
hash = "sha256-CLir3wRkgNy7tXQWODk7u3RP/W7qIsO2LADdM6/vWtQ="
[mod."github.com/olekukonko/tablewriter"]
version = "v0.0.5"
hash = "sha256-/5i70IkH/qSW5KjGzv8aQNKh9tHoz98tqtL0K2DMFn4="
[mod."github.com/otiai10/copy"]
version = "v1.11.0"
hash = "sha256-2xQtmy9eor9BIhNuvs52noIQDJ1alG3ZXumXXSL6l9Q="
[mod."github.com/pjbgf/sha1cd"]
version = "v0.3.0"
hash = "sha256-kX9BdLh2dxtGNaDvc24NORO+C0AZ7JzbrXrtecCdB7w="
[mod."github.com/povsister/scp"]
version = "v0.0.0-20210427074412-33febfd9f13e"
hash = "sha256-SgFphgTQZQtiINSQDjY9K7fCPhynWR3SsBe+X9nQqKo="
[mod."github.com/rapidloop/skv"]
version = "v0.0.0-20180909015525-9def2caac4cc"
hash = "sha256-q5AMoiWcLDNwriaownVVyP58DpRa5J0KZjkyJKSf42I="
[mod."github.com/remeh/sizedwaitgroup"]
version = "v1.0.0"
hash = "sha256-CtjNoNeep0TnfkuRN/rc48diAo0jUog1fOz3I/z6jfc="
[mod."github.com/rs/zerolog"]
version = "v1.29.1"
hash = "sha256-UX+uiffB13Wdt1DGatxm0WiDWnI91w5Vxfhz4D7oLAw="
[mod."github.com/sergi/go-diff"]
version = "v1.1.0"
hash = "sha256-8NJMabldpf40uwQN20T6QXx5KORDibCBJL02KD661xY="
[mod."github.com/skeema/knownhosts"]
version = "v1.1.0"
hash = "sha256-WpHhJaZea5zh7PBZPuL4a9XJyADeS7TK0DGnQF4NjmI="
[mod."github.com/xanzy/ssh-agent"]
version = "v0.3.3"
hash = "sha256-l3pGB6IdzcPA/HLk93sSN6NM2pKPy+bVOoacR5RC2+c="
[mod."github.com/zcalusic/sysinfo"]
version = "v0.9.5"
hash = "sha256-An8f5875m2OgrdzzwwY4DTVKq4+zFgjypX4PL/QX1Bo="
[mod."golang.org/x/crypto"]
version = "v0.9.0"
hash = "sha256-RpGvWrx96GBXpu1zsWxdv9/+WcRmjBxOC7fvSgOJGL0="
[mod."golang.org/x/net"]
version = "v0.10.0"
hash = "sha256-HkGiUYBZOBdOtt7mYo3N3swFjjAXzW++pG2JeWGJR9Q="
[mod."golang.org/x/sys"]
version = "v0.8.0"
hash = "sha256-wLPPnoFkHM1HPUaFIfRyQZOJjrqXVZimB0nMySly7Xg="
[mod."golang.org/x/time"]
version = "v0.3.0"
hash = "sha256-/hmc9skIswMYbivxNS7R8A6vCTUF9k2/7tr/ACkcEaM="
[mod."gopkg.in/logrusorgru/aurora.v2"]
version = "v2.0.3"
hash = "sha256-7o5Fh4jscdYKgXfnNMbcD68Kjw8Z4LcPgHcr4ZyQYrI="
[mod."gopkg.in/natefinch/lumberjack.v2"]
version = "v2.2.1"
hash = "sha256-GaXWRDxhGy4Z4mgE+bJ8OE9SVvYUa9TnNiydnp2s1Ms="
[mod."gopkg.in/warnings.v0"]
version = "v0.1.2"
hash = "sha256-ATVL9yEmgYbkJ1DkltDGRn/auGAjqGOfjQyBYyUo8s8="

View File

@ -1,7 +0,0 @@
// Copyright 2019 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
const imagesBaseURL = "https://out-of-tree.fra1.digitaloceanspaces.com/1.0.0/"

178
images.go
View File

@ -1,178 +0,0 @@
// Copyright 2019 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"os/user"
"time"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/qemu"
"github.com/rs/zerolog/log"
)
// ImageCmd groups the `image` CLI subcommands (kong command tree).
type ImageCmd struct {
	List ImageListCmd `cmd:"" help:"list images"`
	Edit ImageEditCmd `cmd:"" help:"edit image"`
}
// ImageListCmd implements `image list`.
type ImageListCmd struct{}

// Run prints the name of every entry in ~/.out-of-tree/images/.
func (cmd *ImageListCmd) Run(g *Globals) (err error) {
	usr, err := user.Current()
	if err != nil {
		return
	}
	imagesDir := usr.HomeDir + "/.out-of-tree/images/"
	entries, err := os.ReadDir(imagesDir)
	if err != nil {
		return
	}
	for _, entry := range entries {
		fmt.Println(entry.Name())
	}
	return
}
// ImageEditCmd implements `image edit`: boot a mutable qemu instance
// with the named image as root filesystem so the user can modify it
// over ssh.
type ImageEditCmd struct {
	Name string `help:"image name" required:""`
}

// Run boots the image in qemu with Mutable set (changes persist),
// prints the ssh command to reach the VM, and powers it off once the
// user presses enter.
func (cmd *ImageEditCmd) Run(g *Globals) (err error) {
	usr, err := user.Current()
	if err != nil {
		return
	}
	image := usr.HomeDir + "/.out-of-tree/images/" + cmd.Name
	if !exists(image) {
		// BUG fix: previously this only printed a warning and
		// carried on, booting qemu with a missing rootfs.
		fmt.Println("image does not exist")
		return errors.New("image does not exist")
	}
	kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		return
	}
	if len(kcfg.Kernels) == 0 {
		return errors.New("No kernels found")
	}
	// Pick the kernel whose root filesystem is this image (zero value
	// — i.e. no kernel/initrd — is used when none matches).
	ki := config.KernelInfo{}
	for _, k := range kcfg.Kernels {
		if k.RootFS == image {
			ki = k
			break
		}
	}
	kernel := qemu.Kernel{
		KernelPath: ki.KernelPath,
		InitrdPath: ki.InitrdPath,
	}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		// BUG fix: this error was previously ignored, so a failed
		// qemu setup would be used anyway.
		return
	}
	q.Mutable = true
	err = q.Start()
	if err != nil {
		fmt.Println("Qemu start error:", err)
		return
	}
	defer q.Stop()
	fmt.Print("ssh command:\n\n\t")
	fmt.Println(q.GetSSHCommand())
	fmt.Print("\npress enter to stop")
	fmt.Scanln()
	q.Command("root", "poweroff")
	// Wait for the guest to actually shut down before returning.
	for !q.Died {
		time.Sleep(time.Second)
	}
	return
}
// inspired by Edd Turtle code
func downloadFile(filepath string, url string) (err error) {
out, err := os.Create(filepath)
if err != nil {
return
}
defer out.Close()
resp, err := http.Get(url)
if err != nil {
return
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK:
break
case http.StatusForbidden, http.StatusNotFound:
err = fmt.Errorf("Cannot download %s. It looks like you need "+
"to generate it manually and place it "+
"to ~/.out-of-tree/images/. "+
"Check documentation for additional information.", url)
return
default:
err = fmt.Errorf("Something weird happens while "+
"download file: %d", resp.StatusCode)
return
}
_, err = io.Copy(out, resp.Body)
return
}
// unpackTar extracts archive into destination using the system tar
// binary (-S preserves sparse files).
// NOTE: If you're change anything in tar command please check also
// BSD tar (or if you're using macOS, do not forget to check GNU Tar)
// Also make sure that sparse files are extracting correctly
func unpackTar(archive, destination string) (err error) {
	cmd := exec.Command("tar", "-Sxf", archive)
	cmd.Dir = destination + "/"
	log.Debug().Msgf("%v", cmd)

	rawOutput, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("%v: %s", err, rawOutput)
	}
	return
}
func downloadImage(path, file string) (err error) {
tmp, err := ioutil.TempDir("/tmp/", "out-of-tree_")
if err != nil {
return
}
defer os.RemoveAll(tmp)
archive := tmp + "/" + file + ".tar.gz"
url := imagesBaseURL + file + ".tar.gz"
err = downloadFile(archive, url)
if err != nil {
return
}
err = unpackTar(archive, path)
return
}

768
kernel.go
View File

@ -1,768 +0,0 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
import (
"errors"
"fmt"
"io/ioutil"
"math"
"math/rand"
"os"
"os/exec"
"os/user"
"regexp"
"runtime"
"strings"
"time"
"github.com/naoina/toml"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config"
)
// KernelCmd groups the `kernel` CLI subcommands together with the flags
// that are shared between them (download/force/headers/shuffle/retries).
type KernelCmd struct {
	NoDownload bool  `help:"do not download qemu image while kernel generation"`
	UseHost    bool  `help:"also use host kernels"`
	Force      bool  `help:"force reinstall kernel"`
	NoHeaders  bool  `help:"do not install kernel headers"`
	Shuffle    bool  `help:"randomize kernels installation order"`
	Retries    int64 `help:"amount of tries for each kernel" default:"10"`

	List        KernelListCmd        `cmd:"" help:"list kernels"`
	Autogen     KernelAutogenCmd     `cmd:"" help:"generate kernels based on the current config"`
	Genall      KernelGenallCmd      `cmd:"" help:"generate all kernels for distro"`
	Install     KernelInstallCmd     `cmd:"" help:"install specific kernel"`
	ConfigRegen KernelConfigRegenCmd `cmd:"" help:"regenerate config"`
}
// KernelListCmd implements `kernel list`.
type KernelListCmd struct{}

// Run prints distro type, distro release and kernel release for every
// kernel known to the kernels config.
func (cmd *KernelListCmd) Run(g *Globals) (err error) {
	kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		// Best-effort: an unreadable config is logged and treated
		// the same as an empty kernel list below.
		log.Debug().Err(err).Msg("read kernel config")
	}
	if len(kcfg.Kernels) == 0 {
		return errors.New("No kernels found")
	}
	for _, kernel := range kcfg.Kernels {
		fmt.Println(kernel.DistroType, kernel.DistroRelease,
			kernel.KernelRelease)
	}
	return
}
// KernelAutogenCmd implements `kernel autogen`: generate kernels for
// every [[supported_kernels]] section of the artifact config found in
// the current work directory.
type KernelAutogenCmd struct {
	Max int64 `help:"download kernels from set defined by regex in release_mask, but no more than X for each of release_mask" default:"100500"`
}

// Run reads .out-of-tree.toml from the work directory, generates
// kernels for each supported kernel mask, then regenerates the kernels
// config.
func (cmd KernelAutogenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	ka, err := config.ReadArtifactConfig(g.WorkDir + "/.out-of-tree.toml")
	if err != nil {
		return
	}
	for _, sk := range ka.SupportedKernels {
		if sk.DistroRelease == "" {
			// A mask without a release cannot be resolved to
			// a concrete set of kernels.
			err = errors.New("Please set distro_release")
			return
		}
		err = generateKernels(sk,
			g.Config.Docker.Registry,
			g.Config.Docker.Commands,
			cmd.Max, kernelCmd.Retries,
			!kernelCmd.NoDownload, // download qemu image
			kernelCmd.Force,
			!kernelCmd.NoHeaders, // install kernel headers
			kernelCmd.Shuffle,
		)
		if err != nil {
			return
		}
	}
	return updateKernelsCfg(kernelCmd.UseHost, !kernelCmd.NoDownload)
}
// KernelGenallCmd implements `kernel genall`: generate every available
// kernel for the given distro release (release mask ".*").
type KernelGenallCmd struct {
	Distro string `required:"" help:"distribution"`
	Ver    string `required:"" help:"distro version"`
}

// Run generates all kernels for the distro/version pair, then
// regenerates the kernels config.
func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	distroType, err := config.NewDistroType(cmd.Distro)
	if err != nil {
		return
	}
	km := config.KernelMask{
		DistroType:    distroType,
		DistroRelease: cmd.Ver,
		ReleaseMask:   ".*", // match every kernel of the release
	}
	err = generateKernels(km,
		g.Config.Docker.Registry,
		g.Config.Docker.Commands,
		math.MaxUint32, kernelCmd.Retries, // no per-mask limit
		!kernelCmd.NoDownload,
		kernelCmd.Force,
		!kernelCmd.NoHeaders,
		kernelCmd.Shuffle,
	)
	if err != nil {
		return
	}
	return updateKernelsCfg(kernelCmd.UseHost, !kernelCmd.NoDownload)
}
// KernelInstallCmd implements `kernel install`: generate/install the
// kernels matching a user-supplied release mask for one distro release.
type KernelInstallCmd struct {
	Distro string `required:"" help:"distribution"`
	Ver    string `required:"" help:"distro version"`
	Kernel string `required:"" help:"kernel release mask"`
}

// Run generates kernels matching the given mask, then regenerates the
// kernels config.
func (cmd *KernelInstallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	distroType, err := config.NewDistroType(cmd.Distro)
	if err != nil {
		return
	}
	km := config.KernelMask{
		DistroType:    distroType,
		DistroRelease: cmd.Ver,
		ReleaseMask:   cmd.Kernel,
	}
	err = generateKernels(km,
		g.Config.Docker.Registry,
		g.Config.Docker.Commands,
		math.MaxUint32, kernelCmd.Retries, // no per-mask limit
		!kernelCmd.NoDownload,
		kernelCmd.Force,
		!kernelCmd.NoHeaders,
		kernelCmd.Shuffle,
	)
	if err != nil {
		return
	}
	return updateKernelsCfg(kernelCmd.UseHost, !kernelCmd.NoDownload)
}
// KernelConfigRegenCmd is the `kernel config-regen` subcommand:
// regenerate the kernels config from already-existing containers.
type KernelConfigRegenCmd struct{}

// Run rebuilds ~/.out-of-tree/kernels.toml without generating anything.
func (cmd *KernelConfigRegenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	return updateKernelsCfg(kernelCmd.UseHost, !kernelCmd.NoDownload)
}
// matchDebImagePkg lists the Debian/Ubuntu linux-image package names
// available inside the container whose release matches mask, either
// as a regular expression or as a plain substring.
//
// Fix: the regex is now compiled before the container is started, so
// an invalid mask fails fast instead of after the apt-cache run.
func matchDebImagePkg(container, mask string) (pkgs []string, err error) {
	r, err := regexp.Compile("linux-image-" + mask)
	if err != nil {
		return
	}

	cmd := "apt-cache search --names-only '^linux-image-[0-9\\.\\-]*-generic' | awk '{ print $1 }'"

	// FIXME timeout should be in global out-of-tree config
	c, err := NewContainer(container, time.Hour)
	if err != nil {
		return
	}

	output, err := c.Run("/tmp", cmd)
	if err != nil {
		return
	}

	for _, pkg := range strings.Fields(output) {
		// Accept a regex match or a literal substring hit, so
		// plain versions like "5.4.0-100" also work.
		if r.MatchString(pkg) || strings.Contains(pkg, mask) {
			pkgs = append(pkgs, pkg)
		}
	}

	return
}
// matchCentOSDevelPkg lists kernel-devel package names available in
// the container's yum repositories whose version matches mask, either
// as a regular expression or as a plain substring.
//
// The generic parameter is currently unused; it is kept only so the
// signature stays compatible with existing callers.
//
// Fix: the regex is now compiled before the container is started, so
// an invalid mask fails fast instead of after the yum search run.
func matchCentOSDevelPkg(container, mask string, generic bool) (
	pkgs []string, err error) {

	r, err := regexp.Compile("kernel-devel-" + mask)
	if err != nil {
		return
	}

	cmd := "yum search kernel-devel --showduplicates | " +
		"grep '^kernel-devel' | cut -d ' ' -f 1"

	// FIXME timeout should be in global out-of-tree config
	c, err := NewContainer(container, time.Hour)
	if err != nil {
		return
	}

	output, err := c.Run("/tmp", cmd)
	if err != nil {
		return
	}

	for _, pkg := range strings.Fields(output) {
		// Accept a regex match or a literal substring hit.
		if r.MatchString(pkg) || strings.Contains(pkg, mask) {
			pkgs = append(pkgs, pkg)
		}
	}

	return
}
// dockerImagePath returns the host directory that holds the
// Dockerfile for the given distro type/release pair:
// ~/.out-of-tree/containers/<distro type>/<distro release>.
func dockerImagePath(sk config.KernelMask) (path string, err error) {
	usr, err := user.Current()
	if err != nil {
		return
	}
	path = usr.HomeDir + "/.out-of-tree/containers/" +
		sk.DistroType.String() + "/" + sk.DistroRelease
	return
}
// vsyscallAvailable reports whether the legacy vsyscall page is
// mapped into the current process (checked via /proc/self/maps).
// On non-Linux hosts it always reports true, because Docker there
// runs a VM kernel built with vsyscall support.
//
// Modernization: os.ReadFile replaces the deprecated ioutil.ReadFile
// (the file already uses Go 1.16+ APIs such as os.MkdirTemp).
func vsyscallAvailable() (available bool, err error) {
	if runtime.GOOS != "linux" {
		// Docker for non-Linux systems is not using the host
		// kernel but uses kernel inside a virtual machine, so
		// it builds by the Docker team with vsyscall support.
		available = true
		return
	}

	buf, err := os.ReadFile("/proc/self/maps")
	if err != nil {
		return
	}

	available = strings.Contains(string(buf), "[vsyscall]")
	return
}
// generateBaseDockerImage writes a Dockerfile for the distro
// described by sk under ~/.out-of-tree/containers/... and builds the
// base container image, unless both the Dockerfile and a built image
// already exist.
//
// registry, when non-empty, prefixes the FROM image reference.
// commands are user-configured RUN steps injected before the
// distro-specific setup.
func generateBaseDockerImage(registry string, commands []config.DockerCommand,
	sk config.KernelMask) (err error) {

	imagePath, err := dockerImagePath(sk)
	if err != nil {
		return
	}
	dockerPath := imagePath + "/Dockerfile"

	d := "# BASE\n"

	// Regeneration is skipped only when the Dockerfile exists on
	// disk AND docker reports a built image under this name.
	cmd := exec.Command("docker", "images", "-q", sk.DockerName())
	log.Debug().Msgf("run %v", cmd)
	rawOutput, err := cmd.CombinedOutput()
	if err != nil {
		return
	}

	if exists(dockerPath) && string(rawOutput) != "" {
		log.Info().Msgf("Base image for %s:%s found",
			sk.DistroType.String(), sk.DistroRelease)
		return
	}

	log.Info().Msgf("Base image for %s:%s not found, start generating",
		sk.DistroType.String(), sk.DistroRelease)
	os.MkdirAll(imagePath, os.ModePerm)

	d += "FROM "
	if registry != "" {
		d += registry + "/"
	}
	d += fmt.Sprintf("%s:%s\n",
		strings.ToLower(sk.DistroType.String()),
		sk.DistroRelease,
	)

	vsyscall, err := vsyscallAvailable()
	if err != nil {
		return
	}

	// User-supplied extra commands run before distro setup.
	for _, c := range commands {
		d += "RUN " + c.Command + "\n"
	}

	switch sk.DistroType {
	case config.Ubuntu:
		d += "ENV DEBIAN_FRONTEND=noninteractive\n"
		d += "RUN apt-get update\n"
		d += "RUN apt-get install -y build-essential libelf-dev\n"
		d += "RUN apt-get install -y wget git\n"
		// Install a single kernel and headers to ensure all dependencies are cached
		d += "RUN export PKGNAME=$(apt-cache search --names-only '^linux-headers-[0-9\\.\\-]*-generic' | awk '{ print $1 }' | head -n 1); " +
			"apt-get install -y $PKGNAME $(echo $PKGNAME | sed 's/headers/image/')\n"
		if sk.DistroRelease >= "14.04" {
			d += "RUN apt-get install -y libseccomp-dev\n"
		}
		d += "RUN mkdir -p /lib/modules\n"
	case config.CentOS:
		if sk.DistroRelease < "7" && !vsyscall {
			log.Print("Old CentOS requires `vsyscall=emulate` " +
				"on the latest kernels")
			log.Print("Check out `A note about vsyscall` " +
				"at https://hub.docker.com/_/centos")
			log.Print("See also https://lwn.net/Articles/446528/")
			err = fmt.Errorf("vsyscall is not available")
			return
		} else if sk.DistroRelease == "8" {
			// CentOS 8 doesn't have Vault repos by default
			for _, repover := range []string{
				"8.0.1905", "8.1.1911", "8.2.2004", "8.3.2011", "8.4.2105", "8.5.2111",
			} {
				repo := fmt.Sprintf("[%s]\\nbaseurl=http://vault.centos.org/%s/BaseOS/$basearch/os/\\ngpgcheck=0", repover, repover)
				d += fmt.Sprintf("RUN echo -e '%s' >> /etc/yum.repos.d/CentOS-Vault.repo\n", repo)
			}
			d += "RUN sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/*\n"
		}

		// enable rpms from old minor releases
		d += "RUN sed -i 's/enabled=0/enabled=1/' /etc/yum.repos.d/CentOS-Vault.repo\n"
		// do not remove old kernels
		d += "RUN sed -i 's;installonly_limit=;installonly_limit=100500;' /etc/yum.conf\n"
		d += "RUN yum -y update\n"

		d += "RUN yum -y groupinstall 'Development Tools'\n"

		if sk.DistroRelease < "8" {
			d += "RUN yum -y install deltarpm\n"
		} else {
			d += "RUN yum -y install grub2-tools-minimal " +
				"elfutils-libelf-devel\n"
		}

		var flags string
		if sk.DistroRelease >= "8" {
			flags = "--noautoremove"
		}

		// Cache kernel package dependencies
		d += "RUN export PKGNAME=$(yum search kernel-devel --showduplicates | grep '^kernel-devel' | cut -d ' ' -f 1 | head -n 1); " +
			"yum -y install $PKGNAME $(echo $PKGNAME | sed 's/-devel//'); " +
			fmt.Sprintf("yum -y remove $PKGNAME $(echo $PKGNAME | sed 's/-devel//') %s\n", flags)
	default:
		err = fmt.Errorf("%s not yet supported", sk.DistroType.String())
		return
	}

	d += "# END BASE\n\n"

	err = ioutil.WriteFile(dockerPath, []byte(d), 0644)
	if err != nil {
		return
	}

	c, err := NewContainer(sk.DockerName(), time.Hour)
	if err != nil {
		return
	}

	output, err := c.Build(imagePath)
	if err != nil {
		log.Error().Err(err).Msgf("Base image for %s:%s generating error",
			sk.DistroType.String(), sk.DistroRelease)
		// NOTE(review): log.Fatal exits the process, so the return
		// below is unreachable — confirm whether a non-fatal error
		// return was intended here.
		log.Fatal().Msg(output)
		return
	}

	log.Info().Msgf("Base image for %s:%s generating success",
		sk.DistroType.String(), sk.DistroRelease)

	return
}
// installKernel installs one kernel package (and optionally its
// headers) through the distro container.
//
// It redirects the container's /lib/modules, /usr/src and /boot
// volumes to fresh temp directories, performs the install there, and
// then copies only the non-empty staged directories into the real
// volume paths mounted at /target — so unrelated existing files are
// left untouched.
//
// force reinstalls a kernel release that already exists under the
// container's lib/modules volume; without it the install is skipped.
func installKernel(sk config.KernelMask, pkgname string, force, headers bool) (err error) {
	tmpdir, err := os.MkdirTemp("", "out-of-tree-"+pkgname+"-")
	if err != nil {
		log.Fatal().Err(err).Msg("make tmp directory")
	}
	defer os.RemoveAll(tmpdir)

	slog := log.With().
		Str("distro_type", sk.DistroType.String()).
		Str("distro_release", sk.DistroRelease).
		Str("pkg", pkgname).
		Str("tmpdir", tmpdir).
		Logger()

	c, err := NewContainer(sk.DockerName(), time.Hour) // TODO conf

	if err != nil {
		return
	}

	moddirs, err := ioutil.ReadDir(c.Volumes.LibModules)
	if err != nil {
		return
	}

	// Already-installed check: a modules directory whose name is a
	// substring of the package name means this kernel is present.
	for _, krel := range moddirs {
		if strings.Contains(pkgname, krel.Name()) {
			if force {
				slog.Info().Msg("Reinstall")
			} else {
				slog.Info().Msg("Already installed")
				return
			}
		}
	}

	// Remember the real volume paths, then point the container at
	// scratch directories so the install is staged first.
	volumes := c.Volumes

	c.Volumes.LibModules = fmt.Sprintf("%s/libmodules", tmpdir)
	os.MkdirAll(c.Volumes.LibModules, 0777)

	c.Volumes.UsrSrc = fmt.Sprintf("%s/usrsrc", tmpdir)
	os.MkdirAll(c.Volumes.UsrSrc, 0777)

	c.Volumes.Boot = fmt.Sprintf("%s/boot", tmpdir)
	os.MkdirAll(c.Volumes.Boot, 0777)

	slog.Debug().Msgf("Installing kernel")

	switch sk.DistroType {
	case config.Ubuntu:
		var headerspkg string
		if headers {
			// linux-image-X -> linux-headers-X
			headerspkg = strings.Replace(pkgname, "image", "headers", -1)
		}

		cmd := fmt.Sprintf("apt-get install -y %s %s", pkgname, headerspkg)
		_, err = c.Run("/tmp", cmd)
		if err != nil {
			return
		}
	case config.CentOS:
		// kernel-devel-X -> kernel-X (the image package)
		imagepkg := strings.Replace(pkgname, "-devel", "", -1)

		version := strings.Replace(pkgname, "kernel-devel-", "", -1)

		if !headers {
			pkgname = ""
		}

		cmd := fmt.Sprintf("yum -y install %s %s\n", imagepkg,
			pkgname)
		_, err = c.Run("/tmp", cmd)
		if err != nil {
			return
		}

		// Generate an initramfs that includes the e1000 and
		// ext4 drivers.
		cmd = fmt.Sprintf("dracut --add-drivers 'e1000 ext4' -f "+
			"/boot/initramfs-%s.img %s\n", version, version)
		_, err = c.Run("/tmp", cmd)
		if err != nil {
			return
		}
	default:
		err = fmt.Errorf("%s not yet supported", sk.DistroType.String())
		return
	}

	// Mount the real volumes at /target and copy the staged results
	// over; empty staged directories are skipped.
	c.Args = append(c.Args, "-v", volumes.LibModules+":/target/lib/modules")
	c.Args = append(c.Args, "-v", volumes.UsrSrc+":/target/usr/src")
	c.Args = append(c.Args, "-v", volumes.Boot+":/target/boot")

	cmd := "true"
	files, err := ioutil.ReadDir(c.Volumes.Boot)
	if err != nil {
		return
	}
	if len(files) != 0 {
		cmd += " && cp -r /boot/* /target/boot/"
	}

	files, err = ioutil.ReadDir(c.Volumes.LibModules)
	if err != nil {
		return
	}
	if len(files) != 0 {
		cmd += " && cp -r /lib/modules/* /target/lib/modules/"
	}

	files, err = ioutil.ReadDir(c.Volumes.UsrSrc)
	if err != nil {
		return
	}
	if len(files) != 0 {
		cmd += " && cp -r /usr/src/* /target/usr/src/"
	}

	_, err = c.Run("/tmp", cmd)
	if err != nil {
		return
	}

	slog.Debug().Msgf("Success")
	return
}
func findKernelFile(files []os.FileInfo, kname string) (name string, err error) {
for _, file := range files {
if strings.HasPrefix(file.Name(), "vmlinuz") {
if strings.Contains(file.Name(), kname) {
name = file.Name()
return
}
}
}
err = errors.New("cannot find kernel")
return
}
func findInitrdFile(files []os.FileInfo, kname string) (name string, err error) {
for _, file := range files {
if strings.HasPrefix(file.Name(), "initrd") ||
strings.HasPrefix(file.Name(), "initramfs") {
if strings.Contains(file.Name(), kname) {
name = file.Name()
return
}
}
}
err = errors.New("cannot find kernel")
return
}
// genRootfsImage returns the path of the rootfs image for the given
// container, downloading it into ~/.out-of-tree/images/ when download
// is true and the image is not present locally.
//
// Fix: the os.MkdirAll error was silently ignored; a failure there
// would only surface later as a confusing download/open error.
func genRootfsImage(d containerImageInfo, download bool) (rootfs string, err error) {
	usr, err := user.Current()
	if err != nil {
		return
	}
	imageFile := d.Name + ".img"

	imagesPath := usr.HomeDir + "/.out-of-tree/images/"
	err = os.MkdirAll(imagesPath, os.ModePerm)
	if err != nil {
		return
	}

	rootfs = imagesPath + imageFile
	if !exists(rootfs) {
		if download {
			log.Info().Msgf("%v not available, start download", imageFile)
			err = downloadImage(imagesPath, imageFile)
		}
	}
	return
}
// updateKernelsCfg regenerates ~/.out-of-tree/kernels.toml from the
// host kernels (when host is true) plus every kernel found in the
// existing container images, de-duplicating identical entries.
func updateKernelsCfg(host, download bool) (err error) {
	newkcfg := config.KernelConfig{}

	if host {
		// Get host kernels
		newkcfg, err = genHostKernels(download)
		if err != nil {
			return
		}
	}

	// Get docker kernels
	dockerImages, err := listContainerImages()
	if err != nil {
		return
	}

	for _, d := range dockerImages {
		err = listContainersKernels(d, &newkcfg, download)
		if err != nil {
			// One broken container must not abort the whole
			// regeneration; log and continue.
			log.Print("gen kernels", d.Name, ":", err)
			continue
		}
	}

	// Drop duplicate entries before writing the config.
	stripkcfg := config.KernelConfig{}
	for _, nk := range newkcfg.Kernels {
		if !hasKernel(nk, stripkcfg) {
			stripkcfg.Kernels = append(stripkcfg.Kernels, nk)
		}
	}

	buf, err := toml.Marshal(&stripkcfg)
	if err != nil {
		return
	}

	buf = append([]byte("# Autogenerated\n# DO NOT EDIT\n\n"), buf...)

	usr, err := user.Current()
	if err != nil {
		return
	}

	// TODO move all cfg path values to one provider
	kernelsCfgPath := usr.HomeDir + "/.out-of-tree/kernels.toml"
	err = ioutil.WriteFile(kernelsCfgPath, buf, 0644)
	if err != nil {
		return
	}

	log.Info().Msgf("%s is successfully updated", kernelsCfgPath)
	return
}
// listContainersKernels appends a KernelInfo entry to newkcfg for
// each kernel release found in the container's lib/modules volume,
// pairing it with its vmlinuz and initrd from the boot volume and the
// rootfs image (downloaded on demand when download is true).
// Releases missing either artifact are skipped with a warning.
func listContainersKernels(dii containerImageInfo, newkcfg *config.KernelConfig,
	download bool) (err error) {

	rootfs, err := genRootfsImage(dii, download)
	if err != nil {
		return
	}

	c, err := NewContainer(dii.Name, time.Hour)
	if err != nil {
		return
	}

	moddirs, err := ioutil.ReadDir(c.Volumes.LibModules)
	if err != nil {
		return
	}

	bootfiles, err := ioutil.ReadDir(c.Volumes.Boot)
	if err != nil {
		return
	}

	for _, krel := range moddirs {
		log.Debug().Msgf("generate config entry for %s", krel.Name())

		var kernelFile, initrdFile string
		kernelFile, err = findKernelFile(bootfiles, krel.Name())
		if err != nil {
			// Non-fatal: skip releases without a kernel image.
			log.Warn().Msgf("cannot find kernel %s", krel.Name())
			continue
		}

		initrdFile, err = findInitrdFile(bootfiles, krel.Name())
		if err != nil {
			log.Warn().Msgf("cannot find initrd %s", krel.Name())
			continue
		}

		ki := config.KernelInfo{
			DistroType:    dii.DistroType,
			DistroRelease: dii.DistroRelease,
			KernelRelease: krel.Name(),
			ContainerName: dii.Name,

			KernelPath:  c.Volumes.Boot + "/" + kernelFile,
			InitrdPath:  c.Volumes.Boot + "/" + initrdFile,
			ModulesPath: c.Volumes.LibModules + "/" + krel.Name(),

			RootFS: rootfs,
		}
		newkcfg.Kernels = append(newkcfg.Kernels, ki)
	}

	// Make the /boot files world-readable inside the volume.
	for _, cmd := range []string{
		"find /boot -type f -exec chmod a+r {} \\;",
	} {
		_, err = c.Run("/tmp", cmd)
		if err != nil {
			return
		}
	}

	return
}
// hasKernel reports whether kcfg already contains an entry equal to ki.
func hasKernel(ki config.KernelInfo, kcfg config.KernelConfig) bool {
	for i := range kcfg.Kernels {
		if kcfg.Kernels[i] == ki {
			return true
		}
	}
	return false
}
func shuffleStrings(a []string) []string {
// FisherYates shuffle
for i := len(a) - 1; i > 0; i-- {
j := rand.Intn(i + 1)
a[i], a[j] = a[j], a[i]
}
return a
}
// generateKernels builds the base container image for the mask km,
// lists the matching kernel packages, and installs up to max of them,
// retrying each failed install up to retries times (with a one-second
// pause between attempts).
//
// Fix: a package that still fails after all retries is skipped — as
// the "skip" log already promised — but previously err was left set,
// so a failure on the last package made the whole run return an
// error even though it was deliberately skipped. err is now reset.
func generateKernels(km config.KernelMask, registry string,
	commands []config.DockerCommand, max, retries int64,
	download, force, headers, shuffle bool) (err error) {

	log.Info().Msgf("Generating for kernel mask %v", km)

	_, err = genRootfsImage(containerImageInfo{Name: km.DockerName()},
		download)
	if err != nil {
		return
	}

	err = generateBaseDockerImage(registry, commands, km)
	if err != nil {
		return
	}

	var pkgs []string
	switch km.DistroType {
	case config.Ubuntu:
		pkgs, err = matchDebImagePkg(km.DockerName(), km.ReleaseMask)
	case config.CentOS:
		pkgs, err = matchCentOSDevelPkg(km.DockerName(),
			km.ReleaseMask, true)
	default:
		err = fmt.Errorf("%s not yet supported", km.DistroType.String())
	}
	if err != nil {
		return
	}

	if shuffle {
		pkgs = shuffleStrings(pkgs)
	}

	for i, pkg := range pkgs {
		if max <= 0 {
			log.Print("Max is reached")
			break
		}

		log.Info().Msgf("%d/%d %s", i+1, len(pkgs), pkg)

		var attempt int64
		for {
			attempt++

			err = installKernel(km, pkg, force, headers)
			if err == nil {
				max--
				break
			} else if attempt >= retries {
				log.Error().Err(err).Msg("install kernel")
				log.Debug().Msg("skip")
				// The package is skipped, not fatal: do not
				// let its failure leak into the result.
				err = nil
				break
			} else {
				log.Warn().Err(err).Msg("install kernel")
				time.Sleep(time.Second)
				log.Info().Msg("retry")
			}
		}
	}
	return
}

93
kernel/kernel.go Normal file
View File

@ -0,0 +1,93 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package kernel
import (
"math/rand"
"os"
"os/signal"
"path/filepath"
"regexp"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// MatchPackages returns the distro's kernel package names that match
// the target's kernel regex and do not match its exclude regex.
func MatchPackages(km artifact.Target) (packages []string, err error) {
	pkgs, err := km.Distro.Packages()
	if err != nil {
		return
	}

	include, err := regexp.Compile(km.Kernel.Regex)
	if err != nil {
		return
	}

	exclude, err := regexp.Compile(km.Kernel.ExcludeRegex)
	if err != nil {
		return
	}

	for _, pkg := range pkgs {
		if !include.MatchString(pkg) {
			continue
		}
		// An empty exclude pattern means "exclude nothing".
		if km.Kernel.ExcludeRegex != "" && exclude.MatchString(pkg) {
			continue
		}
		packages = append(packages, pkg)
	}

	return
}
// GenRootfsImage returns the local path of the named rootfs image,
// fetching it into the images directory from the cache server when
// download is true and it is not already present.
func GenRootfsImage(imageFile string, download bool) (rootfs string, err error) {
	imagesPath := dotfiles.Dir("images")
	rootfs = filepath.Join(imagesPath, imageFile)

	if fs.PathExists(rootfs) {
		return
	}

	if download {
		log.Info().Msgf("%v not available, start download", imageFile)
		err = cache.DownloadRootFS(imagesPath, imageFile)
	}
	return
}
func ShuffleStrings(a []string) []string {
// FisherYates shuffle
for i := len(a) - 1; i > 0; i-- {
j := rand.Intn(i + 1)
a[i], a[j] = a[j], a[i]
}
return a
}
// SetSigintHandler installs a SIGINT (^C) handler that sets *variable
// to true on the first interrupt, letting long-running loops finish
// their current unit of work and exit cleanly. The fourth interrupt
// aborts the process immediately via log.Fatal.
//
// The spawned goroutine lives for the rest of the process; there is
// intentionally no way to deregister the handler.
func SetSigintHandler(variable *bool) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		counter := 0
		for range c {
			if counter == 0 {
				// First ^C: request a graceful shutdown.
				*variable = true
				log.Warn().Msg("shutdown requested, finishing work")
				log.Info().Msg("^C a couple of times more for an unsafe exit")
			} else if counter >= 3 {
				log.Fatal().Msg("unsafe exit")
			}

			counter += 1
		}
	}()
}

View File

@ -5,24 +5,26 @@
//go:build linux
// +build linux
package main
package kernel
import (
"io/ioutil"
"os"
"os/exec"
"strings"
"github.com/rs/zerolog/log"
"github.com/zcalusic/sysinfo"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
)
func genHostKernels(download bool) (kcfg config.KernelConfig, err error) {
func GenHostKernels(download bool) (kernels []distro.KernelInfo, err error) {
si := sysinfo.SysInfo{}
si.GetSysInfo()
distroType, err := config.NewDistroType(si.OS.Vendor)
distroType, err := distro.NewID(si.OS.Vendor)
if err != nil {
return
}
@ -37,20 +39,17 @@ func genHostKernels(download bool) (kcfg config.KernelConfig, err error) {
}
kernelsBase := "/boot/"
bootfiles, err := ioutil.ReadDir(kernelsBase)
bootfiles, err := os.ReadDir(kernelsBase)
if err != nil {
return
}
// only for compatibility, docker is not really used
dii := containerImageInfo{
Name: config.KernelMask{
DistroType: distroType,
DistroRelease: si.OS.Version,
}.DockerName(),
dist := distro.Distro{
ID: distroType,
Release: si.OS.Version,
}
rootfs, err := genRootfsImage(dii, download)
rootfs, err := GenRootfsImage(dist.RootFS(), download)
if err != nil {
return
}
@ -59,21 +58,24 @@ func genHostKernels(download bool) (kcfg config.KernelConfig, err error) {
log.Debug().Msgf("generate config entry for %s", krel)
var kernelFile, initrdFile string
kernelFile, err = findKernelFile(bootfiles, krel)
kernelFile, err = container.FindKernel(bootfiles, krel)
if err != nil {
log.Warn().Msgf("cannot find kernel %s", krel)
continue
}
initrdFile, err = findInitrdFile(bootfiles, krel)
initrdFile, err = container.FindInitrd(bootfiles, krel)
if err != nil {
log.Warn().Msgf("cannot find initrd %s", krel)
continue
}
ki := config.KernelInfo{
DistroType: distroType,
DistroRelease: si.OS.Version,
ki := distro.KernelInfo{
Distro: distro.Distro{
ID: distroType,
Release: si.OS.Version,
},
KernelRelease: krel,
KernelSource: "/lib/modules/" + krel + "/build",
@ -85,11 +87,11 @@ func genHostKernels(download bool) (kcfg config.KernelConfig, err error) {
vmlinux := "/usr/lib/debug/boot/vmlinux-" + krel
log.Print("vmlinux", vmlinux)
if exists(vmlinux) {
if fs.PathExists(vmlinux) {
ki.VmlinuxPath = vmlinux
}
kcfg.Kernels = append(kcfg.Kernels, ki)
kernels = append(kernels, ki)
}
return

View File

@ -1,18 +1,19 @@
// Copyright 2018 Mikhail Klementev. All rights reserved.
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
//go:build darwin
// +build darwin
package main
package kernel
import (
"errors"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/distro"
)
func genHostKernels(download bool) (kcfg config.KernelConfig, err error) {
func GenHostKernels(download bool) (kernels []distro.KernelInfo, err error) {
err = errors.New("generate host kernels for macOS is not supported")
return
}

173
main.go
View File

@ -6,13 +6,11 @@ package main
import (
"fmt"
"io"
"math/rand"
"os"
"os/user"
"os/exec"
"runtime"
"runtime/debug"
"strconv"
"time"
"strings"
"github.com/natefinch/lumberjack"
"github.com/rs/zerolog"
@ -20,48 +18,71 @@ import (
"github.com/alecthomas/kong"
"code.dumpstack.io/tools/out-of-tree/config"
_ "code.dumpstack.io/tools/out-of-tree/distro/centos"
_ "code.dumpstack.io/tools/out-of-tree/distro/debian"
_ "code.dumpstack.io/tools/out-of-tree/distro/opensuse"
_ "code.dumpstack.io/tools/out-of-tree/distro/oraclelinux"
_ "code.dumpstack.io/tools/out-of-tree/distro/ubuntu"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/cmd"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/fs"
)
type Globals struct {
Config config.OutOfTree `help:"path to out-of-tree configuration" default:"~/.out-of-tree/out-of-tree.toml"`
WorkDir string `help:"path to work directory" default:"./" type:"path"`
}
type CLI struct {
Globals
cmd.Globals
Pew PewCmd `cmd:"" help:"build, run, and test module/exploit"`
Kernel KernelCmd `cmd:"" help:"manipulate kernels"`
Debug DebugCmd `cmd:"" help:"debug environment"`
Log LogCmd `cmd:"" help:"query logs"`
Pack PackCmd `cmd:"" help:"exploit pack test"`
Gen GenCmd `cmd:"" help:"generate .out-of-tree.toml skeleton"`
Image ImageCmd `cmd:"" help:"manage images"`
Container ContainerCmd `cmd:"" help:"manage containers"`
Pew cmd.PewCmd `cmd:"" help:"build, run, and test module/exploit"`
Kernel cmd.KernelCmd `cmd:"" help:"manipulate kernels"`
Debug cmd.DebugCmd `cmd:"" help:"debug environment"`
Log cmd.LogCmd `cmd:"" help:"query logs"`
Pack cmd.PackCmd `cmd:"" help:"exploit pack test"`
Gen cmd.GenCmd `cmd:"" help:"generate .out-of-tree.toml skeleton"`
Image cmd.ImageCmd `cmd:"" help:"manage images"`
Container cmd.ContainerCmd `cmd:"" help:"manage containers"`
Distro cmd.DistroCmd `cmd:"" help:"distro-related helpers"`
Daemon cmd.DaemonCmd `cmd:"" help:"run daemon"`
Version VersionFlag `name:"version" help:"print version information and quit"`
LogLevel LogLevelFlag `enum:"trace,debug,info,warn,error" default:"info"`
ContainerRuntime string `enum:"podman,docker" default:"podman"`
}
// last returns the final element of s; s must be non-empty.
func last(s []string) string {
	n := len(s)
	return s[n-1]
}
// debugLevel is a zerolog caller marshaller for debug level: it
// reduces the fully qualified function name at pc to its bare name
// (the text after the last dot). file and line are unused.
func debugLevel(pc uintptr, file string, line int) string {
	function := runtime.FuncForPC(pc).Name()
	if idx := strings.LastIndex(function, "."); idx != -1 {
		function = function[idx+1:]
	}
	return function
}
// traceLevel is a zerolog caller marshaller for trace level: it
// prints "file:function:line", where function is the qualified name
// at pc with everything up to the last slash stripped.
func traceLevel(pc uintptr, file string, line int) string {
	function := runtime.FuncForPC(pc).Name()
	if idx := strings.LastIndex(function, "/"); idx != -1 {
		function = function[idx+1:]
	}
	return fmt.Sprintf("%s:%s:%d", file, function, line)
}
type LogLevelFlag string
func (loglevel LogLevelFlag) AfterApply() error {
switch loglevel {
case "debug", "trace":
zerolog.CallerMarshalFunc = func(pc uintptr, file string, line int) string {
short := file
for i := len(file) - 1; i > 0; i-- {
if file[i] == '/' {
short = file[i+1:]
break
}
}
file = short
return file + ":" + strconv.Itoa(line)
}
case "debug":
zerolog.CallerMarshalFunc = debugLevel
log.Logger = log.With().Caller().Logger()
case "trace":
zerolog.CallerMarshalFunc = traceLevel
log.Logger = log.With().Caller().Logger()
}
return nil
@ -77,21 +98,7 @@ func (v VersionFlag) BeforeApply(app *kong.Kong, vars kong.Vars) error {
return nil
}
type LevelWriter struct {
io.Writer
Level zerolog.Level
}
func (lw *LevelWriter) WriteLevel(l zerolog.Level, p []byte) (n int, err error) {
if l >= lw.Level {
return lw.Writer.Write(p)
}
return len(p), nil
}
func main() {
rand.Seed(time.Now().UnixNano())
cli := CLI{}
ctx := kong.Parse(&cli,
kong.Name("out-of-tree"),
@ -101,42 +108,40 @@ func main() {
Compact: true,
}),
kong.Vars{
"version": "2.0.5",
"version": "2.1.2",
},
)
var loglevel zerolog.Level
switch cli.LogLevel {
case "trace":
loglevel = zerolog.TraceLevel
cmd.LogLevel = zerolog.TraceLevel
case "debug":
loglevel = zerolog.DebugLevel
cmd.LogLevel = zerolog.DebugLevel
case "info":
loglevel = zerolog.InfoLevel
cmd.LogLevel = zerolog.InfoLevel
case "warn":
loglevel = zerolog.WarnLevel
cmd.LogLevel = zerolog.WarnLevel
case "error":
loglevel = zerolog.ErrorLevel
cmd.LogLevel = zerolog.ErrorLevel
}
usr, err := user.Current()
if err != nil {
return
cmd.ConsoleWriter = cmd.LevelWriter{Writer: zerolog.NewConsoleWriter(
func(w *zerolog.ConsoleWriter) {
w.Out = os.Stderr
},
),
Level: cmd.LogLevel,
}
cmd.FileWriter = cmd.LevelWriter{Writer: &lumberjack.Logger{
Filename: dotfiles.File("logs/out-of-tree.log"),
},
Level: zerolog.TraceLevel,
}
log.Logger = log.Output(zerolog.MultiLevelWriter(
&LevelWriter{Writer: zerolog.NewConsoleWriter(
func(w *zerolog.ConsoleWriter) {
w.Out = os.Stderr
},
),
Level: loglevel,
},
&LevelWriter{Writer: &lumberjack.Logger{
Filename: usr.HomeDir + "/.out-of-tree/logs/out-of-tree.log",
},
Level: zerolog.TraceLevel,
},
&cmd.ConsoleWriter,
&cmd.FileWriter,
))
log.Trace().Msg("start out-of-tree")
@ -148,6 +153,36 @@ func main() {
log.Debug().Msgf("%v", buildInfo.Settings)
}
path := dotfiles.Dir()
yes, err := fs.CaseInsensitive(path)
if err != nil {
log.Fatal().Err(err).Msg(path)
}
if yes {
log.Warn().Msg("case-insensitive file system not supported")
}
_, err = exec.LookPath(cli.ContainerRuntime)
if err != nil {
if cli.ContainerRuntime == "podman" { // default value
log.Debug().Msgf("podman is not found in $PATH, " +
"fall back to docker")
cli.ContainerRuntime = "docker"
}
_, err = exec.LookPath(cli.ContainerRuntime)
if err != nil {
log.Fatal().Msgf("%v is not found in $PATH",
cli.ContainerRuntime)
}
}
container.Runtime = cli.ContainerRuntime
if cli.Globals.CacheURL.String() != "" {
cache.URL = cli.Globals.CacheURL.String()
}
log.Debug().Msgf("set cache url to %s", cache.URL)
err = ctx.Run(&cli.Globals)
ctx.FatalIfErrorf(err)
}

770
pew.go
View File

@ -1,770 +0,0 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
import (
"bufio"
"database/sql"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"os/user"
"strings"
"time"
"github.com/otiai10/copy"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"gopkg.in/logrusorgru/aurora.v2"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// PewCmd is the `pew` subcommand: build the artifact against each
// matching kernel, run it under qemu, test it, and record results.
type PewCmd struct {
	Max     int64  `help:"test no more than X kernels" default:"100500"`
	Runs    int64  `help:"runs per each kernel" default:"1"`
	Kernel  string `help:"override kernel regex"`
	Guess   bool   `help:"try all defined kernels"`
	Shuffle bool   `help:"randomize kernels test order"`

	Binary string `help:"use binary, do not build"`
	Test   string `help:"override path for test"`

	Dist string `help:"build result path" default:"/dev/null"`

	Threads int `help:"threads" default:"1"`

	Tag string `help:"log tagging"`

	Timeout time.Duration `help:"timeout after tool will not spawn new tests"`

	ArtifactConfig string `help:"path to artifact config" type:"path"`

	QemuTimeout   time.Duration `help:"timeout for qemu"`
	DockerTimeout time.Duration `help:"timeout for docker"`

	Threshold float64 `help:"reliablity threshold for exit code" default:"1.00"`

	// db is the results database handle, opened in Run.
	db *sql.DB
	// kcfg is the set of known kernels read from the user config.
	kcfg config.KernelConfig
	// timeoutDeadline marks when Timeout expires (zero if unset).
	timeoutDeadline time.Time
}
// Run executes the pew pipeline: load the kernels config, open the
// results database, read the artifact config, apply kernel-selection
// flags, run CI for every selected kernel, and finally enforce the
// reliability threshold on the aggregated success rate.
func (cmd *PewCmd) Run(g *Globals) (err error) {
	cmd.kcfg, err = config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		log.Fatal().Err(err).Msg("read kernels config")
	}

	if cmd.Timeout != 0 {
		log.Info().Msgf("Set global timeout to %s", cmd.Timeout)
		cmd.timeoutDeadline = time.Now().Add(cmd.Timeout)
	}

	cmd.db, err = openDatabase(g.Config.Database)
	if err != nil {
		log.Fatal().Err(err).
			Msgf("Cannot open database %s", g.Config.Database)
	}
	defer cmd.db.Close()

	// --artifact-config overrides the default work-dir location.
	var configPath string
	if cmd.ArtifactConfig == "" {
		configPath = g.WorkDir + "/.out-of-tree.toml"
	} else {
		configPath = cmd.ArtifactConfig
	}
	ka, err := config.ReadArtifactConfig(configPath)
	if err != nil {
		return
	}

	if ka.SourcePath == "" {
		ka.SourcePath = g.WorkDir
	}

	if cmd.Kernel != "" {
		// --kernel replaces the artifact's supported kernels.
		var km config.KernelMask
		km, err = kernelMask(cmd.Kernel)
		if err != nil {
			return
		}

		ka.SupportedKernels = []config.KernelMask{km}
	}

	if cmd.Guess {
		// --guess: try every kernel known to the tool.
		ka.SupportedKernels, err = genAllKernels()
		if err != nil {
			return
		}
	}

	if cmd.QemuTimeout != 0 {
		log.Info().Msgf("Set qemu timeout to %s", cmd.QemuTimeout)
	} else {
		cmd.QemuTimeout = g.Config.Qemu.Timeout.Duration
	}

	if cmd.DockerTimeout != 0 {
		log.Info().Msgf("Set docker timeout to %s", cmd.DockerTimeout)
	} else {
		cmd.DockerTimeout = g.Config.Docker.Timeout.Duration
	}

	if cmd.Tag == "" {
		// Default log tag: unix timestamp of this invocation.
		cmd.Tag = fmt.Sprintf("%d", time.Now().Unix())
	}
	log.Info().Str("tag", cmd.Tag).Msg("log")

	err = cmd.performCI(ka)
	if err != nil {
		return
	}

	log.Info().Msgf("Success rate: %.02f, Threshold: %.02f",
		successRate(state), cmd.Threshold)
	if successRate(state) < cmd.Threshold {
		err = errors.New("reliability threshold not met")
	}

	return
}
// runstate accumulates overall vs successful phase counters across
// all tested kernels (incremented by genOkFail).
type runstate struct {
	Overall, Success float64
}

var (
	// state is the global accumulator for the current invocation.
	state runstate
)

// successRate returns Success/Overall.
// NOTE(review): with Overall == 0 this yields NaN, and NaN compares
// false against any threshold — confirm that zero-run invocations
// are meant to pass the threshold check in PewCmd.Run.
func successRate(state runstate) float64 {
	return state.Success / state.Overall
}

// pathDevNull is the sentinel dist path meaning "do not save binaries".
const pathDevNull = "/dev/null"
// sh runs command via `sh -c` inside workdir, streaming each output
// line (stderr merged into stdout) to the trace log while collecting
// the combined output. On failure the returned error embeds the
// command and its output.
//
// Fix: the scanner goroutine used to append to output with no
// synchronization while the main goroutine could already be reading
// it — a data race; os/exec also documents that Wait must not be
// called before reads from the pipe have completed. The goroutine is
// now joined (done channel) before Wait and before output is used.
func sh(workdir, command string) (output string, err error) {
	flog := log.With().
		Str("workdir", workdir).
		Str("command", command).
		Logger()

	cmd := exec.Command("sh", "-c", "cd "+workdir+" && "+command)

	flog.Debug().Msgf("%v", cmd)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	cmd.Stderr = cmd.Stdout

	err = cmd.Start()
	if err != nil {
		return
	}

	done := make(chan struct{})
	go func() {
		defer close(done)
		scanner := bufio.NewScanner(stdout)
		for scanner.Scan() {
			m := scanner.Text()
			output += m + "\n"
			flog.Trace().Str("stdout", m).Msg("")
		}
	}()

	// Join the scanner first so output is complete and no longer
	// concurrently mutated, then reap the process.
	<-done
	err = cmd.Wait()

	if err != nil {
		e := fmt.Sprintf("%v %v output: %v", cmd, err, output)
		err = errors.New(e)
	}
	return
}
// applyPatches applies the artifact's patches to the source tree at
// src, in declaration order. A patch may carry inline diff text
// (Source) or a path to a diff file (Path) — mutually exclusive —
// and/or a shell Script to execute after the diff is applied.
func applyPatches(src string, ka config.Artifact) (err error) {
	for i, patch := range ka.Patches {
		name := fmt.Sprintf("patch_%02d", i)

		path := src + "/" + name + ".diff"
		if patch.Source != "" && patch.Path != "" {
			err = errors.New("path and source are mutually exclusive")
			return
		} else if patch.Source != "" {
			// Inline diff text: materialize it as a file.
			err = os.WriteFile(path, []byte(patch.Source), 0644)
			if err != nil {
				return
			}
		} else if patch.Path != "" {
			err = copy.Copy(patch.Path, path)
			if err != nil {
				return
			}
		}

		if patch.Source != "" || patch.Path != "" {
			_, err = sh(src, "patch < "+path)
			if err != nil {
				return
			}
		}

		if patch.Script != "" {
			script := src + "/" + name + ".sh"
			err = os.WriteFile(script, []byte(patch.Script), 0755)
			if err != nil {
				return
			}
			_, err = sh(src, script)
			if err != nil {
				return
			}
		}
	}
	return
}
// build copies the artifact source into tmp, applies its patches, and
// builds it — inside the kernel's container when one exists, or
// directly on the host otherwise (e.g. host kernels).
//
// Returns the build directory, the expected artifact path (".ko"
// appended for kernel modules) and the build output. dockerTimeout
// also bounds the host build via a kill timer.
func build(tmp string, ka config.Artifact, ki config.KernelInfo,
	dockerTimeout time.Duration) (outdir, outpath, output string, err error) {

	// Randomized target name avoids collisions between runs.
	target := fmt.Sprintf("%d_%s", rand.Int(), ki.KernelRelease)

	outdir = tmp + "/source"

	err = copy.Copy(ka.SourcePath, outdir)
	if err != nil {
		return
	}

	err = applyPatches(outdir, ka)
	if err != nil {
		return
	}

	outpath = outdir + "/" + target
	if ka.Type == config.KernelModule {
		outpath += ".ko"
	}

	kernel := "/lib/modules/" + ki.KernelRelease + "/build"
	if ki.KernelSource != "" {
		kernel = ki.KernelSource
	}

	buildCommand := "make KERNEL=" + kernel + " TARGET=" + target
	if ka.Make.Target != "" {
		buildCommand += " " + ka.Make.Target
	}

	if ki.ContainerName != "" {
		var c container
		c, err = NewContainer(ki.ContainerName, dockerTimeout)
		if err != nil {
			log.Fatal().Err(err).Msg("container creation failure")
		}

		// chmod lets the host user read artifacts that were
		// written by root inside the container.
		output, err = c.Run(outdir, buildCommand+" && chmod -R 777 /work")
	} else {
		cmd := exec.Command("bash", "-c", "cd "+outdir+" && "+
			buildCommand)

		log.Debug().Msgf("%v", cmd)

		// Kill the host build after the same timeout used for
		// container builds.
		timer := time.AfterFunc(dockerTimeout, func() {
			cmd.Process.Kill()
		})
		defer timer.Stop()

		var raw []byte
		raw, err = cmd.CombinedOutput()
		if err != nil {
			e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
				err, buildCommand, string(raw))
			err = errors.New(e)
			return
		}

		output = string(raw)
	}
	return
}
// runScript runs script as root inside the qemu guest and returns
// its output.
func runScript(q *qemu.System, script string) (output string, err error) {
	return q.Command("root", script)
}
// testKernelModule runs the module's test command as root inside the
// qemu guest and returns its output. ka is currently unused.
func testKernelModule(q *qemu.System, ka config.Artifact,
	test string) (output string, err error) {

	output, err = q.Command("root", test)
	// TODO generic checks for WARNING's and so on
	return
}
// testKernelExploit runs the exploit test as the unprivileged user
// inside the qemu guest. The test is passed the exploit path and a
// random file path under /root; success is verified by stat'ing that
// file as root afterwards, i.e. the exploit must have created it
// with elevated privileges.
func testKernelExploit(q *qemu.System, ka config.Artifact,
	test, exploit string) (output string, err error) {

	output, err = q.Command("user", "chmod +x "+exploit)
	if err != nil {
		return
	}

	randFilePath := fmt.Sprintf("/root/%d", rand.Int())

	cmd := fmt.Sprintf("%s %s %s", test, exploit, randFilePath)
	output, err = q.Command("user", cmd)
	if err != nil {
		return
	}

	// Success criterion: the file exists under /root.
	_, err = q.Command("root", "stat "+randFilePath)
	if err != nil {
		return
	}

	return
}
// genOkFail renders a colored SUCCESS/FAILURE badge for the named
// phase. Side effect: it increments the global state counters
// (Overall always, Success only when ok).
func genOkFail(name string, ok bool) (aurv aurora.Value) {
	state.Overall += 1
	s := " " + name
	if name == "" {
		s = ""
	}
	if ok {
		state.Success += 1
		s += " SUCCESS "
		aurv = aurora.BgGreen(aurora.Black(s))
	} else {
		s += " FAILURE "
		aurv = aurora.BgRed(aurora.White(aurora.Bold(s)))
	}
	return
}
// phasesResult collects the per-kernel outcome of the pipeline
// phases (build, run/insmod, test) plus where the build happened.
type phasesResult struct {
	// BuildDir is the temporary source/build directory.
	BuildDir string
	// BuildArtifact is the path of the produced binary or module.
	BuildArtifact string
	// Build, Run and Test hold each phase's output and status.
	Build, Run, Test struct {
		Output string
		Ok     bool
	}
}
func copyFile(sourcePath, destinationPath string) (err error) {
sourceFile, err := os.Open(sourcePath)
if err != nil {
return
}
defer sourceFile.Close()
destinationFile, err := os.Create(destinationPath)
if err != nil {
return err
}
if _, err := io.Copy(destinationFile, sourceFile); err != nil {
destinationFile.Close()
return err
}
return destinationFile.Close()
}
// dumpResult prints the per-kernel colored result line (with panic/
// timeout annotations), appends the result to the log database, and —
// when the artifact was built here (no --binary) and dist is not
// /dev/null — saves the built artifact under dist. All failures are
// logged as warnings; this function never returns an error.
func dumpResult(q *qemu.System, ka config.Artifact, ki config.KernelInfo,
	res *phasesResult, dist, tag, binary string, db *sql.DB) {

	// TODO merge (problem is it's not 100% same) with log.go:logLogEntry

	distroInfo := fmt.Sprintf("%s-%s {%s}", ki.DistroType,
		ki.DistroRelease, ki.KernelRelease)

	colored := ""
	switch ka.Type {
	case config.KernelExploit:
		colored = aurora.Sprintf("[*] %40s: %s %s", distroInfo,
			genOkFail("BUILD", res.Build.Ok),
			genOkFail("LPE", res.Test.Ok))
	case config.KernelModule:
		colored = aurora.Sprintf("[*] %40s: %s %s %s", distroInfo,
			genOkFail("BUILD", res.Build.Ok),
			genOkFail("INSMOD", res.Run.Ok),
			genOkFail("TEST", res.Test.Ok))
	case config.Script:
		colored = aurora.Sprintf("[*] %40s: %s", distroInfo,
			genOkFail("", res.Test.Ok))
	}

	additional := ""
	if q.KernelPanic {
		additional = "(panic)"
	} else if q.KilledByTimeout {
		additional = "(timeout)"
	}

	if additional != "" {
		fmt.Println(colored, additional)
	} else {
		fmt.Println(colored)
	}

	err := addToLog(db, q, ka, ki, res, tag)
	if err != nil {
		log.Warn().Err(err).Msgf("[db] addToLog (%v)", ka)
	}

	// Save the artifact only when built here and a real dist
	// directory was requested.
	if binary == "" && dist != pathDevNull {
		err = os.MkdirAll(dist, os.ModePerm)
		if err != nil {
			log.Warn().Err(err).Msgf("os.MkdirAll (%v)", ka)
		}

		path := fmt.Sprintf("%s/%s-%s-%s", dist, ki.DistroType,
			ki.DistroRelease, ki.KernelRelease)
		if ka.Type != config.KernelExploit {
			path += ".ko"
		}

		err = copyFile(res.BuildArtifact, path)
		if err != nil {
			log.Warn().Err(err).Msgf("copy file (%v)", ka)
		}
	}
}
// copyArtifactAndTest uploads the artifact's test files to the guest and
// then runs the phase sequence appropriate for the artifact type,
// recording per-phase output and success flags in res.
func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka config.Artifact,
	res *phasesResult, remoteTest string) (err error) {

	// Copy all test files to the remote machine; relative paths are
	// resolved against the build directory when one is available.
	// (strings.HasPrefix also guards against an empty Local path, which
	// would previously panic on f.Local[0].)
	for _, f := range ka.TestFiles {
		if !strings.HasPrefix(f.Local, "/") && res.BuildDir != "" {
			f.Local = res.BuildDir + "/" + f.Local
		}
		err = q.CopyFile(f.User, f.Local, f.Remote)
		if err != nil {
			slog.Error().Err(err).Msg("copy test file")
			return
		}
	}

	switch ka.Type {
	case config.KernelModule:
		res.Run.Output, err = q.CopyAndInsmod(res.BuildArtifact)
		if err != nil {
			slog.Error().Err(err).Msg(res.Run.Output)
			return
		}
		res.Run.Ok = true

		res.Test.Output, err = testKernelModule(q, ka, remoteTest)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		res.Test.Ok = true
	case config.KernelExploit:
		remoteExploit := fmt.Sprintf("/tmp/exploit_%d", rand.Int())
		err = q.CopyFile("user", res.BuildArtifact, remoteExploit)
		if err != nil {
			// Log for consistency with every other failure path.
			slog.Error().Err(err).Msg("copy exploit")
			return
		}

		res.Test.Output, err = testKernelExploit(q, ka, remoteTest,
			remoteExploit)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		res.Run.Ok = true // exploits have no separate run phase
		res.Test.Ok = true
	case config.Script:
		res.Test.Output, err = runScript(q, remoteTest)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		slog.Info().Msg(res.Test.Output)
		res.Run.Ok = true
		res.Test.Ok = true
	default:
		slog.Fatal().Msg("Unsupported artifact type")
	}
	return
}
// copyTest uploads the test script to a random path on the guest. If the
// upload fails (e.g. the artifact ships no test script), a minimal
// fallback script is synthesized on the guest instead: for exploits it
// runs the exploit so that it touches the marker file; otherwise it is an
// empty no-op shell script. Only the final chmod result is returned, so a
// failed upload alone does not surface as an error.
func copyTest(q *qemu.System, testPath string, ka config.Artifact) (
	remoteTest string, err error) {

	remoteTest = fmt.Sprintf("/tmp/test_%d", rand.Int())
	err = q.CopyFile("user", testPath, remoteTest)
	if err != nil {
		// Best-effort fallback; errors from these commands are
		// deliberately ignored.
		if ka.Type == config.KernelExploit {
			q.Command("user",
				"echo -e '#!/bin/sh\necho touch $2 | $1' "+
					"> "+remoteTest+
					" && chmod +x "+remoteTest)
		} else {
			q.Command("user", "echo '#!/bin/sh' "+
				"> "+remoteTest+" && chmod +x "+remoteTest)
		}
	}

	// Overwrites the copy error above: once a fallback script exists the
	// earlier failure is intentionally discarded.
	_, err = q.Command("root", "chmod +x "+remoteTest)
	return
}
// copyStandardModules uploads the non-symlink entries of the host-side
// modules directory for this kernel into /lib/modules on the guest, so
// that artifacts depending on standard modules can insmod them.
func copyStandardModules(q *qemu.System, ki config.KernelInfo) (err error) {
	_, err = q.Command("root", "mkdir -p /lib/modules")
	if err != nil {
		return
	}

	entries, err := ioutil.ReadDir(ki.ModulesPath)
	if err != nil {
		return
	}

	remoteDir := "/lib/modules/" + ki.KernelRelease + "/"

	// FIXME scp cannot ignore symlinks
	for _, entry := range entries {
		if entry.Mode()&os.ModeSymlink == os.ModeSymlink {
			continue
		}
		err = q.CopyDirectory("root", ki.ModulesPath+"/"+entry.Name(), remoteDir)
		if err != nil {
			return
		}
	}
	return
}
// testArtifact runs the full build/boot/test cycle for one artifact on
// one kernel inside a dedicated qemu VM. It is intended to run as a
// goroutine; swg bounds the number of concurrently running VMs.
func (cmd PewCmd) testArtifact(swg *sizedwaitgroup.SizedWaitGroup,
	ka config.Artifact, ki config.KernelInfo) {

	defer swg.Done()

	slog := log.With().
		Str("distro_type", ki.DistroType.String()).
		Str("distro_release", ki.DistroRelease).
		Str("kernel", ki.KernelRelease).
		Logger()
	slog.Info().Msg("start")

	kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		slog.Error().Err(err).Msg("qemu init")
		return
	}

	// Per-artifact qemu overrides take precedence over the command-line
	// defaults.
	q.Timeout = cmd.QemuTimeout
	if ka.Qemu.Timeout.Duration != 0 {
		q.Timeout = ka.Qemu.Timeout.Duration
	}
	if ka.Qemu.Cpus != 0 {
		q.Cpus = ka.Qemu.Cpus
	}
	if ka.Qemu.Memory != 0 {
		q.Memory = ka.Qemu.Memory
	}

	// cmd is a value receiver, so this override is local to this run.
	if ka.Docker.Timeout.Duration != 0 {
		cmd.DockerTimeout = ka.Docker.Timeout.Duration
	}

	// Mitigations are on by default; the artifact config can only
	// disable them.
	q.SetKASLR(!ka.Mitigations.DisableKaslr)
	q.SetSMEP(!ka.Mitigations.DisableSmep)
	q.SetSMAP(!ka.Mitigations.DisableSmap)
	q.SetKPTI(!ka.Mitigations.DisableKpti)

	err = q.Start()
	if err != nil {
		slog.Error().Err(err).Msg("qemu start")
		return
	}
	defer q.Stop()

	// Heartbeat logger while the VM is alive.
	go func() {
		for !q.Died {
			time.Sleep(time.Minute)
			slog.Debug().Msg("still alive")
		}
	}()

	usr, err := user.Current()
	if err != nil {
		// NOTE(review): silent return — unlike other paths this one is
		// not logged; confirm whether that is intentional.
		return
	}
	// Per-run scratch directory under ~/.out-of-tree/tmp.
	tmpdir := usr.HomeDir + "/.out-of-tree/tmp"
	os.MkdirAll(tmpdir, os.ModePerm)

	tmp, err := ioutil.TempDir(tmpdir, "out-of-tree_")
	if err != nil {
		slog.Error().Err(err).Msg("making tmp directory")
		return
	}
	defer os.RemoveAll(tmp)

	result := phasesResult{}
	// Deferred before q.Stop has run (LIFO order places this after the
	// function body but before nothing relevant changes q's state flags),
	// so panic/timeout markers are reflected in the dumped result.
	defer dumpResult(q, ka, ki, &result, cmd.Dist, cmd.Tag, cmd.Binary, cmd.db)

	if ka.Type == config.Script {
		// Scripts have no build phase.
		result.Build.Ok = true
		cmd.Test = ka.Script
	} else if cmd.Binary == "" {
		// TODO: build should return structure
		start := time.Now()
		result.BuildDir, result.BuildArtifact, result.Build.Output, err =
			build(tmp, ka, ki, cmd.DockerTimeout)
		slog.Debug().Str("duration", time.Now().Sub(start).String()).
			Msg("build done")
		if err != nil {
			log.Error().Err(err).Msg("build")
			return
		}
		result.Build.Ok = true
	} else {
		// A prebuilt binary was supplied; skip the build phase.
		result.BuildArtifact = cmd.Binary
		result.Build.Ok = true
	}

	// Pick a test script: <artifact>_test next to the binary, falling
	// back to test.sh in the extracted source tree.
	if cmd.Test == "" {
		cmd.Test = result.BuildArtifact + "_test"
		if !exists(cmd.Test) {
			cmd.Test = tmp + "/source/" + "test.sh"
		}
	}

	err = q.WaitForSSH(cmd.QemuTimeout)
	if err != nil {
		return
	}

	remoteTest, err := copyTest(q, cmd.Test, ka)
	if err != nil {
		return
	}

	if ka.StandardModules {
		// Module depends on one of the standard modules
		start := time.Now()
		err = copyStandardModules(q, ki)
		if err != nil {
			slog.Fatal().Err(err).Msg("copy standard modules")
			return
		}
		slog.Debug().Str("duration", time.Now().Sub(start).String()).
			Msg("copy standard modules")
	}

	err = preloadModules(q, ka, ki, cmd.DockerTimeout)
	if err != nil {
		slog.Error().Err(err).Msg("preload modules")
		return
	}

	start := time.Now()
	// Errors are logged (and reflected in result) inside
	// copyArtifactAndTest, so its return value is ignored here.
	copyArtifactAndTest(slog, q, ka, &result, remoteTest)
	slog.Debug().Str("duration", time.Now().Sub(start).String()).
		Msg("test completed")
}
// shuffleKernels shuffles the kernel list in place (uniformly at random)
// and returns it. Uses the standard library's Fisher-Yates implementation
// instead of a hand-rolled loop.
func shuffleKernels(a []config.KernelInfo) []config.KernelInfo {
	rand.Shuffle(len(a), func(i, j int) {
		a[i], a[j] = a[j], a[i]
	})
	return a
}
// performCI schedules the artifact for testing on every supported kernel
// (up to cmd.Max kernels, cmd.Runs times each), bounded by cmd.Threads
// concurrent VMs, and waits for all runs to finish. Returns an error if
// no kernel in the config supports the artifact.
func (cmd PewCmd) performCI(ka config.Artifact) (err error) {
	swg := sizedwaitgroup.New(cmd.Threads)

	if cmd.Shuffle {
		cmd.kcfg.Kernels = shuffleKernels(cmd.kcfg.Kernels)
	}

	matched := false
	remaining := cmd.Max

	for _, kernel := range cmd.kcfg.Kernels {
		if remaining <= 0 {
			break
		}

		var ok bool
		ok, err = ka.Supported(kernel)
		if err != nil {
			return
		}
		if !ok {
			continue
		}

		matched = true
		remaining--

		for run := int64(0); run < cmd.Runs; run++ {
			// Stop scheduling new runs once the global deadline
			// has passed.
			if !cmd.timeoutDeadline.IsZero() &&
				time.Now().After(cmd.timeoutDeadline) {
				break
			}
			swg.Add()
			go cmd.testArtifact(&swg, ka, kernel)
		}
	}
	swg.Wait()

	if !matched {
		err = errors.New("No supported kernels found")
	}
	return
}
// exists reports whether path is present on the filesystem; any stat
// error is treated as "does not exist".
func exists(path string) bool {
	_, err := os.Stat(path)
	if err != nil {
		log.Debug().Msgf("%s does not exist", path)
		return false
	}
	log.Debug().Msgf("%s exist", path)
	return true
}
// kernelMask parses a "distroType:regex" specifier into a KernelMask.
// Exactly one colon is required; extra or missing separators are rejected.
func kernelMask(kernel string) (km config.KernelMask, err error) {
	fields := strings.Split(kernel, ":")
	if len(fields) != 2 {
		err = errors.New("Kernel is not 'distroType:regex'")
		return
	}

	var distro config.DistroType
	distro, err = config.NewDistroType(fields[0])
	if err != nil {
		return
	}

	km = config.KernelMask{
		DistroType:  distro,
		ReleaseMask: fields[1],
	}
	return
}
// genAllKernels builds a kernel mask matching every release (".*") of
// every supported distro type.
func genAllKernels() (sk []config.KernelMask, err error) {
	for _, name := range config.DistroTypeStrings {
		var distro config.DistroType
		distro, err = config.NewDistroType(name)
		if err != nil {
			return
		}
		sk = append(sk, config.KernelMask{
			DistroType:  distro,
			ReleaseMask: ".*",
		})
	}
	return
}

View File

@ -30,7 +30,7 @@ Note: qemu on macOS since v2.12 (24 April 2018) supports Hypervisor.framework.
#### Generate image
$ cd $GOPATH/src/code.dumpstack.io/tools/out-of-tree/tools/qemu-debian-img
$ cd $GOPATH/src/code.dumpstack.io/tools/out-of-tree/tools/qemu-ubuntu-img
$ ./bootstrap.sh
### Fill configuration file

View File

@ -15,9 +15,11 @@ import (
"os/exec"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/povsister/scp"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"golang.org/x/crypto/ssh"
@ -35,6 +37,13 @@ const (
unsupported = "unsupported" // for test purposes
)
const (
DefaultCPUs = 1
DefaultMemory = 512 // megabytes
DefaultSSHRetries = 4
DefaultSSHRetryTimeout = time.Second / 4
)
// Kernel describe kernel parameters for qemu
type Kernel struct {
Name string
@ -42,6 +51,11 @@ type Kernel struct {
InitrdPath string
}
type CPU struct {
Model string
Flags []string
}
// System describe qemu parameters and executed process
type System struct {
arch arch
@ -53,6 +67,8 @@ type System struct {
Cpus int
Memory int
CPU CPU
debug bool
gdb string // tcp::1234
@ -67,8 +83,13 @@ type System struct {
KernelPanic bool
Died bool
sshAddrPort string
Died bool
SSH struct {
AddrPort string
Retries int
RetryTimeout time.Duration
}
// accessible while qemu is running
cmd *exec.Cmd
@ -83,13 +104,13 @@ type System struct {
// accessible after qemu is closed
exitErr error
log zerolog.Logger
Log zerolog.Logger
}
// NewSystem constructor
func NewSystem(arch arch, kernel Kernel, drivePath string) (q *System, err error) {
q = &System{}
q.log = log.With().
q.Log = log.With().
Str("kernel", kernel.KernelPath).
Logger()
@ -109,16 +130,17 @@ func NewSystem(arch arch, kernel Kernel, drivePath string) (q *System, err error
}
q.drivePath = drivePath
// Default values
q.Cpus = 1
q.Memory = 512 // megabytes
q.Cpus = DefaultCPUs
q.Memory = DefaultMemory
q.SSH.Retries = DefaultSSHRetries
q.SSH.RetryTimeout = DefaultSSHRetryTimeout
return
}
func (q *System) SetSSHAddrPort(addr string, port int) (err error) {
// TODO validate
q.sshAddrPort = fmt.Sprintf("%s:%d", addr, port)
q.SSH.AddrPort = fmt.Sprintf("%s:%d", addr, port)
return
}
@ -136,7 +158,7 @@ func getRandomPort(ip string) (addr string) {
return fmt.Sprintf("%s:%d", ip, port)
}
func getFreeAddrPort() (addrPort string) {
func GetFreeAddrPort() (addrPort string) {
timeout := time.Now().Add(time.Second)
for {
if runtime.GOOS == "linux" {
@ -172,17 +194,20 @@ func kvmExists() bool {
return true
}
func (q *System) panicWatcher() {
for {
func (q *System) checkOopsPanic(s string) {
if strings.Contains(s, "Kernel panic") {
q.KernelPanic = true
q.Log.Warn().Msg("kernel panic")
time.Sleep(time.Second)
if strings.Contains(q.Stdout, "Kernel panic") {
q.KernelPanic = true
q.log.Debug().Msg("kernel panic")
time.Sleep(time.Second)
// There is no reason to stay alive after kernel panic
q.Stop()
return
}
// There is no reason to stay alive after kernel panic
q.Stop()
} else if strings.Contains(s, "BUG") {
q.Log.Warn().Msg(s)
time.Sleep(time.Second)
// consider BUG() as non-recoverable state
q.Stop()
} else if strings.Contains(s, "WARNING") {
q.Log.Warn().Msg(s)
}
}
@ -208,14 +233,16 @@ func (q System) cmdline() (s string) {
return
}
// Start qemu process
func (q *System) Start() (err error) {
rand.Seed(time.Now().UnixNano()) // Are you sure?
if q.sshAddrPort == "" {
q.sshAddrPort = getFreeAddrPort()
func (q System) Executable() string {
return "qemu-system-" + string(q.arch)
}
func (q *System) Args() (qemuArgs []string) {
if q.SSH.AddrPort == "" {
q.SSH.AddrPort = GetFreeAddrPort()
}
hostfwd := fmt.Sprintf("hostfwd=tcp:%s-:22", q.sshAddrPort)
qemuArgs := []string{"-nographic",
hostfwd := fmt.Sprintf("hostfwd=tcp:%s-:22", q.SSH.AddrPort)
qemuArgs = []string{"-nographic",
"-hda", q.drivePath,
"-kernel", q.kernel.KernelPath,
"-smp", fmt.Sprintf("%d", q.Cpus),
@ -236,18 +263,33 @@ func (q *System) Start() (err error) {
qemuArgs = append(qemuArgs, "-initrd", q.kernel.InitrdPath)
}
if (q.arch == X86x64 || q.arch == X86x32) && kvmExists() {
qemuArgs = append(qemuArgs, "-enable-kvm", "-cpu", "host")
cpu := "max"
if q.CPU.Model != "" {
cpu = q.CPU.Model
}
for _, flag := range q.CPU.Flags {
cpu += "," + flag
}
qemuArgs = append(qemuArgs, "-cpu", cpu)
if q.arch == X86x64 || q.arch == X86x32 {
if kvmExists() {
qemuArgs = append(qemuArgs, "-enable-kvm")
}
}
if q.arch == X86x64 && runtime.GOOS == "darwin" {
qemuArgs = append(qemuArgs, "-accel", "hvf", "-cpu", "host")
qemuArgs = append(qemuArgs, "-accel", "hvf")
}
qemuArgs = append(qemuArgs, "-append", q.cmdline())
return
}
q.cmd = exec.Command("qemu-system-"+string(q.arch), qemuArgs...)
q.log.Debug().Msgf("%v", q.cmd)
// Start qemu process
func (q *System) Start() (err error) {
q.cmd = exec.Command(q.Executable(), q.Args()...)
q.Log.Debug().Msgf("%v", q.cmd)
if q.pipe.stdin, err = q.cmd.StdinPipe(); err != nil {
return
@ -261,6 +303,7 @@ func (q *System) Start() (err error) {
return
}
q.Log.Debug().Msg("start qemu")
err = q.cmd.Start()
if err != nil {
return
@ -271,7 +314,8 @@ func (q *System) Start() (err error) {
for scanner.Scan() {
m := scanner.Text()
q.Stdout += m + "\n"
q.log.Trace().Str("stdout", m).Msg("")
q.Log.Trace().Str("stdout", m).Msg("qemu")
go q.checkOopsPanic(m)
}
}()
@ -280,13 +324,14 @@ func (q *System) Start() (err error) {
for scanner.Scan() {
m := scanner.Text()
q.Stderr += m + "\n"
q.log.Trace().Str("stderr", m).Msg("")
q.Log.Trace().Str("stderr", m).Msg("qemu")
}
}()
go func() {
q.exitErr = q.cmd.Wait()
q.Died = true
q.Log.Debug().Msg("qemu died")
}()
time.Sleep(time.Second / 10) // wait for immediately die
@ -295,8 +340,6 @@ func (q *System) Start() (err error) {
err = errors.New("qemu died immediately: " + string(q.Stderr))
}
go q.panicWatcher()
if q.Timeout != 0 {
go func() {
time.Sleep(q.Timeout)
@ -310,6 +353,8 @@ func (q *System) Start() (err error) {
// Stop qemu process
func (q *System) Stop() {
q.Log.Debug().Msg("stop qemu process")
// 1 00/01 01 01 SOH (Ctrl-A) START OF HEADING
fmt.Fprintf(q.pipe.stdin, "%cx", 1)
// wait for die
@ -321,34 +366,68 @@ func (q *System) Stop() {
}
}
func (q System) WaitForSSH(timeout time.Duration) error {
func (q *System) WaitForSSH(timeout time.Duration) error {
q.Log.Debug().Msgf("wait for ssh for %v", timeout)
for start := time.Now(); time.Since(start) < timeout; {
time.Sleep(time.Second / 4)
if q.Died || q.KernelPanic {
return errors.New("no ssh (qemu is dead)")
}
client, err := q.ssh("root")
if err != nil {
time.Sleep(time.Second / 10)
q.Log.Debug().Err(err).Msg("")
continue
}
session, err := client.NewSession()
if err != nil {
client.Close()
q.Log.Debug().Err(err).Msg("")
continue
}
_, err = session.CombinedOutput("echo")
if err != nil {
client.Close()
q.Log.Debug().Err(err).Msg("")
continue
}
client.Close()
q.Log.Debug().Msg("ssh is available")
return nil
}
return errors.New("no ssh (timeout)")
}
func (q System) ssh(user string) (client *ssh.Client, err error) {
func (q *System) ssh(user string) (client *ssh.Client, err error) {
cfg := &ssh.ClientConfig{
User: user,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
client, err = ssh.Dial("tcp", q.sshAddrPort, cfg)
for retries := q.SSH.Retries; retries > 0; retries-- {
if q.Died {
err = errors.New("qemu is dead")
return
}
client, err = ssh.Dial("tcp", q.SSH.AddrPort, cfg)
if err == nil {
break
}
time.Sleep(q.SSH.RetryTimeout)
}
return
}
// Command executes shell commands on qemu system
func (q System) Command(user, cmd string) (output string, err error) {
flog := log.With().
Str("kernel", q.kernel.KernelPath).
flog := q.Log.With().Str("kernel", q.kernel.KernelPath).
Str("user", user).
Str("cmd", cmd).
Logger()
@ -357,37 +436,67 @@ func (q System) Command(user, cmd string) (output string, err error) {
client, err := q.ssh(user)
if err != nil {
flog.Debug().Err(err).Msg("ssh connection")
return
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
flog.Debug().Err(err).Msg("new session")
return
}
stdout, err := session.StdoutPipe()
if err != nil {
flog.Debug().Err(err).Msg("get stdout pipe")
return
}
stderr, err := session.StderrPipe()
if err != nil {
flog.Debug().Err(err).Msg("get stderr pipe")
return
}
session.Stderr = session.Stdout
err = session.Start(cmd)
if err != nil {
flog.Debug().Err(err).Msg("start session")
return
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
m := scanner.Text()
output += m + "\n"
flog.Trace().Str("stdout", m).Msg("")
flog.Trace().Str("stdout", m).Msg("qemu command")
}
output = strings.TrimSuffix(output, "\n")
}()
wg.Add(1)
go func() {
defer wg.Done()
scanner := bufio.NewScanner(stderr)
for scanner.Scan() {
m := scanner.Text()
output += m + "\n"
// Note: it prints stderr as stdout
flog.Trace().Str("stdout", m).Msg("qemu command")
}
output = strings.TrimSuffix(output, "\n")
}()
err = session.Wait()
wg.Wait()
return
}
@ -409,67 +518,65 @@ func (q System) AsyncCommand(user, cmd string) (err error) {
}
func (q System) scp(user, localPath, remotePath string, recursive bool) (err error) {
addrPort := strings.Split(q.sshAddrPort, ":")
addr := addrPort[0]
port := addrPort[1]
q.Log.Debug().Msgf("scp[%s] %s -> %s", user, localPath, remotePath)
args := []string{
"-P", port,
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"-o", "LogLevel=error",
sshClient, err := q.ssh(user)
if err != nil {
return
}
defer sshClient.Close()
client, err := scp.NewClientFromExistingSSH(sshClient, &scp.ClientOption{})
if err != nil {
return
}
if recursive {
cmd := exec.Command("ssh", "-V")
err = client.CopyDirToRemote(
localPath,
remotePath,
&scp.DirTransferOption{},
)
} else {
err = client.CopyFileToRemote(
localPath,
remotePath,
&scp.FileTransferOption{},
)
}
return
}
log.Debug().Msgf("%v", cmd)
var output []byte
output, err = cmd.CombinedOutput()
if err != nil {
func (q *System) scpWithRetry(user, localPath, remotePath string, recursive bool) (err error) {
for retries := q.SSH.Retries; retries > 0; retries-- {
if q.Died {
err = errors.New("qemu is dead")
return
}
sshVersion := string(output)
log.Debug().Str("ssh version", sshVersion).Msg("")
if strings.Contains(sshVersion, "OpenSSH_9") {
// This release switches scp from using the
// legacy scp/rcp protocol to using the SFTP
// protocol by default.
//
// To keep compatibility with old distros,
// using -O flag to use the legacy scp/rcp.
//
// Note: old ssh doesn't support -O flag
args = append(args, "-O")
err = q.scp(user, localPath, remotePath, recursive)
if err == nil {
break
}
args = append(args, "-r")
q.Log.Debug().Err(err).Msgf(
"scp[%v] (r: %v) %v -> %v: failed",
user, recursive, localPath, remotePath)
time.Sleep(q.SSH.RetryTimeout)
q.Log.Warn().Msgf("scp: %d retries left", retries)
}
args = append(args, localPath, user+"@"+addr+":"+remotePath)
cmd := exec.Command("scp", args...)
log.Debug().Msgf("%v", cmd)
output, err := cmd.CombinedOutput()
if err != nil || string(output) != "" {
return errors.New(string(output))
}
return
}
// CopyFile from local machine to remote via scp
func (q System) CopyFile(user, localPath, remotePath string) (err error) {
return q.scp(user, localPath, remotePath, false)
return q.scpWithRetry(user, localPath, remotePath, false)
}
// CopyDirectory from local machine to remote via scp
func (q System) CopyDirectory(user, localPath, remotePath string) (err error) {
return q.scp(user, localPath, remotePath, true)
return q.scpWithRetry(user, localPath, remotePath, true)
}
// CopyAndInsmod copy kernel module to temporary file on qemu then insmod it
@ -494,6 +601,17 @@ func (q *System) CopyAndRun(user, path string) (output string, err error) {
return q.Command(user, "chmod +x "+remotePath+" && "+remotePath)
}
// CopyAndRunAsync is copy local file to qemu vm then run it w/o wait for exit
func (q *System) CopyAndRunAsync(user, path string) (err error) {
remotePath := fmt.Sprintf("/tmp/executable_%d", rand.Int())
err = q.CopyFile(user, path, remotePath)
if err != nil {
return
}
return q.AsyncCommand(user, "chmod +x "+remotePath+" && "+remotePath)
}
// Debug is for enable qemu debug and set hostname and port for listen
func (q *System) Debug(conn string) {
q.debug = true
@ -542,7 +660,7 @@ func (q *System) GetKPTI() bool {
// GetSSHCommand returns command for connect to qemu machine over ssh
func (q System) GetSSHCommand() (cmd string) {
addrPort := strings.Split(q.sshAddrPort, ":")
addrPort := strings.Split(q.SSH.AddrPort, ":")
addr := addrPort[0]
port := addrPort[1]

Some files were not shown because too many files have changed in this diff Show More