1
0
Fork 0

Compare commits

...

874 Commits

Author SHA1 Message Date
dump_stack() a79ea1905a
feat: explicit list of source files 2024-05-11 14:48:23 +00:00
dump_stack() 331876127a
feat(api): job description 2024-05-11 14:42:44 +00:00
dump_stack() ee1262e983
fix: re-tagging podman-generated images 2024-05-11 11:10:09 +00:00
dump_stack() e51a528838
ci: pin macOS-12 2024-05-11 11:09:08 +00:00
dump_stack() fc193afe92
refactor(daemon): switch to gob encoding 2024-02-28 03:04:38 +00:00
dump_stack() 1c8e1d068b
feat(daemon): query jobs by update time 2024-02-28 01:51:45 +00:00
dump_stack() c909c2a352
feat(daemon): timestamps 2024-02-27 02:00:07 +00:00
dump_stack() e633fd2e79
feat(daemon): task groups 2024-02-26 09:48:00 +00:00
dump_stack() 2b4db95166
feat(daemon): parallel execution 2024-02-25 18:19:48 +00:00
dump_stack() 6a9bfb503f
refactor: set qemu default values as constants 2024-02-25 18:02:39 +00:00
dump_stack() 29f4821320
feat: add watch flag 2024-02-25 18:02:03 +00:00
dump_stack() bb8344958e
ci: rename logs 2024-02-22 13:46:03 +00:00
dump_stack() 2c66dbc736
docs: add .readthedocs.yaml 2024-02-22 13:06:05 +00:00
dump_stack() cc43cbcc2d
Revert "ci: remove setup.sh"
This reverts commit e203229f00.
2024-02-22 10:33:50 +00:00
dump_stack() e203229f00
ci: remove setup.sh 2024-02-22 10:19:02 +00:00
dump_stack() 7b7c01ac8a
ci: deps 2024-02-22 10:01:01 +00:00
dump_stack() 247f0f90ed
ci: build cache 2024-02-22 09:58:49 +00:00
dump_stack() b1dc739cfa
ci: 8 threads; 128 kernels 2024-02-22 08:56:30 +00:00
dump_stack() 9727c7863c
ci: bump ssh-agent version 2024-02-21 22:52:54 +00:00
dump_stack() 335eeb5ed5
fix: load local archive 2024-02-21 22:52:38 +00:00
dump_stack() 8812cb4293
feat: allow amended commits 2024-02-21 22:48:39 +00:00
dump_stack() a9a42ba33a
fix: C-c/C-v 2024-02-21 18:58:49 +00:00
dump_stack() c17676d0f9
fix: use image load 2024-02-21 18:57:19 +00:00
dump_stack() 8251927821
Revert "fix: load, not import"
This reverts commit 35df5850f5.
2024-02-21 18:55:48 +00:00
dump_stack() 4a1422e010
ci: use the latest podman 2024-02-21 18:43:32 +00:00
dump_stack() 35df5850f5
fix: load, not import 2024-02-21 18:31:33 +00:00
dump_stack() 451566d27f
fix: do not rebuild existing containers w/o files 2024-02-21 18:16:51 +00:00
dump_stack() 61b995f330
Revert "fix: set default value"
This reverts commit b1be394d6b.
2024-02-21 18:08:36 +00:00
dump_stack() b1be394d6b
fix: set default value 2024-02-21 17:57:54 +00:00
dump_stack() cc5e70373f
fix: fetch prebuilt containers in `kernel list-remote` 2024-02-21 17:25:09 +00:00
dump_stack() 86213c171a
fix: do not reimport existing containers 2024-02-21 16:15:38 +00:00
dump_stack() 8a5971379a
feat(distro): add gcc-12 to ubuntu 22.04 image 2024-02-21 16:03:18 +00:00
dump_stack() ce0a4d92fc
refactor: logs 2024-02-21 15:39:07 +00:00
dump_stack() 059ca6dc93
fix: do not fail if no prebuilt container 2024-02-21 15:05:59 +00:00
dump_stack() d317769a64
refactor: logs 2024-02-21 15:00:21 +00:00
dump_stack() d733cde2ae
feat: prebuilt containers 2024-02-21 14:55:04 +00:00
dump_stack() 8a4ce9909b
ci: upload images only after test success 2024-02-21 14:05:47 +00:00
dump_stack() 293dbda2a7
fix: script path 2024-02-21 13:49:38 +00:00
dump_stack() 0c6d5bd371
fix: set build dir for script type 2024-02-21 13:42:08 +00:00
dump_stack() bd2f274887
refactor: logs 2024-02-21 13:30:49 +00:00
dump_stack() 0ba3651c4a
refactor: remove excessing logging 2024-02-21 13:14:55 +00:00
dump_stack() 6bda2344c9
ci: prune docker 2024-02-21 12:53:16 +00:00
dump_stack() 30e0e5f554
fix: correct output path 2024-02-21 12:38:29 +00:00
dump_stack() fcd559124d
ci: maximize build space 2024-02-21 12:35:51 +00:00
dump_stack() 4e01c719a0
ci: PATH 2024-02-21 12:30:32 +00:00
dump_stack() 3c6a2eab32
ci: yaml 2024-02-21 12:17:25 +00:00
dump_stack() a8066428f8
feat: save container images 2024-02-21 12:11:57 +00:00
dump_stack() 94d0cf1ae4
ci: yaml 2024-02-21 11:51:30 +00:00
dump_stack() 987be594a4
ci: yaml 2024-02-21 11:46:46 +00:00
dump_stack() 8aa5391a25
ci: add ssh key 2024-02-21 11:42:30 +00:00
dump_stack() e63059043f
fix: typo 2024-02-21 09:34:28 +00:00
dump_stack() 438f7f7386
feat: flag to skip kernels.toml update 2024-02-21 09:28:49 +00:00
dump_stack() edfaf68b11
fix: do not quit on error 2024-02-21 08:27:34 +00:00
dump_stack() ee232dc54a
ci: maximize build space 2024-02-21 02:11:52 +00:00
dump_stack() ed99ffd2e1
fix: add timeouts to kernel artifact 2024-02-20 23:00:52 +00:00
dump_stack() 375844e2cd
ci(qemu): set timeouts 2024-02-20 22:50:58 +00:00
dump_stack() bdeb395dd9
ci: upload-artifact v3 -> v4 2024-02-20 15:09:58 +00:00
dump_stack() 592fdc8b83
refactor(logs): show timeout 2024-02-20 15:08:44 +00:00
dump_stack() ed34797dc0
nix flake update 2024-02-20 14:35:19 +00:00
dump_stack() 4e5a499db7
fix: remove spaces from examples names 2024-02-20 14:30:17 +00:00
dump_stack() 4202a7bc26
test: do not compare non-marshallable field, refactor 2024-02-20 14:28:14 +00:00
dump_stack() 1356e0dc34
fix: remove toolchain directive 2024-02-20 13:28:43 +00:00
dump_stack() 0314b5ca93
feat: initial daemon implementation 2024-02-20 13:25:31 +00:00
dump_stack() 820208d079
refactor: remove ioutil 2024-02-20 12:44:35 +00:00
dump_stack() 48e5e8cd04
refactor: rand.Seed not required anymore 2024-02-20 12:28:27 +00:00
dump_stack() 9b69738163
refactor: replace deprecated functions 2024-02-20 12:24:40 +00:00
dump_stack() 0a9b16a0f6
feat: log function name instead of path 2024-02-20 12:14:34 +00:00
dump_stack() b4bf0314f0
refactor: remove ioutil (deprecated) 2024-02-20 12:04:24 +00:00
dump_stack() d43cd36858
refactor: debug logging 2024-02-20 12:03:35 +00:00
dump_stack() 67ffa2347a
feat: export GetFreeAddrPort 2024-02-20 12:01:41 +00:00
dump_stack() 6036211172
refactor: errors 2024-02-20 11:37:19 +00:00
dump_stack() 4e92950929
refactor: move commands to cmd/ 2024-02-17 22:38:43 +00:00
dump_stack() 1b3e23d188
feat: watch for kernel panic/warn/bug 2023-12-25 15:03:46 +00:00
dump_stack() 6f53a3f386
feat: use artefact name as default build output filename 2023-10-18 01:28:11 +00:00
dump_stack() 4a8f119b5d
feat: check ssh connection after tests 2023-08-24 00:52:21 +00:00
dump_stack() 56faf1a351
fix: remove type from kernel release 2023-06-20 16:01:38 +00:00
dump_stack() 41c4241d75
fix: do not include some modules on opensuse 15.2 2023-06-20 12:25:02 +00:00
dump_stack() 1cb5c40c77
fix: apply modprobe workaround to all opensuse releases 2023-06-20 06:34:11 +00:00
dump_stack() b86c0508f9
fix: apply af_packet workaround for all except opensuse15 2023-06-19 22:41:35 +00:00
dump_stack() 7e1b2a24f3
fix: use --no-hostonly only on opensuse 12 2023-06-19 22:34:08 +00:00
dump_stack() f8e5d29722
ci: increase timeout to 20 minutes 2023-06-18 14:02:02 +00:00
dump_stack() 47a3d00f1b
fix: use separate container for opensuse 12.3 2023-06-18 12:58:40 +00:00
dump_stack() 3a1fc86251
fix: cleanup boot and modules dirs 2023-06-18 12:25:37 +00:00
dump_stack() f44c275c9d
fix: add dracut on opensuse 12.3 2023-06-18 12:19:36 +00:00
dump_stack() 6ffaa3dad4
fix: cleanup dracut repo after install 2023-06-18 12:02:32 +00:00
dump_stack() 8516ff9e91
ci: include internal errors in status 2023-06-18 11:47:02 +00:00
dump_stack() 983f135097
feat: use dracut from opensuse 12.3 on 12.{1,2} 2023-06-18 11:42:52 +00:00
dump_stack() 2a55d611d1
fix: avoid fail on non-existing dracut module 2023-06-17 15:25:12 +00:00
dump_stack() 77201baa63
fix: use mkinitrd for opensuse 12 and older 2023-06-17 14:44:51 +00:00
dump_stack() 338e184424
fix: do not use replacefiles on opensuse 12 2023-06-17 13:36:00 +00:00
dump_stack() cfc6c1928f
ci: add opensuse 12 to matrix 2023-06-17 13:26:11 +00:00
dump_stack() 7497b67b8b
feat: support for openSUSE 12 2023-06-17 13:19:16 +00:00
dump_stack() 6d725d3581
ci: free more disk space 2023-06-17 12:47:12 +00:00
dump_stack() 312e708116
fix: ignore file conflicts 2023-06-17 09:16:48 +00:00
dump_stack() 0c16dc02dc
feat: support for openSUSE 13 2023-06-17 08:46:20 +00:00
dump_stack() b1f11af512
feat: implement container import 2023-06-17 08:45:17 +00:00
dump_stack() a6944050cc
feat: implement openSUSE 42+ support 2023-06-15 16:30:12 +00:00
dump_stack() c12b0a8829
feat: remove dangling or unused images from local storage after build 2023-06-03 13:05:17 +00:00
dump_stack() b53b3f2632
feat: blocklist buggy oel6 kernels 2023-06-02 09:53:05 +00:00
dump_stack() 79037d61ec
feat: add support for blocklisting kernels 2023-06-02 09:52:17 +00:00
dump_stack() 482378abaf
feat: add smap blocklist for some oracle linux kernels 2023-06-01 14:19:21 +00:00
dump_stack() b0b19d87ca
feat: add flag to count internal errors as part of the success rate 2023-06-01 14:19:20 +00:00
dump_stack() 5396375b47
feat: add smap blocklist for some debian 7 kernels 2023-06-01 14:19:18 +00:00
dump_stack() 6d6ee135cd
feat: make qemu cpu model/flags configurable 2023-06-01 14:19:17 +00:00
dump_stack() c7bc206ad8
feat: add error counters output 2023-06-01 14:19:16 +00:00
dump_stack() c54616594c
ci: use vm with the latest cpu 2023-06-01 14:19:14 +00:00
dump_stack() 4441e84063
fix: add check for shutdown request 2023-06-01 14:18:27 +00:00
dump_stack() 7d28549db7
fix: define autogen var 2023-06-01 14:07:53 +00:00
dump_stack() c1ee3e1ac0
fix: make shutdown var change available 2023-06-01 14:02:32 +00:00
dump_stack() 49388981aa
fix: check that the kernel is from the same distribution 2023-06-01 13:52:55 +00:00
dump_stack() a72d9b77de
test: skip debian list of packages test in CI 2023-06-01 09:51:03 +00:00
dump_stack() 75d740b22b
feat: sleep one second for the first threads 2023-05-30 21:04:06 +00:00
dump_stack() 8f39b502a4
refactor: rename 2023-05-30 20:55:23 +00:00
dump_stack() 916acc9895
feat: add timeout for first threads 2023-05-30 20:52:41 +00:00
dump_stack() 2f9b5d615b
feat: add qemu start up time before ssh and test duration logs 2023-05-30 20:37:02 +00:00
dump_stack() eaba233ca3
feat: add apparmor to debian 9+ base container 2023-05-29 21:55:32 +00:00
dump_stack() bec424b493
feat: caching more deps in the base docker container 2023-05-29 21:38:46 +00:00
dump_stack() 6b4298c55d
feat: add libc6 >= 2.14 on debian wheezy 2023-05-29 20:13:40 +00:00
dump_stack() 56dfabdfa3
feat: filter out pre-release kernels 2023-05-29 19:53:41 +00:00
dump_stack() 1e3b7a867d
fix: add gcc-6 to debian 10 2023-05-29 18:45:28 +00:00
dump_stack() 3b76b4c0cd
fix: add gcc-5 to debian 9 2023-05-29 12:37:26 +00:00
dump_stack() 43d7643ba7
fix: do not invalidate new kernels while updating kbuild 2023-05-29 12:27:00 +00:00
dump_stack() 347fcbae60
ci: disable kbuild refetch 2023-05-29 12:01:46 +00:00
dump_stack() e141f46892
feat: parameter to refetch kbuild 2023-05-29 11:48:25 +00:00
dump_stack() 9271d69bc6
feat: less smart kbuild version guess 2023-05-29 11:21:20 +00:00
dump_stack() 7942bd22fa
feat: implement parse debian kernel version 2023-05-29 10:19:29 +00:00
dump_stack() 17356ac0e4
test: remove obsolete test 2023-05-29 10:18:19 +00:00
dump_stack() ca57ea2dac
refactor: cache control for the container module 2023-05-29 09:09:30 +00:00
dump_stack() 004d4223f9
feat: add gcc-10 to debian 12 container 2023-05-29 08:56:35 +00:00
dump_stack() 443d23bd67
feat: add update for debian release 2023-05-29 08:38:53 +00:00
dump_stack() a3170ada69
feat: filter out unstable debian kernels 2023-05-29 08:16:21 +00:00
dump_stack() 31f4d0e92d
feat: implement list kernels for all distro versions 2023-05-29 08:14:38 +00:00
dump_stack() a748778b72
ci: set kernel cache limit to 128, disable cache on packages fetch 2023-05-28 20:51:49 +00:00
dump_stack() 487b9c520d
feat: add limit amount of kernels to fetch 2023-05-28 20:37:05 +00:00
dump_stack() 4db10c66dc
feat: change max rate limiter timeout to 2s 2023-05-28 20:36:20 +00:00
dump_stack() 5ec6873c57
feat: change rate limiter timeout to 1s 2023-05-28 17:22:16 +00:00
dump_stack() 8e57f7f5ef
feat: set one minute delay before trying with the new rate limiter 2023-05-28 12:42:01 +00:00
dump_stack() c0914820c0
feat: disable burst for mr/metasnap api 2023-05-28 12:31:52 +00:00
dump_stack() 9df0880e3e
feat: add max timeout for mr/metasnap api 2023-05-28 12:16:05 +00:00
dump_stack() 88bfa867fd
feat: include proposed-updates 2023-05-28 12:15:44 +00:00
dump_stack() 013fb42350
feat: match only stable debian releases, backports, and updates 2023-05-28 12:02:59 +00:00
dump_stack() dce7546dd2
fix: trace error before nullifying 2023-05-28 11:38:47 +00:00
dump_stack() b6bc9b36c5
feat: add release to debian kernel 2023-05-28 09:38:36 +00:00
dump_stack() 4fca6b07e1
ci: run full e2e tests on all branches 2023-05-28 06:57:27 +00:00
dump_stack() e618d6b001
fix: use gcc 4.8 for Debian 3.11-{1,2} kernels 2023-05-27 20:51:05 +00:00
dump_stack() 3d70591717
feat: use libc from jessie on wheezy 2023-05-27 18:45:17 +00:00
dump_stack() 5813721dc9
feat: filter out experimental debian kernels 2023-05-27 18:30:54 +00:00
dump_stack() f827a72bee
feat: build for 5.15.0 kernel modules in ol9 container 2023-05-27 16:56:15 +00:00
dump_stack() 5c1bd9a27d
ci: set kernel autogen limit to 256 2023-05-26 22:43:36 +00:00
dump_stack() 99fea27497
ci: increase qemu timeout to 4 minutes 2023-05-26 19:20:44 +00:00
dump_stack() fe2b05d5fc
fix: let deb12 to fetch libssl1 from deb11 2023-05-26 18:15:14 +00:00
dump_stack() 1410fe4660
fix: use wildcard in apt pin priority 2023-05-26 17:32:35 +00:00
dump_stack() 8c49680675
feat: add container timeout parameter to kernel command 2023-05-26 17:01:53 +00:00
dump_stack() 852680e944
feat: remove default timeout 2023-05-26 16:57:57 +00:00
dump_stack() 673b273593
feat: set the default container timeout to 8min 2023-05-26 16:22:02 +00:00
dump_stack() 650cf65fa4
fix: correctly handle already installed kernels for stats 2023-05-26 16:21:01 +00:00
dump_stack() 56e9898d75
fix: make sure apt-get will not download the repo version 2023-05-26 15:44:06 +00:00
dump_stack() 022ced0eba
feat: show amount of kernels failed to install 2023-05-26 15:43:24 +00:00
dump_stack() 603f1c3654
fix: always update containerfile if it is to build 2023-05-26 14:22:14 +00:00
dump_stack() 92f95d8658
feat: add delay for new limit 2023-05-26 13:10:54 +00:00
dump_stack() 1b87946130
feat: fetch only the local repo 2023-05-26 12:56:18 +00:00
dump_stack() d9bfa63ed1
feat: install .deb packages with apt-get 2023-05-26 12:43:44 +00:00
dump_stack() a98fca403c
Revert "fix: set gcc to 4.8 for linux-compiler-3.12-x86"
This reverts commit 3292e5c874.
2023-05-26 10:12:57 +00:00
dump_stack() 1354c029b1
feat: add gcc 4.6,4.8 to debian wheezy container 2023-05-26 10:03:44 +00:00
dump_stack() a57478e38b
fix: install only the latest gcc on debian wheezy 2023-05-26 09:39:03 +00:00
dump_stack() 60de4af81e
fix: break on found 2023-05-26 09:07:10 +00:00
dump_stack() 3292e5c874
fix: set gcc to 4.8 for linux-compiler-3.12-x86 2023-05-26 08:53:21 +00:00
dump_stack() db9516b358
feat: set the default kernel gen retries to 2 2023-05-26 08:20:21 +00:00
dump_stack() b17433ab42
fix: check for shutdown after unlock 2023-05-25 23:01:42 +00:00
dump_stack() d7cf88e34f
feat: add package name, use for debian check if already installed 2023-05-25 23:00:42 +00:00
dump_stack() 1a9fdf0917
feat: support for excluding kernels using regex 2023-05-25 18:35:27 +00:00
dump_stack() 0dceacd2df
fix: change artifact info padding 2023-05-25 16:32:44 +00:00
dump_stack() aceaf96448
feat: show only the last step that was successful/failed 2023-05-25 13:29:11 +00:00
dump_stack() b24008ad3f
fix: typo 2023-05-25 12:48:25 +00:00
dump_stack() a930b8d9b8
fix: typo 2023-05-25 12:46:21 +00:00
dump_stack() e18ddf8a11
fix: install kernel-firmware package on centos6 2023-05-25 12:42:16 +00:00
dump_stack() 2c462da3de
fix: change timestamp and padding in log query 2023-05-25 12:35:44 +00:00
dump_stack() 5e67783bb8
ci: modprobe xfs in case no uio/9p module 2023-05-25 12:12:07 +00:00
dump_stack() 9a5a42202e
feat: add linux-firmware to centos container 2023-05-25 12:05:32 +00:00
dump_stack() 4fa8c2c02b
feat: always show script 2023-05-25 12:04:29 +00:00
dump_stack() 301e2fde1a
fix: set internal error on module scp connection refused 2023-05-25 11:04:30 +00:00
dump_stack() a646e9cf33
refactor: rename parameter 2023-05-25 10:31:24 +00:00
dump_stack() b631767d98
fix: correct support for empty release 2023-05-25 10:25:26 +00:00
dump_stack() 6db5ffc8c2
fix: correct success rate 2023-05-25 10:02:45 +00:00
dump_stack() 10c5fb7ac4
feat: generate all kernels if no distro is set 2023-05-25 06:36:02 +00:00
dump_stack() ad3a76320e
ci: try also to load 9p 2023-05-25 06:14:43 +00:00
dump_stack() 65b49996e6
fix: use nullstring in lastlog 2023-05-24 22:48:58 +00:00
dump_stack() 7806a774e4
feat: add internal error to log database 2023-05-24 22:46:04 +00:00
dump_stack() 605871d17a
fix: check only distro id 2023-05-24 21:58:25 +00:00
dump_stack() 49760c065e
fix: set error when qemu is dead 2023-05-24 19:46:11 +00:00
dump_stack() 5749f7d96e
Revert "fix: check client is not nil before closing"
This reverts commit 94285cd94d.
2023-05-24 19:43:40 +00:00
dump_stack() 94285cd94d
fix: check client is not nil before closing 2023-05-24 19:36:54 +00:00
dump_stack() bf9a43c1b8
feat: reduce the default retry count to 4 2023-05-24 18:51:02 +00:00
dump_stack() 7d0ee9a1dc
fix: do not continue retrying when qemu is dead 2023-05-24 18:47:46 +00:00
dump_stack() b8058bffb0
feat: exclude internal errors from success rate 2023-05-24 18:44:14 +00:00
dump_stack() 04d6f0dbd3
fix: exit on panic while waiting for ssh 2023-05-24 18:39:01 +00:00
dump_stack() 11bf6eda38
ci: modprobe uio 2023-05-24 18:03:04 +00:00
dump_stack() 3d8faafcce
Revert "ci: run qemu in 2 threads"
This reverts commit 2c31dd25f9.
2023-05-24 17:40:13 +00:00
dump_stack() 2c31dd25f9
ci: run qemu in 2 threads 2023-05-24 17:07:25 +00:00
dump_stack() f36c412250
ci: rename workflow 2023-05-24 16:53:14 +00:00
dump_stack() abcf2c1013
fix: support updating containers 2023-05-24 16:43:25 +00:00
dump_stack() ee90bfaa72
fix: support force kernel reinstallation 2023-05-24 16:29:52 +00:00
dump_stack() 3271710653
refactor: add genkernel function 2023-05-24 16:13:12 +00:00
dump_stack() 3dd88bac0e
ci: rename jobs 2023-05-24 16:03:10 +00:00
dump_stack() 2c2124bdb0
refactor: merge guess with when no kernels defined 2023-05-24 15:46:58 +00:00
dump_stack() ceaacade0b
ci: disable selinux to allow to run from unit 2023-05-24 15:35:50 +00:00
dump_stack() 7b9935dc13
ci: do full e2e tests only on master 2023-05-24 15:20:12 +00:00
dump_stack() 4171954350
ci: add full e2e tests on vm 2023-05-24 15:17:28 +00:00
dump_stack() 707cf6f268
feat: use libc from jessie to support the latest backports 2023-05-24 14:36:54 +00:00
dump_stack() a4b20299cd
fix: do not use custom sources.list on debian 12 bookworm 2023-05-24 12:39:13 +00:00
dump_stack() 93f66b08f4
feat: add debian 12 bookworm image 2023-05-24 10:28:23 +00:00
dump_stack() c1fceb6ce6
fix: use kernel version (same as uname -r) for /lib/modules path 2023-05-24 10:01:25 +00:00
dump_stack() e0295664af
fix: typo 2023-05-24 09:32:43 +00:00
dump_stack() a9cd7ba18b
fix: use initramfs-tools from backports only on wheezy 2023-05-24 09:30:52 +00:00
dump_stack() 3740a07619
feat: use initramfs-tools from backports 2023-05-24 09:24:41 +00:00
dump_stack() fe96366eba
feat: support Debian 12 Bookworm 2023-05-24 09:14:18 +00:00
dump_stack() 48ba7b7c7b
feat: match the gcc version to distinguish between Debian releases 2023-05-24 09:12:33 +00:00
dump_stack() ae00b57471
fix: match gcc-10+ 2023-05-24 09:10:56 +00:00
dump_stack() 408e330b27
feat: add backports repo for debian 2023-05-24 09:10:32 +00:00
dump_stack() ac4fcaaa91
fix: no need to have separate case for debian anymore 2023-05-24 09:09:16 +00:00
dump_stack() bff4422098
feat: update existing container in case of containerfile changes 2023-05-24 09:08:31 +00:00
dump_stack() 4a5376eb43
fix: copy debian /usr/src with folow symbolic links 2023-05-24 07:30:18 +00:00
dump_stack() 02bca8e0ae
feat: use debian codename for container image 2023-05-24 07:17:05 +00:00
dump_stack() 26a65924df
feat: use numerical debian release string by default 2023-05-24 07:15:47 +00:00
dump_stack() 77e118be64
fix: use distro equal check 2023-05-23 23:02:09 +00:00
dump_stack() 66d45e69d9
fix: change return values of macos placeholder 2023-05-23 22:44:38 +00:00
dump_stack() c35def964e
fix: macos build 2023-05-23 22:40:08 +00:00
dump_stack() e2d66db16f
feat: add kernel install to distro interface 2023-05-23 22:36:46 +00:00
dump_stack() daaef89050
feat: add kernels list to distro interface 2023-05-23 22:00:20 +00:00
dump_stack() c1ec4add81
refactor: move kernelinfo to distro module 2023-05-23 21:33:50 +00:00
dump_stack() 0edb0ac0af
refactor: get rid of too many parameters 2023-05-23 21:21:06 +00:00
dump_stack() c6e06d8e3e
feat: multiple commands to run in container 2023-05-23 20:46:09 +00:00
dump_stack() e302c447f5
Revert "feat: numerical release strings"
This reverts commit 330519f617.
2023-05-23 17:53:58 +00:00
dump_stack() 330519f617
feat: numerical release strings 2023-05-23 17:48:15 +00:00
dump_stack() 7ca989fd8d
feat: use distro info to create the container 2023-05-23 16:54:45 +00:00
dump_stack() f2ce20e53b
feat: change interface from ID()/Release() to Distro() with both 2023-05-23 16:26:36 +00:00
dump_stack() 6f40fa554e
fix: avoid use of range variable 2023-05-23 13:46:16 +00:00
dump_stack() a1999115db
refactor: move container generation to distro modules 2023-05-23 13:20:48 +00:00
dump_stack() ff7bed76f2
fix: search in source volumes not destination ones 2023-05-23 09:34:29 +00:00
dump_stack() 14320faca8
fix: make sure we only remove the package extension 2023-05-23 09:22:50 +00:00
dump_stack() fa5d0adb39
feat: implement global docker timeout 2023-05-22 14:41:00 +00:00
dump_stack() 2fe3103603
refactor: add volume list 2023-05-22 14:28:28 +00:00
dump_stack() 2eb91ffac9
fix: missed unlock 2023-05-22 07:20:40 +00:00
dump_stack() 519b8d190a
fix: check that max is reached after unlock 2023-05-22 06:58:13 +00:00
dump_stack() d507b86373
fix: do not match generic-pae 2023-05-22 06:56:12 +00:00
dump_stack() e1dd7c18be
fix: workaround for grub-install issues 2023-05-22 06:55:06 +00:00
dump_stack() c076db3505
fix: avoid stderr 2023-05-22 06:01:40 +00:00
dump_stack() 632e4f5ffc
feat: parallel kernel installation 2023-05-21 21:43:18 +00:00
dump_stack() b02da8adeb
fix: typo 2023-05-21 20:39:52 +00:00
dump_stack() 31b0945a15
fix: use the default config dirs provider 2023-05-21 20:31:47 +00:00
dump_stack() ba03d4a049
feat: use all available kernels in case of no targets defined 2023-05-21 14:40:24 +00:00
dump_stack() b88ab7cca3
feat: emulate a cpu on macOS with all features supported by KVM 2023-05-21 14:33:46 +00:00
dump_stack() b8817a4930
feat: less verbose wget output 2023-05-18 22:31:34 +00:00
dump_stack() e767299222
feat: show both release number and name for debian 2023-05-18 22:26:42 +00:00
dump_stack() f0c82f9289
feat: implement list of available distros 2023-05-18 22:02:41 +00:00
dump_stack() 9c237b52db
test: remove obsolete match test 2023-05-18 21:40:22 +00:00
dump_stack() 120fcdc56b
feat: initial implementation of distro interface 2023-05-18 21:37:07 +00:00
dump_stack() c3774714fd
refactor: move distro id to separate file 2023-05-18 20:02:09 +00:00
dump_stack() 73f5df2425
feat!: new kernel config structure
BREAKING CHANGE: kernel definition in the configuration files has switched

from

  [[targets]]
  distro = { id = "Ubuntu", release = "18.04" }
  release_mask = ".*"

to

  [[targets]]
  distro = { id = "Ubuntu", release = "18.04" }
  kernel = { regex = ".*" }
2023-05-18 18:48:09 +00:00
dump_stack() d551cc8fc4
refactor: use the same name as in config 2023-05-18 18:27:51 +00:00
dump_stack() 6385ce92e3
feat!: rename supported kernels to targets
BREAKING CHANGE: .out-of-tree.toml: s/[[supported_kernels]]/[[targets]]/
2023-05-18 18:13:09 +00:00
dump_stack() 6939d64226
fix: continue in case of no/wrong preload .out-of-tree.toml 2023-05-18 16:43:15 +00:00
dump_stack() 071608805e
ci: typo 2023-05-18 16:25:36 +00:00
dump_stack() 80e57cb60c
ci: fix .out-of-tree.toml 2023-05-18 16:21:42 +00:00
dump_stack() bcf8de336f
feat!: introduce new distribution structure
BREAKING CHANGE: distro definition in the configuration files has switched

from

  [[supported_kernels]]
  distro_type = "Ubuntu"
  distro_release = "16.04"
  ...

to

  [[supported_kernels]]
  distro = { id = "Ubuntu", release = "16.04" }
  ...
2023-05-18 16:07:24 +00:00
dump_stack() 8d2d56bea3
build: add version for flake 2023-05-18 12:53:46 +00:00
dump_stack() 17256317c9
test: fix function name 2023-05-18 12:52:52 +00:00
dump_stack() 26faa53f8b
refactor: move cmdline generation out of distro switch 2023-05-18 12:07:59 +00:00
dump_stack() 5ccca6617f
ci: wait for the previous debian cache workflow to finish 2023-05-18 12:00:12 +00:00
dump_stack() 0589ae25e4
ci: automatically cancel previous jobs on new commits 2023-05-18 11:59:15 +00:00
dump_stack() d6670ee8d9
fix: typo 2023-05-18 11:50:17 +00:00
dump_stack() 6a338fc6ad
refactor: move ubutu install/cleanup to module 2023-05-18 11:46:12 +00:00
dump_stack() 407c1a7975
refactor: move oracle linux install/cleanup to module 2023-05-18 11:42:25 +00:00
dump_stack() 99c9346995
refactor: rename debian functions to common interface 2023-05-18 11:34:46 +00:00
dump_stack() 90f7e62888
Revert "refactor: remove debian functions to common interface"
This reverts commit 412199966e.
2023-05-18 11:32:48 +00:00
dump_stack() 412199966e
refactor: remove debian functions to common interface 2023-05-18 11:31:54 +00:00
dump_stack() ef35743579
refactor: move oraclelinux/ubuntu kernels match to modules 2023-05-18 11:28:06 +00:00
dump_stack() 71c2b2001c
refactor: move oracle linux runs/envs to module 2023-05-18 11:08:23 +00:00
dump_stack() 4eed03ec2a
fix: install/remove kernel in one layer 2023-05-18 10:50:24 +00:00
dump_stack() 3cd901b1be
fix: add appstream repo for centos8, refactor 2023-05-18 10:37:59 +00:00
dump_stack() 73b1edd1cb
fix: clean ubuntu modules package 2023-05-18 09:50:02 +00:00
dump_stack() a607ce62d1
feat: set -cpu max, also for non-kvm 2023-05-17 17:45:52 +00:00
dump_stack() e1ac462642
feat: test lkrg alert 2023-05-17 17:04:13 +00:00
dump_stack() 304bb74ecf
fix: keep target with random name 2023-05-17 16:55:22 +00:00
dump_stack() 8486a0337d
fix: dereference symbolic links only for debian 2023-05-17 16:15:58 +00:00
dump_stack() 2a6e775b69
ci: disable testing with script type as redundant 2023-05-17 15:39:18 +00:00
dump_stack() f2e43f891a
ci: remove check for kvm 2023-05-17 13:40:54 +00:00
dump_stack() 5707559c28
docs: update readme 2023-05-17 13:32:31 +00:00
dump_stack() 51a67db71a
ci: disable fail-fast for examples 2023-05-17 13:22:30 +00:00
dump_stack() 6df94d7e15
feat: add distro type/release to target name 2023-05-17 12:58:14 +00:00
dump_stack() d45d5731a9
fix: dereference symbolic links when copying /usr/src 2023-05-17 12:41:09 +00:00
dump_stack() 950cee6df0
feat: add --dump to dump cache 2023-05-17 12:33:59 +00:00
dump_stack() 7e3f02f3a9
ci: checkout first 2023-05-17 12:04:13 +00:00
dump_stack() 360afdb05e
ci: 755 2023-05-17 12:02:16 +00:00
dump_stack() 0cb9128810
ci: reclaim some space in runner vm 2023-05-17 11:59:15 +00:00
dump_stack() c3f6e90137
ci: fix log names 2023-05-17 11:42:31 +00:00
dump_stack() be5f114694
fix: decrease log level 2023-05-17 11:25:07 +00:00
dump_stack() f1429d3e1d
ci: return back oracle linux to the matrix 2023-05-17 11:11:34 +00:00
dump_stack() fb6ef30aaa
Update readme 2023-05-17 11:09:58 +00:00
dump_stack() eb54ec4a24
ci: do not run on docs/readme change 2023-05-17 11:09:26 +00:00
dump_stack() 5d95422624
ci: typo 2023-05-17 10:36:11 +00:00
dump_stack() e95e8d299f
ci: rename workflows 2023-05-17 10:32:49 +00:00
dump_stack() 3de5f5e12d
ci: add kernel module tests to e2e matrix 2023-05-17 10:24:28 +00:00
dump_stack() a68ceacb43
feat: parallel download of deb packages 2023-05-17 10:04:37 +00:00
dump_stack() 72f52d3200
fix: support --no-headers for debian 2023-05-17 06:50:52 +00:00
dump_stack() 706d442948
fix: install gcc for all debian releases 2023-05-17 06:24:18 +00:00
dump_stack() f7b9f538b4
fix: wait 10 seconds before query with new limit 2023-05-17 06:05:37 +00:00
dump_stack() d70be6a306
feat: install all gcc versions for debian base image 2023-05-17 05:48:28 +00:00
dump_stack() 15a6f38631
fix: lower limit also on connection refused 2023-05-17 05:39:24 +00:00
dump_stack() ac2166b050
refactor: avoid potential typos 2023-05-17 05:28:34 +00:00
dump_stack() f630fa6f49
fix: add timeout to wget 2023-05-17 05:12:32 +00:00
dump_stack() 6e92010dc0
feat: set default limiter timeout to 50ms 2023-05-17 05:05:49 +00:00
dump_stack() 008ce1cdbf
refactor: remove unused code 2023-05-17 05:03:16 +00:00
dump_stack() 5270f2438c
feat: set default limiter timeout to 100ms 2023-05-17 05:02:57 +00:00
dump_stack() 204413af9e
fix: add linux-compiler-* to dependencies 2023-05-17 04:52:04 +00:00
dump_stack() c43f16733e
fix: add libssl-dev for debian base image 2023-05-17 04:51:32 +00:00
dump_stack() 74898924da
ci: set 60 minutes timeout for e2e test jobs 2023-05-17 04:18:22 +00:00
dump_stack() c6acbef7f5
feat: set timeout on first tries to install 2023-05-16 20:30:04 +00:00
dump_stack() d27847c533
ci: typo 2023-05-16 20:25:54 +00:00
dump_stack() eec740b208
ci: return back to Github Actions for E2E testing 2023-05-16 20:13:09 +00:00
dump_stack() bd2dfe3e4e
ci: run debian cache at midnight, change batch size 2023-05-16 19:44:15 +00:00
dump_stack() 6ab8f2fea1
ci: rename ubuntu workflow 2023-05-16 19:40:39 +00:00
dump_stack() e7614ef3a7
feat: use snapshots only in case of failed fetch from repos 2023-05-16 19:37:57 +00:00
dump_stack() 18426775b9
fix: deb packages install command 2023-05-16 19:20:58 +00:00
dump_stack() e87add8e44
ci: remove name 2023-05-16 19:12:03 +00:00
dump_stack() b8d0319097
ci: typo 2023-05-16 19:11:50 +00:00
dump_stack() 968c4d7363
ci: use bash 2023-05-16 19:05:32 +00:00
dump_stack() 246e0efac1
ci: typo 2023-05-16 19:03:30 +00:00
dump_stack() 4cc0166a92
ci: install less packages, add symlink for qemu 2023-05-16 18:59:00 +00:00
dump_stack() 87e9790f79
ci: switch to root 2023-05-16 18:54:59 +00:00
dump_stack() 2af2692a66
ci: correct directory 2023-05-16 18:50:39 +00:00
dump_stack() ef1ebf6f23
ci: parallel testing of examples 2023-05-16 18:45:59 +00:00
dump_stack() 8a7439d7a9
ci: use dnf 2023-05-16 18:41:19 +00:00
dump_stack() 8d93517be7
ci: switch to almalinux 2023-05-16 18:39:34 +00:00
dump_stack() da637c2923
ci: typo 2023-05-16 18:28:37 +00:00
dump_stack() 6f18f6c779
ci: add timeout before enable linger 2023-05-16 18:09:23 +00:00
dump_stack() 66026ebf5a
fix: typo 2023-05-16 17:55:30 +00:00
dump_stack() f5b1283690
fix: try to install debian packages 3 times 2023-05-16 17:52:15 +00:00
dump_stack() f906e3187f
ci: set XDG_RUNTIME_DIR 2023-05-16 17:36:17 +00:00
dump_stack() ba3e6072d4
ci: enable linger 2023-05-16 17:03:55 +00:00
dump_stack() 0338483e72
ci: typo 2023-05-16 16:54:39 +00:00
dump_stack() f2d0035c0e
ci: fix parameters 2023-05-16 16:54:12 +00:00
dump_stack() 7e87567070
ci: add stdout logs 2023-05-16 16:54:05 +00:00
dump_stack() 2be8b14fc7
ci: add to kvm group 2023-05-16 16:03:38 +00:00
dump_stack() a043b998ff
ci: use full path to logs 2023-05-16 15:52:54 +00:00
dump_stack() c527544107
ci: typo 2023-05-16 15:46:06 +00:00
dump_stack() ef4eeea6a2
ci: typo 2023-05-16 15:43:43 +00:00
dump_stack() d4fe5d8f15
ci: do not keep ssh connection 2023-05-16 15:37:14 +00:00
dump_stack() e3652db73b
ci: set droplet region 2023-05-16 15:06:46 +00:00
dump_stack() f571635848
ci: install recommends
rootless podman does not work without it
2023-05-16 14:53:27 +00:00
dump_stack() 2cc06ed092
feat: log output on container error 2023-05-16 14:47:28 +00:00
dump_stack() adc450c201
ci: --no-install-recommends 2023-05-16 14:38:15 +00:00
dump_stack() 5de9c50579
ci: wait for cloud-init to finish 2023-05-16 14:26:22 +00:00
dump_stack() a49d705846
ci: switch to apt 2023-05-16 14:18:14 +00:00
dump_stack() 67630e080b
ci: download by 64 packages 2023-05-16 14:15:18 +00:00
dump_stack() 697eb18552
ci: do not start new fetch after 2 hours 2023-05-16 14:14:24 +00:00
dump_stack() a855a6e70c
ci: switch to apt-get with 360 seconds timeout 2023-05-16 14:11:51 +00:00
dump_stack() d8aafe081f
ci: switch to apt 2023-05-16 14:06:37 +00:00
dump_stack() 4e956d10ad
ci: yet another try to get rid of apt daemon 2023-05-16 13:58:38 +00:00
dump_stack() a6d4fe362c
ci: update repos 2023-05-16 13:48:56 +00:00
dump_stack() 6ff9fcc2c0
ci: switch to github actions build 2023-05-16 13:44:40 +00:00
dump_stack() fca20d2d63
ci: set container registries 2023-05-16 13:15:28 +00:00
dump_stack() 4fe7a0906e
ci: set shell (bash) for the user 2023-05-16 13:05:09 +00:00
dump_stack() 22ddada2f7
ci: fix user authorized keys 2023-05-16 13:00:34 +00:00
dump_stack() 08dcfd52a3
Revert "ci: run from root"
This reverts commit 3dd9071057.
2023-05-16 12:48:08 +00:00
dump_stack() 3dd9071057
ci: run from root 2023-05-16 12:17:18 +00:00
dump_stack() decdf0625b
ci: fix yml 2023-05-16 12:12:48 +00:00
dump_stack() 2ee26c989d
ci: typo 2023-05-16 12:09:33 +00:00
dump_stack() 73eb3bf70d
ci: dependencies 2023-05-16 12:08:31 +00:00
dump_stack() 65688dcd9d
ci: ignore e2e.yml 2023-05-16 12:06:59 +00:00
dump_stack() d447b91908
ci: typo 2023-05-16 12:06:32 +00:00
dump_stack() 926631e19d
ci: e2e testing with kvm 2023-05-16 12:04:28 +00:00
dump_stack() 5ecacf00bd
build: nix flakes 2023-05-16 11:47:17 +00:00
dump_stack() 4d950d7302
refactor: move debian-related functions to debian module 2023-05-16 09:24:34 +00:00
dump_stack() e1ae427757
fix: add gcc-4.6 for wheezy 2023-05-16 09:23:44 +00:00
dump_stack() 4fd2fd31d2
fix: add podman to shell.nix 2023-05-16 09:22:37 +00:00
dump_stack() 046c553ed5
fix: cleanup tar.gz after unpack 2023-05-16 08:17:15 +00:00
dump_stack() a7e5827ff9
ci: mirror debian packages after updating the metadata cache 2023-05-16 08:04:26 +00:00
dump_stack() 7f6fe18d0a
ci: do not start new fetch after 4 hours 2023-05-16 07:52:00 +00:00
dump_stack() 53183245ce
fix: old dhclient requires interface name 2023-05-15 18:55:27 +00:00
dump_stack() 8f1a2afc53
feat: modify cache api to store more than one kernel for version 2023-05-15 18:41:12 +00:00
dump_stack() 8949b53ccc
fix: return after found 2023-05-15 18:17:31 +00:00
dump_stack() 4ea7fbfbf9
refactor: move getting kernel by version to function 2023-05-15 17:57:44 +00:00
dump_stack() 9b33140cc8
feat: always set last fetch date 2023-05-15 17:42:31 +00:00
dump_stack() c13b595ab1
refactor: unexport getDebianKernel 2023-05-15 17:40:53 +00:00
dump_stack() 7c2957dafb
feat: lower debug level 2023-05-15 15:35:48 +00:00
dump_stack() 3e64c99b1c
fix: show actual refetch date 2023-05-15 15:29:23 +00:00
dump_stack() b0c795153a
feat: bump go version 2023-05-15 15:09:09 +00:00
dump_stack() 43bb539db8
ci: rename job 2023-05-15 14:28:52 +00:00
dump_stack() 3959a23efa
fix: typo 2023-05-15 14:26:38 +00:00
dump_stack() 262362659a
ci: archive logs after run 2023-05-15 14:25:41 +00:00
dump_stack() bbdc9712c5
ci: fix typo 2023-05-15 14:25:24 +00:00
dump_stack() 53878bcb23
ci: deb package mirroring 2023-05-15 14:21:49 +00:00
dump_stack() 24c0a05ab0
feat: do not fail if at least one download succeeds 2023-05-15 14:11:08 +00:00
dump_stack() 40b1b223d4
feat: support to specify maximum amount of downloads 2023-05-15 13:41:37 +00:00
dump_stack() 97ee8f09a4
feat: exit with error if no packages found to download 2023-05-15 13:41:04 +00:00
dump_stack() 346e24db6b
feat: support for ignoring packages exists on the mirror 2023-05-15 13:19:17 +00:00
dump_stack() d118ab03c3
feat: check for mirrored packages 2023-05-15 13:07:56 +00:00
dump_stack() e1ac75d0fa
ci: always refetch 2023-05-15 12:17:04 +00:00
dump_stack() 34b5693ae8
test: do not rebuild the cache every time
We can safely do that because the cache library does not break on structure changes.
2023-05-15 12:07:28 +00:00
dump_stack() 562abec7f4
feat: fail fast on regexp error 2023-05-15 11:50:54 +00:00
dump_stack() 883c8ee6cb
feat: do not download if already exists 2023-05-15 11:48:06 +00:00
dump_stack() 689bf1098a
feat: command to download debian packages 2023-05-15 11:35:15 +00:00
dump_stack() eda23b45b9
feat: combined packages list 2023-05-15 11:14:59 +00:00
dump_stack() 6c1f9f8606
feat: add command for distro-related helpers 2023-05-15 10:50:56 +00:00
dump_stack() 5e11c1939d
fix: set volumes after search 2023-05-15 10:08:34 +00:00
dump_stack() 52c452debe
feat: pass kernel mask to get container volumes 2023-05-15 09:43:48 +00:00
dump_stack() a05b579086
feat: remove the overhead for something not used 2023-05-15 08:21:53 +00:00
dump_stack() d089ad4931
feat: return complete repo info 2023-05-15 08:19:35 +00:00
dump_stack() 0f799b0d5a
feat: implement part of metasnap api 2023-05-15 07:30:00 +00:00
dump_stack() 21882ff461
test: disable fail-fast 2023-05-14 22:37:52 +00:00
dump_stack() 82ba7bd7af
fix: always use the next command because dpkg returns error if deps not met 2023-05-14 22:34:46 +00:00
dump_stack() c0603404a8
feat: use the latest snapshot for Debian Jessie and Stretch 2023-05-14 22:20:34 +00:00
dump_stack() e0b63aee1a
fix: handle existing debian kernels correctly 2023-05-14 22:16:32 +00:00
dump_stack() b2383ba442
feat: add container volumes list function 2023-05-14 22:15:43 +00:00
dump_stack() 1b2d636410
feat: introduce kernel version 2023-05-14 22:00:29 +00:00
dump_stack() de5ebd6455
feat: mount container volumes based on kernel info 2023-05-14 21:39:41 +00:00
dump_stack() 42be5161d8
feat: define container volumes as a structure 2023-05-14 21:24:01 +00:00
dump_stack() 1a2929a1aa
fix: wrong image release 2023-05-14 21:19:06 +00:00
dump_stack() 5778f39ac4
fix: no module-init-tools on debian 9 2023-05-14 21:17:21 +00:00
dump_stack() 032bba6ee5
fix: sed edit in place 2023-05-14 21:12:24 +00:00
dump_stack() 23a28f33d4
test: add debian to e2e test matrix 2023-05-14 21:06:46 +00:00
dump_stack() 8bb211cf01
feat: list debian kernels implementation 2023-05-14 21:04:22 +00:00
dump_stack() c75f10e692
feat: search file in directory by substring 2023-05-14 21:03:57 +00:00
dump_stack() b4a75dc66e
feat: install dependencies, use http for deb urls 2023-05-14 20:32:44 +00:00
dump_stack() f85ad89130
fix: match exact version (binpackages can return more than one) 2023-05-14 19:48:15 +00:00
dump_stack() 501dcb23ae
feat: set default refetch timeout to 14 days 2023-05-14 18:08:26 +00:00
dump_stack() 19081aea5d
test: skip MatchImagePkg test in CI 2023-05-14 17:34:49 +00:00
dump_stack() a090328b1c
fix: exp should be filtered in other place 2023-05-14 17:29:53 +00:00
dump_stack() 2452b090b0
feat: use semver wrapper to match debian release 2023-05-14 17:14:05 +00:00
dump_stack() b09b51840c
feat: fetch kbuild from linux-tools for older kernels 2023-05-14 16:54:12 +00:00
dump_stack() a13a78e292
feat: ignore experimental kernels 2023-05-14 14:34:11 +00:00
dump_stack() e10b50a41a
test: pretty print result 2023-05-14 14:06:04 +00:00
dump_stack() 87ed8da5b8
fix: ignore dbg packages 2023-05-14 14:05:10 +00:00
dump_stack() e9ced28b29
feat: match kbuild 2023-05-14 13:59:25 +00:00
dump_stack() f8f3424e1e
refactor: move cache-related functions 2023-05-14 12:37:45 +00:00
dump_stack() 0fd9d80940
fix: match the "all" arch of the debian kernel packages 2023-05-14 11:41:26 +00:00
dump_stack() fa23cdfc54
feat: support matching several architectures 2023-05-14 11:38:41 +00:00
dump_stack() 6bb0da5082
feat: support multiple headers packages 2023-05-14 11:06:54 +00:00
dump_stack() 6b8d97be39
test: fix kernel release test 2023-05-14 10:30:35 +00:00
dump_stack() 7502221cfd
feat: support adding dependencies to debian kernel packages 2023-05-14 10:27:35 +00:00
dump_stack() 181115d914
fix: use only 3.2 -> 3.7 kernels on wheezy 2023-05-14 10:13:01 +00:00
dump_stack() f91534aa6a
feat: add common debian packages 2023-05-14 10:12:33 +00:00
dump_stack() 0ee813124d
feat: disable deb package installation without meeting dependencies 2023-05-14 10:12:20 +00:00
dump_stack() f7f8a27dfa
feat: use old debian containers from snapshots 2023-05-14 09:53:59 +00:00
dump_stack() bb676fa491
fix: incorrect comment about valid releases 2023-05-14 07:27:07 +00:00
dump_stack() 78626c10af
feat: get container path relative to config dir 2023-05-14 07:24:49 +00:00
dump_stack() 93a1b74e34
feat: add dummy func to list debian kernels 2023-05-14 07:19:52 +00:00
dump_stack() 73139e1b91
refactor: docker -> container 2023-05-14 07:00:00 +00:00
dump_stack() e231121082
feat: install debian kernels 2023-05-14 06:53:32 +00:00
dump_stack() 9e8a381de0
feat: implement basic debian container image 2023-05-13 19:51:57 +00:00
dump_stack() 17295cad89
feat: list debian kernels 2023-05-13 19:51:06 +00:00
dump_stack() 29010b2a1b
feat: support getting cached kernels 2023-05-13 19:48:01 +00:00
dump_stack() 0bf2acb043
feat: lowercase release strings 2023-05-13 19:08:46 +00:00
dump_stack() d0693e64c4
feat: make sure of cache thread-safety 2023-05-13 18:49:11 +00:00
dump_stack() 70fec57d2f
feat: debian versions cache 2023-05-13 18:43:15 +00:00
dump_stack() 2cc84ac962
fix: match kernel release correctly 2023-05-13 17:42:25 +00:00
dump_stack() 143e54984d
test: cover more kernel releases 2023-05-13 17:32:02 +00:00
dump_stack() c6d0ee0102
feat: lower debug level for some container-related stuff 2023-05-13 16:11:45 +00:00
dump_stack() 39f4cd4cfd
fix: check if cache already exists 2023-05-13 16:08:59 +00:00
dump_stack() 065aca24b0
fix: fill config value in case of default directory 2023-05-13 15:56:54 +00:00
dump_stack() baf282ec2c
fix: typo 2023-05-13 15:56:11 +00:00
dump_stack() 9d1bbcc288
feat: support changing dotdir 2023-05-13 15:45:21 +00:00
dump_stack() 804b6b56ba
feat: debug logging for download 2023-05-13 12:56:54 +00:00
dump_stack() 5975898225
test: use tmpdir to not interfere with other tests 2023-05-13 12:56:25 +00:00
dump_stack() dc8d667930
fix: filename is already full path 2023-05-13 12:50:26 +00:00
dump_stack() 05f210494a
test: missing panic 2023-05-13 12:40:36 +00:00
dump_stack() fb5411503c
fix: log error if no cache found 2023-05-13 12:38:03 +00:00
dump_stack() 1818d38b03
test: cover download debian cache 2023-05-13 12:36:19 +00:00
dump_stack() c8d171da98
refactor: use fs/ helper to get temp dir 2023-05-13 12:33:12 +00:00
dump_stack() 4e77cf82d3
refactor: use one provider for default cache url 2023-05-13 12:14:20 +00:00
dump_stack() 60a1d19042
test: avoid api requests 2023-05-13 12:02:41 +00:00
dump_stack() 7cf1bbd194
refactor: remove global temporary directory base 2023-05-13 11:32:29 +00:00
dump_stack() 5ada1ef41a
feat: introduce temp dir helper 2023-05-13 11:29:25 +00:00
dump_stack() 997d6a67ba
test: fix match image test 2023-05-13 11:17:11 +00:00
dump_stack() 1d22902eb0
feat: download debian cache automatically 2023-05-13 11:04:35 +00:00
dump_stack() 59febd75b0
test: enable match image test 2023-05-13 11:00:34 +00:00
dump_stack() b1b7a9e675
refactor: move kernel functions to submodule 2023-05-13 10:47:47 +00:00
dump_stack() 95695a4070
feat: add cache argument 2023-05-13 10:46:43 +00:00
dump_stack() 28acc51417
test: disable match image test until cache is implemented 2023-05-13 10:24:29 +00:00
dump_stack() ebc597ff0b
refactor: move container functions to submodule 2023-05-13 10:14:45 +00:00
dump_stack() 2c2435a7a5
refactor: use cavaliergopher/grab to download files 2023-05-13 09:43:06 +00:00
dump_stack() 2977b6f7fd
test: add download image test 2023-05-13 09:31:25 +00:00
dump_stack() da5797766b
refactor: move fs-related functions to submodule 2023-05-13 09:17:57 +00:00
dump_stack() 9b987bcc82
Refactor 2023-05-13 09:09:29 +00:00
dump_stack() 599ce03ca4
Implement config paths helpers 2023-05-13 08:44:45 +00:00
dump_stack() d13eab6947
Match by debian release 2023-05-12 20:05:44 +00:00
dump_stack() 9fd4b541da
Add missing directory 2023-05-12 19:34:55 +00:00
dump_stack() 43aa116682
Implement match for Debian packages 2023-05-12 17:27:48 +00:00
dump_stack() b5d4bdd5f4
Fetch debian kernels function 2023-05-12 15:02:43 +00:00
dump_stack() fa579e5170
Wait to make sure IP is ready 2023-05-12 12:36:06 +00:00
dump_stack() b310b29670
Sleep is more error-prone ¯\_(ツ)_/¯ 2023-05-12 12:29:01 +00:00
dump_stack() 490d063e5b
Wildcard 2023-05-12 12:24:04 +00:00
dump_stack() 33ee48692c
Disable auto-upgrades 2023-05-12 12:21:32 +00:00
dump_stack() ed5d0ab1d1
Increase exit check timeout 2023-05-12 12:16:32 +00:00
dump_stack() ea6775fa45
Handle ssh errors 2023-05-12 12:13:04 +00:00
dump_stack() 4e16dec7c1
Do not exit in case of unsuccessful kill 2023-05-12 12:12:41 +00:00
dump_stack() 0a1a5890ed
Add setup.sh for debian image generator 2023-05-12 12:08:03 +00:00
dump_stack() 593c152798
Handle exit codes correctly 2023-05-12 12:07:42 +00:00
dump_stack() e8554e7c4a
Test 2023-05-12 11:27:30 +00:00
dump_stack() 39d7adc72f
Fix early exit 2023-05-12 11:15:38 +00:00
dump_stack() 4688c6aefd
Fix status code 2023-05-12 11:07:39 +00:00
dump_stack() 4f2b7e1962
We still need to wait to kill apt properly 2023-05-12 11:00:07 +00:00
dump_stack() dc025ff32f
Fix broken ssh connections 2023-05-12 10:56:22 +00:00
dump_stack() 11c69f24ce
Set public acl, fix host-bucket 2023-05-12 09:31:46 +00:00
dump_stack() da57e4e6b5
Run workflows on their change 2023-05-12 09:13:11 +00:00
dump_stack() f46a2ec4b0
Do not run tests on images actions change 2023-05-12 09:00:47 +00:00
dump_stack() 166c125391
Rename 2023-05-12 08:58:35 +00:00
dump_stack() bb511898bb
Rename 2023-05-12 08:16:24 +00:00
dump_stack() e919def8d0
Rename 2023-05-12 08:13:59 +00:00
dump_stack() 8f06448240
Switch to podman 2023-05-12 08:11:57 +00:00
dump_stack() 7d88f09bb0
Use separate workflows for generating images 2023-05-12 08:11:38 +00:00
dump_stack() 9c4313c3a4
Archive logs on failure 2023-05-12 07:59:58 +00:00
dump_stack() e87a6be1bc
Typo 2023-05-12 07:54:09 +00:00
dump_stack() eb51469283
Switch to s3cmd 2023-05-12 07:53:23 +00:00
dump_stack() 3ae8707dcb
Missing quotation mark 2023-05-12 07:52:40 +00:00
dump_stack() 9c830bf22a
Remove newline 2023-05-12 07:48:56 +00:00
dump_stack() a78429729b
Upload images 2023-05-12 07:47:22 +00:00
dump_stack() c965cf3ade
Remove sh -c 2023-05-12 07:41:44 +00:00
dump_stack() 5b50fd2b13
Switch back to pkill 2023-05-12 07:36:32 +00:00
dump_stack() c860b77332
Do not delete droplet (for debug) 2023-05-12 07:32:49 +00:00
dump_stack() 4e4c62c376
Proper wait for the lock 2023-05-12 07:32:03 +00:00
dump_stack() a48cbe4bb2
Kill apt 2023-05-12 07:29:04 +00:00
dump_stack() 2179f84874
Wait for the apt lock 2023-05-12 07:24:12 +00:00
dump_stack() 33d3d28e5d
Add ssh key 2023-05-12 07:19:55 +00:00
dump_stack() 8873566dcb
Switch to podman 2023-05-12 07:09:56 +00:00
dump_stack() fb12fc2f65
Fix quotes 2023-05-12 07:05:34 +00:00
dump_stack() da28fef007
Boilerplate for external VM action 2023-05-12 06:58:17 +00:00
dump_stack() 44d474d6e5
Fix path 2023-05-12 05:41:07 +00:00
dump_stack() 3e87a1b651
Run with sudo 2023-05-12 02:06:25 +00:00
dump_stack() 310999744e
Untabify 2023-05-12 02:05:36 +00:00
dump_stack() 61ac856afb
Try from the same directory 2023-05-12 02:05:28 +00:00
dump_stack() 995f24fdc4
Remove policykit-1 on Debian 11 2023-05-12 01:40:28 +00:00
dump_stack() 8b807a7e77
Allow to run manually 2023-05-12 01:32:46 +00:00
dump_stack() 467f31d141
Upload images 2023-05-12 01:31:14 +00:00
dump_stack() 9752c7ae7d
Typo 2023-05-12 01:25:42 +00:00
dump_stack() e3c2bb134f
Upload cache 2023-05-12 01:25:05 +00:00
dump_stack() 94f77cd742
Add sources.list to the image 2023-05-12 01:09:31 +00:00
dump_stack() 028fa48f71
Generate images on push 2023-05-12 01:04:17 +00:00
dump_stack() 2910ce17c7
Add generator for debian images 2023-05-12 00:59:36 +00:00
dump_stack() 183b4698dd
Build debian cache once a week 2023-05-12 00:39:50 +00:00
dump_stack() efbdc9db36
Implement refetch 2023-05-12 00:07:51 +00:00
dump_stack() 3d2b8b7d2a
Disable E2E testing for Oracle Linux 9
Works fine locally, but for some reason does not work on GitHub Actions
2023-05-11 23:22:44 +00:00
dump_stack() 9190b850b7
Rename for consistency 2023-05-11 22:53:01 +00:00
dump_stack() d41846ede2
Fix parameters 2023-05-11 22:35:38 +00:00
dump_stack() 52c6581675
Ignore cache 2023-05-11 22:18:14 +00:00
dump_stack() 83c1ca303c
Add debian code names 2023-05-11 22:17:46 +00:00
dump_stack() ad0a3706cb
Implement as normal filter 2023-05-11 22:17:31 +00:00
dump_stack() 2e6ce1e8f9
Skip also all-amd64 2023-05-11 22:06:54 +00:00
dump_stack() a77d56c26b
Do not produce error on empty response 2023-05-11 22:06:31 +00:00
dump_stack() 53324e663a
Add ErrNotFound 2023-05-11 21:59:44 +00:00
dump_stack() d4fbec39a6
Crutch for regex 2023-05-11 21:47:43 +00:00
dump_stack() cca637cf9d
Fix regexp 2023-05-11 21:36:54 +00:00
dump_stack() bef382920e
Fix regexp 2023-05-11 21:35:20 +00:00
dump_stack() 20cbd2f72e
Fix regexp 2023-05-11 21:34:32 +00:00
dump_stack() 0594b0ea60
More than one result is legit 2023-05-11 21:29:15 +00:00
dump_stack() f6cb2d8d12
Check if package already in slice 2023-05-11 21:22:28 +00:00
dump_stack() 27a22ba023
Dynamic rate limiter 2023-05-11 21:15:29 +00:00
dump_stack() da9a243ee4
Match bpo only 2023-05-11 20:52:33 +00:00
dump_stack() e123bf258c
Fix missing error check 2023-05-11 20:45:53 +00:00
dump_stack() 9c563ca68b
Lower rate limit 2023-05-11 20:43:29 +00:00
dump_stack() 2b0d7b0460
Fix regex 2023-05-11 20:42:25 +00:00
dump_stack() 68ad89973c
Lower rate limit 2023-05-11 20:42:12 +00:00
dump_stack() 96a6dd7fb9
Missing continue 2023-05-11 20:29:39 +00:00
dump_stack() 6a0846e129
Better error handling 2023-05-11 20:26:39 +00:00
dump_stack() 5b396e7b5e
Expose error variables 2023-05-11 20:25:34 +00:00
dump_stack() 7e545586bc
Trace 2023-05-11 20:24:54 +00:00
dump_stack() da6843f9b7
Add command to populate debian cache 2023-05-11 20:08:08 +00:00
dump_stack() 26c261b6f3
Remove query escape (endpoint does not appear to support it) 2023-05-11 19:43:42 +00:00
dump_stack() 77be74797b
Implement debian package 2023-05-11 19:30:25 +00:00
dump_stack() d04a9de932
Implement package cache API 2023-05-11 18:45:44 +00:00
dump_stack() ac74e450c3
Add dists at time of snapshot, remove codename field (-> function) 2023-05-11 16:52:47 +00:00
dump_stack() 41c84c70f6
Add codename, retries, timeouts 2023-05-11 16:26:16 +00:00
dump_stack() 3ba71a7787
Add logging, rate limiter, retries 2023-05-11 16:07:15 +00:00
dump_stack() aaca60cafc
Add deb package url and its repo component 2023-05-11 13:20:36 +00:00
dump_stack() 3a29b3b869
Handle http errors 2023-05-11 12:58:33 +00:00
dump_stack() 07bd886383
Base mr snapshot wrappers 2023-05-11 12:03:18 +00:00
dump_stack() c3042c97f0
Assert mr API 2023-05-11 11:54:35 +00:00
dump_stack() 1b68fc571f
Rename 2023-05-11 10:58:34 +00:00
dump_stack() b1d034a7fe
Implement debian snapshot mr api 2023-05-11 10:21:21 +00:00
dump_stack() 35d34fdbe0
Add yum to oraclelinux images 2023-05-11 08:20:35 +00:00
dump_stack() cc470d2105
Add yum/dnf to centos8 image 2023-05-11 08:04:13 +00:00
dump_stack() 5f0749155a
Fix CentOS 8 image generator 2023-05-11 07:57:25 +00:00
dump_stack() e2e0dad1ad
Check that fs is case-sensitive 2023-05-11 02:42:34 +00:00
dump_stack() d5ea97c532
Stop waiting for ssh if qemu is dead 2023-05-10 12:30:01 +00:00
dump_stack() 31370b7bad
Reduce layers 2023-05-10 11:20:07 +00:00
dump_stack() cbdef2936d
Upload all logs 2023-05-10 10:25:19 +00:00
dump_stack() bc9eea879d
Test all examples 2023-05-10 10:20:43 +00:00
dump_stack() b0dae2fc69
Rename jobs 2023-05-10 10:12:53 +00:00
dump_stack() c22cea2834
Remove genall test 2023-05-10 10:12:39 +00:00
dump_stack() 828511f0eb
Fix yml 2023-05-10 10:06:05 +00:00
dump_stack() 0735f1d581
Fix yml 2023-05-10 10:04:22 +00:00
dump_stack() f816b43609
Refactor 2023-05-10 10:02:12 +00:00
dump_stack() 52d3d339df
Typo 2023-05-10 09:51:08 +00:00
dump_stack() 308b916b0f
End-to-End Testing for CentOS and Oracle Linux 2023-05-10 09:49:15 +00:00
dump_stack() 5ecf36ebc3
End-to-End Testing (Ubuntu) 2023-05-10 09:36:19 +00:00
dump_stack() 1351819f17
Use old-releases for Ubuntu 12.04 2023-05-10 09:13:05 +00:00
dump_stack() 0323d3d941
Typo 2023-05-10 09:10:07 +00:00
dump_stack() fa9ee43817
End-to-End Testing (list-remote) 2023-05-10 09:09:12 +00:00
dump_stack() 25fad476b4
Image generator for CentOS 6 2023-05-10 08:35:56 +00:00
dump_stack() f484dd99de
tar generated images 2023-05-10 08:21:43 +00:00
dump_stack() 5aed593d81
Make container update parameter available for all kernel commands 2023-05-10 08:09:53 +00:00
dump_stack() e8a7c043d6
Fix CentOS support 2023-05-10 08:04:42 +00:00
dump_stack() dbd3449074
Remove required for --update flag 2023-05-10 07:57:14 +00:00
dump_stack() e274fe55f0
Implement --list-remote command 2023-05-10 07:52:44 +00:00
dump_stack() 66bd74d59d
Fix dracut cmdline 2023-05-10 05:37:17 +00:00
dump_stack() 98a0a561f1
Remove Oracle Linux tests 2023-05-10 05:26:45 +00:00
dump_stack() f8880b2486
no space left on device 2023-05-09 21:07:05 +00:00
dump_stack() f5f87867ac
Remove Oracle Linux 6 test 2023-05-09 20:43:39 +00:00
dump_stack() 70ac88a07f
No libdtrace-ctf on el8 2023-05-09 20:16:28 +00:00
dump_stack() 5d13843835
Do not download images 2023-05-09 19:58:35 +00:00
dump_stack() 0f36b3b55b
Test installation of Oracle Linux kernels 2023-05-09 19:29:17 +00:00
dump_stack() aa32c7a8ae
Make sure libdtrace-ctf available on all versions 2023-05-09 19:19:46 +00:00
dump_stack() f7fcfe8292
Add libdtrace-ctf on el6 2023-05-09 19:13:22 +00:00
dump_stack() d5b733a0a0
Force drivers only on 8+ 2023-05-09 17:53:39 +00:00
dump_stack() cd68dc1ddc
Add kernel modules to boot on el{8,9} 2023-05-09 17:45:47 +00:00
dump_stack() 2460b8230a
Ignore logs 2023-05-09 17:43:53 +00:00
dump_stack() 2f4c859dd8
Show warning in case no kernel packages matched 2023-05-09 16:31:34 +00:00
dump_stack() 169acca9df
Use bash 2023-05-09 16:19:10 +00:00
dump_stack() 48be2df1b0
Add Oracle Linux image generator 2023-05-09 16:14:15 +00:00
dump_stack() 0e85866822
Implement Oracle Linux support 2023-05-09 14:40:06 +00:00
dump_stack() e291352925
Sync 2023-05-08 22:31:46 +00:00
dump_stack() c14c5989a4
Flag to override rootfs image 2023-05-08 22:21:28 +00:00
dump_stack() 3677adece9
Implements the parameter to specify an endless stress script 2023-05-08 22:01:10 +00:00
dump_stack() ca95155ce0
endless: exit on first error 2023-05-08 21:36:25 +00:00
dump_stack() 15d8ab8137
Do not lose stdout 2023-05-08 21:35:47 +00:00
dump_stack() 1a1afce4f2
Implements endless tests 2023-05-08 21:19:06 +00:00
dump_stack() 17a70fdb2d
Add option for timeout after starting of the qemu vm before tests 2023-05-08 19:56:10 +00:00
dump_stack() 8ec4f13364
Lower log level for debug output 2023-05-08 19:18:49 +00:00
dump_stack() c1c5afc0e0
ssh dial with retry 2023-05-08 14:54:28 +00:00
dump_stack() 2e5c386c42
Revert "Lock ssh handshake"
This reverts commit b558269ac3.
2023-05-08 14:40:57 +00:00
dump_stack() b558269ac3
Lock ssh handshake 2023-05-08 14:30:53 +00:00
dump_stack() d4f826d44b
Debug logging 2023-05-08 14:30:09 +00:00
dump_stack() 7b94053376
Avoid use of external scp command 2023-05-07 18:14:59 +00:00
dump_stack() 0e08d87a64
Fix check whether vm is alive 2023-05-07 16:05:11 +00:00
dump_stack() 734240500b
Revert "Ensure log file is available for VM status check"
This reverts commit 8c1024b36c.
2023-05-07 16:03:28 +00:00
dump_stack() 8c1024b36c
Ensure log file is available for VM status check 2023-05-07 15:59:45 +00:00
dump_stack() ababd027f9
Revert "Do not close log file immediately"
This reverts commit e8a446ec76.
2023-05-07 15:46:54 +00:00
dump_stack() 0826948568
Try creating a new session in ssh check 2023-05-07 15:24:10 +00:00
dump_stack() e8a446ec76
Do not close log file immediately 2023-05-07 14:54:36 +00:00
dump_stack() de3f361e51
Remove colors from the cwd logs 2023-05-02 17:40:28 +00:00
dump_stack() 8279517ecf
Do not show distro type/release and kernel version in cwd logs 2023-05-02 12:38:06 +00:00
dump_stack() 4f80122039
Implements per-test logging to the current working directory 2023-05-02 11:10:57 +00:00
dump_stack() 8922b3e548
Retry scp if failed 2023-05-01 15:10:39 +00:00
dump_stack() 321fe0567a
Fix 2023-05-01 14:57:08 +00:00
dump_stack() 530b98e513
Handle empty build dir correctly 2023-05-01 14:53:52 +00:00
dump_stack() 249f11d059
Do not exit copying error, skip 2023-05-01 14:52:16 +00:00
dump_stack() b1f5a36a32
Improve logging 2023-05-01 14:51:58 +00:00
dump_stack() 8fa62e9a6e
Bump version 2023-04-26 14:36:25 +00:00
dump_stack() e04154b235
Fix stderr log 2023-04-26 14:35:20 +00:00
dump_stack() 096cad8701
Cleanup also -core/-modules 2023-04-09 17:13:54 +00:00
dump_stack() 70d464f0e2
Fix directory name 2023-04-09 14:38:39 +00:00
dump_stack() d65d683dfc
Fix name of ubuntu image generator 2023-04-09 14:16:29 +00:00
dump_stack() bde115f5df
Bump version 2023-04-08 14:57:39 +00:00
dump_stack() d972bae547
Send SIGINT first while killing the container by timeout 2023-04-08 14:50:18 +00:00
dump_stack() b3d4a0dbc2
Update changelog 2023-04-07 21:38:30 +00:00
dump_stack() 4a3d739b85
Implements dry run for image edit 2023-04-07 21:30:03 +00:00
dump_stack() bb319a9ff6
Export qemu arguments 2023-04-07 21:17:34 +00:00
dump_stack() 21daac4fbc
Check for shutdown before log current kernel 2023-04-07 21:03:31 +00:00
dump_stack() 841fd7f585
Graceful shutdown on ^C 2023-04-07 20:52:45 +00:00
dump_stack() b812048408
Typo 2023-04-07 19:11:42 +00:00
dump_stack() a5edc4837f
Update readme 2023-04-07 19:09:33 +00:00
dump_stack() 9e55ebd44e
Add a flag to set the container runtime binary 2023-04-07 18:57:18 +00:00
dump_stack() e35e030c54
Install the kernel in a single container run 2023-04-07 17:47:54 +00:00
dump_stack() a4f2a31819
Correctly handle empty workdir 2023-04-07 17:46:36 +00:00
dump_stack() c3cf25e523
Allow to disable container volumes mount 2023-04-07 17:35:00 +00:00
dump_stack() 056e38698e
Use single temp directory base 2023-04-07 16:44:21 +00:00
dump_stack() 32b692f752
Cleanup after cache kernel package dependencies 2023-04-07 16:27:56 +00:00
dump_stack() 3f8c7fd86b
brew cask is no longer a brew command 2023-04-07 14:30:59 +00:00
dump_stack() f9c2849658
Bump version 2023-04-07 13:55:58 +00:00
dump_stack() caba73cd7e
Skip the kernel after the end of retries 2023-04-07 13:30:30 +00:00
dump_stack() 5bb79302dd
Bump version 2023-04-07 10:42:34 +00:00
dump_stack() 4570e9adbe
Handling discrepancies between /lib/modules and /boot 2023-04-07 10:27:59 +00:00
dump_stack() 8029ad2185
Update readme 2023-04-07 00:48:38 +00:00
dump_stack() 2f8446864a
go mod tidy 2023-04-07 00:04:10 +00:00
dump_stack() dd602df291
Set go version to 1.17 2023-04-06 23:52:22 +00:00
dump_stack() c9d71601f2
Update readme 2023-04-06 23:32:02 +00:00
dump_stack() 9863c93c02
Fix brew tap url 2023-04-06 23:18:47 +00:00
dump_stack() 27a3cc498c
Bump version 2023-04-06 22:48:54 +00:00
dump_stack() b75289a9d1
Update changelog 2023-04-06 22:46:46 +00:00
dump_stack() fd973c367f
Add --noautoremove for CentOS 8 2023-04-06 22:37:15 +00:00
dump_stack() 4bc4ca738b
Typo 2023-04-06 21:57:47 +00:00
dump_stack() cd7cf0f2b6
Cache kernel package dependencies 2023-04-06 21:56:22 +00:00
dump_stack() 87a5c389df
Fix log level 2023-04-06 21:09:12 +00:00
dump_stack() be3f519573
Fix package cache 2023-04-06 21:08:51 +00:00
dump_stack() a5bfe334cb
Install both headers and image 2023-04-06 20:53:59 +00:00
dump_stack() c0dd0ae07b
Trim last added newline 2023-04-06 20:45:20 +00:00
dump_stack() a4c83c1637
Use different names for logs upload 2023-04-06 20:40:18 +00:00
dump_stack() 897ac0699d
Archive logs 2023-04-06 20:14:36 +00:00
dump_stack() 5b444a3193
Use headers package 2023-04-06 20:10:07 +00:00
dump_stack() 8aed31e41b
Install a single kernel to ensure all dependencies are cached 2023-04-06 20:03:07 +00:00
dump_stack() f57b3408be
Add stdout trace for qemu 2023-04-06 19:50:57 +00:00
dump_stack() 483e56163e
Revert "Add linux-image-generic to base container (for dependencies)"
This reverts commit 5931c08de1.
2023-04-06 19:40:44 +00:00
dump_stack() ac5f83349c
Check that files exist before copy 2023-04-06 19:28:03 +00:00
dump_stack() 5931c08de1
Add linux-image-generic to base container (for dependencies) 2023-04-06 19:07:58 +00:00
dump_stack() 0d3a075d76
Add commands to manage containers 2023-04-06 19:05:41 +00:00
dump_stack() bbd6f79443
Add stdout trace for qemu ssh commands 2023-04-06 18:20:55 +00:00
dump_stack() 5ce73d2fc5
Add stdout trace for sh commands 2023-04-06 18:14:24 +00:00
dump_stack() f65d4ad879
No need to have it at debug log level 2023-04-06 18:13:56 +00:00
dump_stack() 7dddf71d93
Add stdout trace for container build 2023-04-06 18:12:56 +00:00
dump_stack() f75c70db94
Log container stdout at trace level 2023-04-06 18:00:46 +00:00
dump_stack() 603e91af6f
Write debug log to file 2023-04-06 17:32:54 +00:00
dump_stack() 42dc8ac98c
Revert "end-to-end testing for macOS"
This reverts commit 87ef1e42b5.
2023-04-06 15:01:04 +00:00
dump_stack() b7404aa453
Run end-to-end tests by non-root 2023-04-06 14:50:58 +00:00
dump_stack() bf455d9788
Copy from inside the container to avoid permission problems 2023-04-06 14:48:12 +00:00
dump_stack() a0ed1eb1f5
Improve logging 2023-04-06 14:32:05 +00:00
dump_stack() 3220b9a5ae
Additional arguments for containers 2023-04-06 14:30:42 +00:00
dump_stack() 87ef1e42b5
end-to-end testing for macOS 2023-04-06 13:59:44 +00:00
dump_stack() 17a4b746cc
Proper CentOS 8 support 2023-04-06 13:38:33 +00:00
dump_stack() 7314cc72db
Remove genall test for Ubuntu 20.04 2023-04-06 13:21:04 +00:00
dump_stack() c353618c17
Fix log output 2023-04-06 13:12:57 +00:00
dump_stack() fe3092371c
Switch to 22.04 2023-04-06 13:03:10 +00:00
dump_stack() ab7a70cc0a
Try to install only 10 Ubuntu 20.04 kernels 2023-04-06 13:00:51 +00:00
dump_stack() 0907129529
Add test genall with --no-headers 2023-04-06 12:59:35 +00:00
dump_stack() a874ac9fc7
Precise kernel version 2023-04-06 12:56:48 +00:00
dump_stack() 23e933824b
Add --no-headers tests, split jobs 2023-04-06 12:55:24 +00:00
dump_stack() 80d7f9fb52
Add build info if run with debug 2023-04-06 12:50:44 +00:00
dump_stack() fad8502639
Fix wrong kernel version 2023-04-06 12:37:51 +00:00
dump_stack() 5b468a4ec1
Keep genall tests only for Ubuntu 2023-04-06 12:36:06 +00:00
dump_stack() 4a22df770b
GitHub Actions: split jobs 2023-04-06 12:15:52 +00:00
dump_stack() 88a3ff3869
GitHub Actions: fix path to binary 2023-04-06 12:12:28 +00:00
dump_stack() c5645f1985
GitHub Actions: add test for one kernel installation/reinstallation 2023-04-06 12:11:35 +00:00
dump_stack() bf421f80c8
GitHub Actions: use debug log level 2023-04-06 12:09:46 +00:00
dump_stack() 055ea6b83d
GitHub Actions: add genall tests 2023-04-06 11:50:47 +00:00
dump_stack() 96c267d093
Run end-to-end tests by root 2023-04-05 19:50:59 +00:00
dump_stack() 301eb2a60b
Refactor 2023-04-05 19:32:31 +00:00
dump_stack() fcfbf4f36d
Match also centos mask as string 2023-04-05 18:04:22 +00:00
dump_stack() b98abe4a83
Match also as string 2023-04-05 18:01:04 +00:00
dump_stack() 72d51c0e1c
Add timeout between retries 2023-04-05 16:40:20 +00:00
dump_stack() 2d345c584b
Do not retry on success 2023-04-05 16:36:09 +00:00
dump_stack() 97fb543fef
Typo 2023-04-05 16:26:26 +00:00
dump_stack() 3fd2fd5966
Implements retry for failed kernel installations 2023-04-05 16:24:45 +00:00
dump_stack() 29af467bee
Install kernels to tmp directory first 2023-04-05 16:14:30 +00:00
dump_stack() 604d21e4a2
Refactor deb package matcher 2023-04-05 12:31:46 +00:00
dump_stack() e44124c063
go mod tidy 2023-04-05 11:51:42 +00:00
dump_stack() fc0c76f114
Do not randomize kernels installation/test order by default 2023-04-05 11:29:31 +00:00
dump_stack() f399390c2c
Ignore linux-image-generic 2023-04-05 10:52:40 +00:00
dump_stack() 8d3986ce8e
Refactor 2023-03-29 17:25:32 +00:00
dump_stack() 3aba883b81
Add --no-headers flag 2023-03-29 17:15:27 +00:00
dump_stack() 3329dc4c24
Add kernel config-regen command 2023-03-24 04:20:28 +00:00
dump_stack() 34f3692d01
Fix timeout 2023-03-24 04:15:08 +00:00
dump_stack() 1e66c156fa
Run permission fix only once per container 2023-03-23 20:28:42 +00:00
dump_stack() 2b54d13b9e
Fix only kernels permissions 2023-03-23 20:26:30 +00:00
dump_stack() 44494b65a6
Improve logging 2023-03-23 20:20:46 +00:00
dump_stack() a36d5ddb12
Increase timeout 2023-03-23 20:16:13 +00:00
dump_stack() 488d2380e1
Do not reset old permissions 2023-03-23 20:05:29 +00:00
dump_stack() 292e3dc211
Set permissions on the internals of all container volumes 2023-03-23 19:18:14 +00:00
dump_stack() ec1732c8ec
Set kernels permissions inside container 2023-03-23 17:45:24 +00:00
dump_stack() bcdfb23112
Set debug log level for CI 2023-03-23 15:51:43 +00:00
dump_stack() d70150b496
Set kernel/initrd permissions 2023-03-22 21:16:00 +00:00
dump_stack() 105809ddec
Add script artifact type 2023-03-22 20:56:44 +00:00
dump_stack() 5ece0e0f15
Add context for testing logs 2023-03-22 18:32:40 +00:00
dump_stack() 2150162e8e
Remove obsolete tests 2023-03-22 18:26:57 +00:00
dump_stack() 7b16a439d8
Harmonise logging 2023-03-22 18:24:09 +00:00
dump_stack() 7e050d9e99
Add command to install specific kernel 2023-03-22 18:21:21 +00:00
dump_stack() 2c7341f0d8
Add force reinstall flag 2023-03-22 18:20:45 +00:00
dump_stack() b98dc87d54
Reduce verbosity 2023-03-22 18:08:48 +00:00
dump_stack() 0f1bdc795d
Fix check for installed kernels 2023-03-22 18:05:28 +00:00
dump_stack() 3e9410bf09
Install kernels in mounted volume instead of dockerfile layers 2023-03-22 17:45:56 +00:00
dump_stack() 0b198f71ca
Show file:line only for debug log level 2023-03-22 17:36:04 +00:00
dump_stack() d6c678b0cd
Do not use the default known hosts file 2023-03-19 18:00:10 +00:00
dump_stack() e2fcc20f36
Improve logging 2023-03-19 17:59:56 +00:00
dump_stack() 60bc7238a8
Use the legacy SCP protocol for directory transfers instead of SFTP 2023-03-19 17:56:09 +00:00
dump_stack() 04106e7537
VM commands debug logs 2023-03-19 13:30:10 +00:00
dump_stack() 21d8bec382
Set default log level to info 2023-03-19 13:20:39 +00:00
dump_stack() c82bd6a554
Consider copy standard modules error as fatal 2023-03-19 13:20:29 +00:00
dump_stack() 08beba2bab
Add debug logs for exec.Command 2023-03-19 13:14:14 +00:00
dump_stack() 305c6972ca
Improve logging 2023-03-19 12:36:19 +00:00
dump_stack() 78069c6240
More debug logging 2023-03-18 22:55:38 +00:00
dump_stack() 992a0f871c
More debug logging 2023-03-18 22:48:24 +00:00
dump_stack() 3f16599109
Remove verbose flag 2023-03-18 22:39:24 +00:00
dump_stack() c2c3837f44
Set default log level to debug 2023-03-18 22:34:44 +00:00
dump_stack() f1f67e38ee
Improve logging 2023-03-18 22:34:30 +00:00
dump_stack() ae20a6d11d
Add log level parameter 2023-03-18 21:53:53 +00:00
dump_stack() 8bffea0aea
Switch to zerolog 2023-03-18 21:30:07 +00:00
dump_stack() feb1ab7d37
Revert "Use the legacy SCP protocol for directory transfers instead of SFTP"
This reverts commit cc1261b0b0.
2023-03-16 18:54:48 +00:00
dump_stack() 12d5d43d7a
Build with cgo 2023-03-16 18:46:46 +00:00
dump_stack() 585a608083
go mod way to build 2023-03-16 09:52:34 +00:00
dump_stack() f10c4165a1
use docker run --rm ... 2023-03-16 09:42:07 +00:00
dump_stack() 51e4cfec30
Print current tag at start 2023-03-16 09:41:49 +00:00
dump_stack() d5d9cce517
Suggest use of official docker installation guide 2023-03-16 09:41:31 +00:00
dump_stack() 0e153b2763
debug: add parameters to set ssh addr/port 2023-02-28 19:11:28 +00:00
dump_stack() 71f5530fed
kernel genall: limit generated kernels to 100 2023-02-27 18:53:06 +00:00
dump_stack() 870fe202b7
Treat test files as relative to build directory 2023-02-27 18:28:10 +00:00
dump_stack() b0587a4ade
Add --artifact-config parameter to debug command 2023-02-23 08:26:54 +00:00
dump_stack() 4fdcc5d098
Refactor 2023-02-23 08:25:19 +00:00
dump_stack() 09feffb6a8
Treat test files as relative to build directory 2023-02-16 10:46:24 +00:00
dump_stack() 2d6db97b43
Add support for applying patches 2023-02-16 10:22:08 +00:00
dump_stack() cc1261b0b0
Use the legacy SCP protocol for directory transfers instead of SFTP 2023-02-16 08:30:29 +00:00
dump_stack() 24b6749504
Wait until ssh is available 2023-02-16 06:30:59 +00:00
dump_stack() f97cb3f10a
Typo 2023-02-15 16:57:31 +00:00
dump_stack() b246ecf956
Support make targets 2023-02-15 16:54:46 +00:00
dump_stack() c9618be454
Store kernels in separate directories 2023-02-15 16:09:11 +00:00
dump_stack() f6b6b823a9
Use already defined path 2023-02-15 11:57:33 +00:00
dump_stack() 3f79c8e461
Standard modules dependencies 2023-02-15 11:48:25 +00:00
dump_stack() 3d6961dfd7
Treat any SCP output as an error 2023-02-15 11:45:25 +00:00
dump_stack() 9910921e30
Cleanup symbolic links from modules dir 2023-02-15 11:38:02 +00:00
dump_stack() d59049e531
qemu: add CopyDirectory (via scp) 2023-02-15 11:20:30 +00:00
dump_stack() 668bc1e391
Store the default kernel modules 2023-02-15 11:00:00 +00:00
dump_stack() 3ec919abc1
Image editing command 2023-02-15 10:17:57 +00:00
dump_stack() 0529b30558
qemu: allow to run mutable 2023-02-15 09:48:13 +00:00
dump_stack() 063df192b4
mirror:// is slower and less reliable in the end 2023-02-14 05:30:28 +00:00
dump_stack() 1a952e0212
Set the default maximum number of kernels to be downloaded to 100
The overlayfs driver natively supports up to 128 layers
2023-02-13 22:10:23 +00:00
dump_stack() 8b5ce9923b
Revert podman layer squashing, does not work well with caching 2023-02-11 15:02:21 +00:00
dump_stack() b1493b79a3
podman --squash-all requires --layers to cache intermediate layers 2023-02-11 14:47:43 +00:00
dump_stack() fb5b2a2bbb
Case insensitive 2023-02-11 14:30:37 +00:00
dump_stack() a9db750ea5
Fix case where both docker and podman are installed, but docker is not podman alias 2023-02-11 08:52:49 +00:00
dump_stack() 55032f07af
Squash podman layers, not docker layers 2023-02-11 08:33:20 +00:00
dump_stack() bb7c2f94d5
podman: squash all layers into a single layer 2023-02-11 08:28:26 +00:00
dump_stack() 422f05d25b
Use squash only if podman is used 2023-02-11 08:22:37 +00:00
dump_stack() 3c8e80cace
docker/podman: squash newly built layers into a single new layer 2023-02-11 08:08:26 +00:00
dump_stack() a0ee660e50
Add artifact config flag 2023-02-01 08:04:27 +00:00
dump_stack() 82436cbd83
Keep help lowercase 2023-01-31 09:57:53 +00:00
dump_stack() ce8f8d3a38
Typo 2023-01-31 09:56:49 +00:00
dump_stack() 330da3b930
Reliability threshold flag 2023-01-31 09:34:23 +00:00
dump_stack() ce7794ce84
Add missing flags, refactor 2023-01-31 09:05:43 +00:00
dump_stack() abd8e69186
Update changelog 2023-01-31 07:21:53 +00:00
dump_stack() 2f52f6db6d
Refactor command line interface 2023-01-31 07:13:33 +00:00
dump_stack() 935266c850
Add alecthomas/kong support for config 2023-01-30 20:42:22 +00:00
dump_stack() a7b619fc40
lint 2023-01-30 20:15:17 +00:00
dump_stack() 0e185ab36b
Check for local docker image 2023-01-30 11:15:12 +00:00
dump_stack() b8bb11943a
Fix default test.sh path 2023-01-29 22:27:35 +00:00
dump_stack() 2bc55e2011
lint 2023-01-29 22:27:24 +00:00
dump_stack() 6e1216201e
podman compatibility 2023-01-29 20:42:16 +00:00
dump_stack() 92706c68fb
go mod tidy 2023-01-24 15:41:57 +00:00
dump_stack() 49ee65de76
Fix package name 2023-01-24 15:32:04 +00:00
dump_stack() 8fca9dbd2e
Ignore binary 2023-01-24 15:12:26 +00:00
dump_stack() 1deb201e25
Switch to Ubuntu 22.04 for testing 2023-01-19 17:22:25 +00:00
dump_stack() cc26ff8626
Add shell.nix 2023-01-19 16:59:25 +00:00
Denis Efremov 05ae073fe6 Use upstream lkrg in the module template 2022-02-16 20:45:55 +00:00
dump_stack() 603a2c98bd
Use upstream lkrg 2021-01-05 20:52:27 +00:00
dump_stack() cfee4c565c
Remove donate 2020-12-16 16:46:25 +00:00
dump_stack() 02663fad64
Run tests on pull request 2020-09-15 11:05:53 +00:00
Bradley Landherr e43993c6e5 adding TestFiles to artifact config, transfers extra test files to VM 2020-09-15 12:57:11 +02:00
dump_stack() 90829e2409
Follow redirect 2020-06-18 15:22:35 +00:00
dump_stack() 514e2c9c91
Update donate.yml 2020-06-18 13:33:50 +00:00
dump_stack() 5b0bf7de01
Bump version 2020-06-14 21:03:12 +00:00
dump_stack() 992c41c84b
Show last log if no ID specified 2020-06-14 20:46:56 +00:00
dump_stack() 22a8e32e2c
Implements modules preload list 2020-06-14 20:14:59 +00:00
dump_stack() 2f5f1db0db
Add docker timeout to artifact configuration 2020-06-14 17:32:57 +00:00
dump_stack() 551ec7f7ef
Update readme 2020-05-30 14:37:41 +00:00
dump_stack() 8a53b6081c
Update changelog 2020-05-30 14:26:12 +00:00
dump_stack() 27d8291bb2
Workaround for CentOS 8 Vault repos 2020-05-30 14:13:03 +00:00
dump_stack() db5d31d563
CentOS 8 image generator 2020-05-30 13:42:47 +00:00
dump_stack() d27fbf6671
No kernels in boot if there is no grub 2020-05-30 13:42:04 +00:00
dump_stack() cf79a9f94f
Yum in CentOS 8 does not support --show{-}duplicates with a dash 2020-05-30 12:48:24 +00:00
dump_stack() bfc6f11a7e
Fix deltarpm support for CentOS 8 2020-05-30 12:40:12 +00:00
dump_stack() bfae451749
Fix kernel module name for unit tests 2020-05-30 12:31:27 +00:00
dump_stack() 9b8d4a056e
Fix path to vmlinuz/initrd 2020-05-30 12:05:24 +00:00
dump_stack() 81234fc3a6
Update bootstrap scripts to Ubuntu 20.04 2020-05-30 11:37:00 +00:00
dump_stack() 81db5a6d6a
Update go.mod 2020-05-30 10:43:12 +00:00
alyakimenko 5bb7e08188 Sync with the latest logrusorgru/aurora
Fixes #15
2020-05-19 14:48:53 +00:00
alyakimenko dce1ce6c17 Make go mod tidy 2020-05-19 14:48:53 +00:00
dump_stack() 1c2ea77920
GitHub Actions: Use latest stable nixpkgs channel 2020-02-21 00:32:02 +00:00
dump_stack() f92b4e6640
Add dashboard access token 2020-01-20 09:27:06 +00:00
dump_stack() db72ff0aea
Donations 2020-01-16 23:58:19 +00:00
dump_stack() a6b81a3a24
GitHub Actions: better build job names 2020-01-05 07:21:48 +00:00
dump_stack() f93f4e7072
Remove Travis-CI 2020-01-05 06:49:37 +00:00
dump_stack() 70168afa4a
Add note about docker group 2019-12-28 08:50:10 +00:00
dump_stack() 26a724096e
Remove build status badge (status is already shown in the UI) 2019-12-28 01:17:24 +00:00
dump_stack() 0a332c670a
Remove CircleCI because it does not support macOS on free plan 2019-12-28 01:07:00 +00:00
dump_stack() 196f17277c
CircleCI: specify xcode version 2019-12-28 01:00:28 +00:00
dump_stack() 7f418b30ac
Add circleci configuration for macOS 2019-12-28 00:54:14 +00:00
dump_stack() 2494c94f6e
Move build from source to documentation 2019-12-27 08:38:47 +00:00
dump_stack() 27ffff2d05
Actualize title 2019-12-27 08:33:45 +00:00
dump_stack() eafe9e57a8
Revert "Link for documentation directly to the introduction"
This reverts commit 7e5126c042.
2019-12-27 08:30:44 +00:00
dump_stack() 7e5126c042
Link for documentation directly to the introduction 2019-12-27 08:29:07 +00:00
dump_stack() 81219be062
Update README.md 2019-12-27 08:25:55 +00:00
dump_stack() 434aeb768b
Add commands for install Docker 2019-12-27 08:18:45 +00:00
dump_stack() bd27e890d1
Add timeout after start qemu for tests 2019-12-27 07:52:26 +00:00
dump_stack() 873b35a18d
Note about docker 2019-12-27 07:16:53 +00:00
dump_stack() fc2ee93b57
Add installation section 2019-12-27 07:12:09 +00:00
dump_stack() e03dff8409
Should return if error occurred 2019-12-26 13:16:38 +00:00
dump_stack() f4a8b75244
GitHub Actions: split jobs, add end-to-end testing 2019-12-26 06:47:37 +00:00
133 changed files with 12067 additions and 3032 deletions

117
.github/workflows/debian-cache.yml vendored Normal file
View File

@ -0,0 +1,117 @@
name: Debian Cache
on:
workflow_dispatch:
schedule:
- cron: '0 0 * * *'
push:
paths:
- '.github/workflows/debian-cache.yml'
- 'distro/debian/snapshot/**'
- 'distro/debian/cache.go'
- 'distro/debian/kernel.go'
concurrency:
group: ${{ github.workflow_ref }}
jobs:
debian-kernel-metadata-cache:
name: Metadata
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Build
run: go build
- name: Cache
run: ./out-of-tree --log-level=trace distro debian cache --refetch=0 --limit=128 --update-release
- name: Install s3cmd
run: sudo apt install s3cmd
- name: Archive cache
uses: actions/upload-artifact@v4
with:
name: debian-cache
path: ~/.out-of-tree/debian.cache
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: debian-metadata-cache-logs
path: ~/.out-of-tree/logs
- name: Upload cache
run: s3cmd put --acl-public ~/.out-of-tree/debian.cache s3://out-of-tree/1.0.0/ --host=fra1.digitaloceanspaces.com --host-bucket='%(bucket)s.fra1.digitaloceanspaces.com' --access_key=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} --secret_key=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }}
debian-kernel-packages-mirror:
name: Packages
needs: debian-kernel-metadata-cache
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Build
run: go build
- name: Install s3cmd
run: sudo apt install s3cmd
- name: Mirror deb packages
shell: python
run: |
import os
import logging
import time
import datetime
from subprocess import getstatusoutput
def get_kernels() -> bool:
status, output = getstatusoutput(
"./out-of-tree distro debian fetch --max=16 --limit=1"
)
logging.info(output)
return status == 0
def upload(f: str) -> bool:
status, output = getstatusoutput(
"s3cmd "
"--host=fra1.digitaloceanspaces.com "
"--host-bucket='%(bucket)s.fra1.digitaloceanspaces.com' "
"--access_key=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} "
"--secret_key=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} "
f"put --acl-public {f} "
"s3://out-of-tree/1.0.0/packages/debian/"
)
logging.info(output)
return status == 0
logging.basicConfig(level=logging.NOTSET)
uploaded = []
timeout = time.time() + datetime.timedelta(hours=2).seconds
while get_kernels() and time.time() < timeout:
for f in os.listdir():
if not f.endswith('.deb'):
continue
if f in uploaded:
continue
logging.info(f)
ok = upload(f)
if ok:
uploaded += [f]
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: debian-packages-cache-logs
path: ~/.out-of-tree/logs

172
.github/workflows/e2e.yml vendored Normal file
View File

@ -0,0 +1,172 @@
name: E2E
on:
workflow_dispatch:
push:
paths-ignore:
- ".github/workflows/images-*"
- ".github/workflows/ubuntu.yml"
- ".github/workflows/macos.yml"
- ".github/workflows/debian-cache.yml"
- "docs/**"
- ".readthedocs.yaml"
- "README.md"
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
test-end-to-end:
name: Module
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
os: [
{ distro: Ubuntu, release: 12.04 },
{ distro: Ubuntu, release: 14.04 },
{ distro: Ubuntu, release: 16.04 },
{ distro: Ubuntu, release: 18.04 },
{ distro: Ubuntu, release: 20.04 },
{ distro: Ubuntu, release: 22.04 },
{ distro: CentOS, release: 6 },
{ distro: CentOS, release: 7 },
{ distro: CentOS, release: 8 },
{ distro: OracleLinux, release: 6 },
{ distro: OracleLinux, release: 7 },
{ distro: OracleLinux, release: 8 },
{ distro: OracleLinux, release: 9 },
{ distro: Debian, release: 7 },
{ distro: Debian, release: 8 },
{ distro: Debian, release: 9 },
{ distro: Debian, release: 10 },
{ distro: Debian, release: 11 },
{ distro: Debian, release: 12 },
{ distro: OpenSUSE, release: "12.1" },
{ distro: OpenSUSE, release: "12.2" },
{ distro: OpenSUSE, release: "12.3" },
{ distro: OpenSUSE, release: "13.1" },
{ distro: OpenSUSE, release: "13.2" },
{ distro: OpenSUSE, release: "42.1" },
{ distro: OpenSUSE, release: "42.2" },
{ distro: OpenSUSE, release: "42.3" },
{ distro: OpenSUSE, release: "15.0" },
{ distro: OpenSUSE, release: "15.1" },
{ distro: OpenSUSE, release: "15.2" },
{ distro: OpenSUSE, release: "15.3" },
{ distro: OpenSUSE, release: "15.4" },
{ distro: OpenSUSE, release: "15.5" }
]
steps:
- uses: actions/checkout@v1
- name: Build
run: go build
- uses: digitalocean/action-doctl@v2
with:
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
- uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: Create droplet
run: >-
doctl compute droplet create
--size s-4vcpu-8gb-intel
--tag-name=github-actions
--image almalinux-9-x64
--ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94,37:46:77:a8:4a:96:3b:20:16:46:35:04:95:ca:0c:5c'
--wait
--region fra1
ga-out-of-tree-e2e-${{ matrix.os.distro }}-${{ matrix.os.release }}-$GITHUB_SHA
- name: End-to-End Testing [${{ matrix.os.distro }} ${{ matrix.os.release }}]
shell: bash
run: |
sleep 1m
IP=$(doctl compute droplet list \
--tag-name=github-actions \
--format "Name,Public IPv4" \
| grep -v ID \
| grep ga-out-of-tree-e2e-${{ matrix.os.distro }}-${{ matrix.os.release }}-$GITHUB_SHA \
| awk '{print $2}')
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
do
sleep 1s
done
ssh root@$IP "cloud-init status --wait"
ssh root@$IP "dnf install -y podman qemu-kvm-core epel-release"
ssh root@$IP "dnf install -y s3cmd"
ssh root@$IP "ln -s /usr/libexec/qemu-kvm /usr/bin/qemu-system-x86_64"
scp ./out-of-tree root@$IP:/usr/local/bin/
echo 'name = "test"' > examples/kernel-module/.out-of-tree.toml
echo 'type = "module"' >> examples/kernel-module/.out-of-tree.toml
echo 'standard_modules = true' >> examples/kernel-module/.out-of-tree.toml
echo '[[targets]]' >> examples/kernel-module/.out-of-tree.toml
echo 'distro = { id = "${{ matrix.os.distro }}", release = "${{ matrix.os.release }}" }' >> examples/kernel-module/.out-of-tree.toml
echo 'kernel = { regex = ".*" }' >> examples/kernel-module/.out-of-tree.toml
echo '[qemu]' >> examples/kernel-module/.out-of-tree.toml
echo 'timeout = "10m"' >> examples/kernel-module/.out-of-tree.toml
echo 'after_start_timeout = "10s"' >> examples/kernel-module/.out-of-tree.toml
echo 'modprobe uio || modprobe 9p || modprobe xfs' >> examples/kernel-module/test.sh
scp -r examples/kernel-module root@$IP:test
echo '[Unit]' >> test.service
echo 'Description=e2e' >> test.service
echo '[Service]' >> test.service
echo 'RemainAfterExit=yes' >> test.service
echo 'StandardError=append:/var/log/test.log' >> test.service
echo 'StandardOutput=append:/var/log/test.log' >> test.service
echo 'Type=oneshot' >> test.service
echo 'WorkingDirectory=/root/test' >> test.service
echo 'TimeoutStopSec=1' >> test.service
echo 'ExecStart=/usr/local/bin/out-of-tree kernel --no-container-cache autogen --threads=8 --max=128 --shuffle' >> test.service
echo 'ExecStart=/usr/local/bin/out-of-tree pew --qemu-timeout=10m --threads=4 --include-internal-errors' >> test.service
scp test.service root@$IP:/etc/systemd/system/test.service
ssh root@$IP systemctl daemon-reload
ssh root@$IP setenforce 0
ssh root@$IP systemctl start test --no-block
while ! ssh root@$IP systemctl show test -p SubState --value | grep -E '(failed|exited)'
do
sleep 30s
done
ssh root@$IP "cat /var/log/test.log"
scp -r root@$IP:.out-of-tree/logs .
ssh root@$IP systemctl is-active test || exit 1
ssh root@$IP "/usr/local/bin/out-of-tree container save"
ssh root@$IP "s3cmd put --acl-public *.tar.gz s3://out-of-tree/1.0.0/containers/ --host=fra1.digitaloceanspaces.com --host-bucket='%(bucket)s.fra1.digitaloceanspaces.com' --access_key=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} --secret_key=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }}"
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: test-end-to-end-${{ matrix.os.distro }}-${{ matrix.os.release }}-logs
path: logs
- name: Delete droplet
if: always()
run: doctl compute droplet delete -f ga-out-of-tree-e2e-${{ matrix.os.distro }}-${{ matrix.os.release }}-$GITHUB_SHA

86
.github/workflows/images-debian.yml vendored Normal file
View File

@ -0,0 +1,86 @@
name: Debian
on:
workflow_dispatch:
push:
paths:
- 'tools/qemu-debian-img/**'
- '.github/workflows/images-debian.yml'
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
images:
name: Qemu Images
runs-on: ubuntu-latest
steps:
- uses: digitalocean/action-doctl@v2
with:
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
- uses: webfactory/ssh-agent@v0.8.0
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: create droplet
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94' --tag-name=github-actions ga-out-of-tree-images-debian-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
# TODO Move to common script
- name: generate images
shell: bash
run: |
sleep 1m
IP=$(doctl compute droplet list --tag-name=github-actions --format "Name,Public IPv4" | grep -v ID | grep ga-out-of-tree-images-debian-$GITHUB_SHA | awk '{print $2}')
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
do
sleep 1s
done
sleep 5m
ssh root@$IP pkill apt-get || true
ssh root@$IP apt-get update
ssh root@$IP apt-get install -y git podman s3cmd
ssh root@$IP git clone https://github.com/out-of-tree/out-of-tree
ssh root@$IP "cd out-of-tree && git checkout $GITHUB_SHA"
ssh root@$IP "echo -e '[Unit]\nDescription=Debian image generator and uploader\n[Service]\nRemainAfterExit=yes\nStandardError=append:/var/log/images-debian.log\nStandardOutput=append:/var/log/images-debian.log\nType=oneshot' >> /etc/systemd/system/images-debian.service"
ssh root@$IP "echo Environment=HOST=fra1.digitaloceanspaces.com >> /etc/systemd/system/images-debian.service"
ssh root@$IP "echo Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com >> /etc/systemd/system/images-debian.service"
ssh root@$IP "echo Environment=ACCESS_KEY=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} >> /etc/systemd/system/images-debian.service"
ssh root@$IP "echo Environment=SECRET_KEY=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} >> /etc/systemd/system/images-debian.service"
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-debian-img/generate-images.sh' >> /etc/systemd/system/images-debian.service"
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-debian-img/*.tar.gz s3://out-of-tree/1.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-debian.service'
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-debian.service"
ssh root@$IP systemctl daemon-reload
ssh root@$IP systemctl start images-debian --no-block
while ! ssh root@$IP systemctl show images-debian -p SubState --value | grep -E '(failed|exited)'
do
sleep 3m
done
scp root@$IP:/var/log/images-debian.log .
ssh root@$IP systemctl is-active images-debian
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: images-debian-log
path: images-debian.log
- name: delete droplet
if: always()
run: doctl compute droplet delete -f ga-out-of-tree-images-debian-$GITHUB_SHA

View File

@ -0,0 +1,79 @@
name: Oracle Linux
on:
workflow_dispatch:
push:
paths:
- 'tools/qemu-oraclelinux-img/**'
- '.github/workflows/images-oraclelinux.yml'
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
images-oraclelinux:
name: Qemu Images
runs-on: ubuntu-latest
steps:
- uses: digitalocean/action-doctl@v2
with:
token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}
- uses: webfactory/ssh-agent@v0.8.0
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: create droplet
run: doctl compute droplet create --ssh-keys='b4:4c:66:7d:be:19:25:43:1c:e0:02:61:9f:49:12:94' --tag-name=github-actions ga-out-of-tree-images-oraclelinux-$GITHUB_SHA --size s-1vcpu-1gb --image ubuntu-22-04-x64 --wait
# TODO Move to common script
- name: generate images
shell: bash
run: |
sleep 1m
IP=$(doctl compute droplet list --tag-name=github-actions --format "Name,Public IPv4" | grep -v ID | grep ga-out-of-tree-images-oraclelinux-$GITHUB_SHA | awk '{print $2}')
while ! ssh -o StrictHostKeyChecking=accept-new root@$IP echo
do
sleep 1s
done
sleep 5m
ssh root@$IP pkill apt-get || true
ssh root@$IP apt-get update
ssh root@$IP apt-get install -y git podman s3cmd
ssh root@$IP git clone https://github.com/out-of-tree/out-of-tree
ssh root@$IP "cd out-of-tree && git checkout $GITHUB_SHA"
ssh root@$IP "echo -e '[Unit]\nDescription=Oracle Linux image generator and uploader\n[Service]\nRemainAfterExit=yes\nStandardError=append:/var/log/images-oraclelinux.log\nStandardOutput=append:/var/log/images-oraclelinux.log\nType=oneshot' >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP "echo Environment=HOST=fra1.digitaloceanspaces.com >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP "echo Environment=HOST_BUCKET=out-of-tree.fra1.digitaloceanspaces.com >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP "echo Environment=ACCESS_KEY=${{ secrets.DIGITALOCEAN_SPACES_ACCESS_KEY }} >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP "echo Environment=SECRET_KEY=${{ secrets.DIGITALOCEAN_SPACES_SECRET_KEY }} >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP "echo 'ExecStart=/root/out-of-tree/tools/qemu-oraclelinux-img/generate-images.sh' >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP 'echo ExecStart=/bin/sh -c \"s3cmd put --acl-public /root/out-of-tree/tools/qemu-oraclelinux-img/*.tar.gz s3://out-of-tree/1.0.0/ --host=\$HOST --host-bucket=\$HOST_BUCKET --access_key=\$ACCESS_KEY --secret_key=\$SECRET_KEY\" >> /etc/systemd/system/images-oraclelinux.service'
ssh root@$IP "echo TimeoutStopSec=1 >> /etc/systemd/system/images-oraclelinux.service"
ssh root@$IP systemctl daemon-reload
ssh root@$IP systemctl start images-oraclelinux --no-block
while ! ssh root@$IP systemctl show images-oraclelinux -p SubState --value | grep -E '(failed|exited)'
do
sleep 3m
done
scp root@$IP:/var/log/images-oraclelinux.log .
ssh root@$IP systemctl is-active images-oraclelinux
- name: delete droplet
if: always()
run: doctl compute droplet delete -f ga-out-of-tree-images-oraclelinux-$GITHUB_SHA

View File

@ -1,10 +1,24 @@
name: macOS
on: [push]
on:
workflow_dispatch:
push:
paths-ignore:
- ".github/workflows/images-*"
- ".github/workflows/debian-cache.yml"
- ".github/workflows/e2e.yml"
- "docs/**"
- "README.md"
pull_request:
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
build:
runs-on: macOS-latest
name: Build
runs-on: macOS-12
steps:
- uses: actions/checkout@v1

23
.github/workflows/scripts/setup.sh vendored Executable file
View File

@ -0,0 +1,23 @@
#!/usr/bin/env bash
set -eu
id
df -h
sudo systemd-run --wait rm -rf \
/usr/share/az* \
/usr/share/dotnet \
/usr/share/gradle* \
/usr/share/miniconda \
/usr/share/swift \
/var/lib/gems \
/var/lib/mysql \
/var/lib/snapd \
/opt/hostedtoolcache/CodeQL \
/opt/hostedtoolcache/Java_Temurin-Hotspot_jdk
sudo fstrim /
df -h

View File

@ -1,12 +1,74 @@
name: Ubuntu
on: [push]
on:
workflow_dispatch:
push:
paths-ignore:
- ".github/workflows/images-*"
- ".github/workflows/e2e.yml"
- ".github/workflows/macos.yml"
- ".github/workflows/debian-cache.yml"
- "docs/**"
- ".readthedocs.yaml"
- "README.md"
pull_request:
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: actions/setup-go@v5
- name: Build
run: go build
test-unit:
name: Unit Testing
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: actions/setup-go@v5
- name: Install dependencies for tests
run: |
sudo apt-get update
sudo apt-get install qemu-system-x86
- name: Bootstrap
run: ./tools/qemu-ubuntu-img/bootstrap.sh
- name: Unit Testing
run: |
mkdir ~/.out-of-tree
go test -parallel 1 -v ./...
test-end-to-end-examples:
needs: [build]
name: Examples
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
example: [
{ dir: "kernel-module", params: "" },
{ dir: "kernel-exploit", params: "--threshold=0" },
{ dir: "script", params: "" },
{ dir: "preload", params: "" }
]
steps:
- uses: actions/checkout@v1
- uses: actions/setup-go@v5
- name: Setup
run: .github/workflows/scripts/setup.sh
- name: Build
run: go build
@ -14,10 +76,137 @@ jobs:
- name: Install dependencies for tests
run: |
sudo apt-get update
sudo apt-get install qemu
sudo apt-get install -y qemu-system-x86
echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_22.04/ /' | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:unstable.list
curl -fsSL https://download.opensuse.org/repositories/devel:kubic:libcontainers:unstable/xUbuntu_22.04/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/devel_kubic_libcontainers_unstable.gpg > /dev/null
sudo apt-get update
sudo apt-get install -y podman
- name: Bootstrap
run: ./tools/qemu-debian-img/bootstrap.sh
- name: End-to-End Testing [${{ matrix.example.dir }}]
run: |
cd examples/${{ matrix.example.dir }}
../../out-of-tree --log-level=debug kernel autogen --max=1
../../out-of-tree --log-level=debug pew --qemu-timeout=10m ${{ matrix.example.params }}
- name: Test
run: go test -parallel 1 -v ./...
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: test-end-to-end-examples-${{ matrix.example.dir }}-logs
path: ~/.out-of-tree/logs
test-end-to-end:
needs: [build]
name: E2E
runs-on: ubuntu-latest
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
#type: [ Script, Module ]
type: [ Module ]
os: [
{ distro: Ubuntu, release: 12.04 },
{ distro: Ubuntu, release: 14.04 },
{ distro: Ubuntu, release: 16.04 },
{ distro: Ubuntu, release: 18.04 },
{ distro: Ubuntu, release: 20.04 },
{ distro: Ubuntu, release: 22.04 },
{ distro: CentOS, release: 6 },
{ distro: CentOS, release: 7 },
{ distro: CentOS, release: 8 },
{ distro: OracleLinux, release: 6 },
{ distro: OracleLinux, release: 7 },
{ distro: OracleLinux, release: 8 },
{ distro: OracleLinux, release: 9 },
{ distro: Debian, release: 7 },
{ distro: Debian, release: 8 },
{ distro: Debian, release: 9 },
{ distro: Debian, release: 10 },
{ distro: Debian, release: 11 },
{ distro: Debian, release: 12 },
{ distro: OpenSUSE, release: "12.1" },
{ distro: OpenSUSE, release: "12.2" },
{ distro: OpenSUSE, release: "12.3" },
{ distro: OpenSUSE, release: "13.1" },
{ distro: OpenSUSE, release: "13.2" },
{ distro: OpenSUSE, release: "42.1" },
{ distro: OpenSUSE, release: "42.2" },
{ distro: OpenSUSE, release: "42.3" },
# { distro: OpenSUSE, release: "15.0" },
{ distro: OpenSUSE, release: "15.1" },
{ distro: OpenSUSE, release: "15.2" },
{ distro: OpenSUSE, release: "15.3" },
{ distro: OpenSUSE, release: "15.4" },
{ distro: OpenSUSE, release: "15.5" }
]
steps:
- name: Backup docker files
run: |
echo "backup moby/buildkit image"
sudo docker image save -o ${GITHUB_WORKSPACE}/images.tar moby/buildkit
echo "prune docker"
sudo docker system prune -a -f
echo "back up /var/lib/docker folder structure and other files"
sudo rsync -aPq /var/lib/docker/ ${GITHUB_WORKSPACE}/docker
- name: Maximize build space
uses: easimon/maximize-build-space@master
with:
overprovision-lvm: 'true'
remove-dotnet: 'true'
# instead of using default value to mount to build path,
# /var/lib/docker/ is really the place we need more spaces.
build-mount-path: '/var/lib/docker/'
- name: Restore docker files
run: |
sudo rsync -aPq ${GITHUB_WORKSPACE}/docker/ /var/lib/docker
sudo rm -rf ${GITHUB_WORKSPACE}/docker
sudo ls ${GITHUB_WORKSPACE} -l
sudo docker image load -i ${GITHUB_WORKSPACE}/images.tar
sudo rm ${GITHUB_WORKSPACE}/images.tar
- uses: actions/checkout@v1
- uses: actions/setup-go@v5
- name: Setup
run: .github/workflows/scripts/setup.sh
- name: Build
run: go build
- name: Install dependencies for tests
run: |
sudo apt-get update
sudo apt-get install qemu-system-x86
- name: End-to-End Testing ${{ matrix.type }} [${{ matrix.os.distro }} ${{ matrix.os.release }}]
shell: bash
run: |
mkdir test
cd test
echo 'name = "test"' >> .out-of-tree.toml
echo 'type = "${{ matrix.type }}"' >> .out-of-tree.toml
echo 'script = "script.sh"' >> .out-of-tree.toml
echo '[[targets]]' >> .out-of-tree.toml
echo 'distro = { id = "${{ matrix.os.distro }}", release = "${{ matrix.os.release }}" }' >> .out-of-tree.toml
echo 'kernel = { regex = ".*" }' >> .out-of-tree.toml
echo -e '#!/bin/sh\necho ok' >> script.sh
cp ../examples/kernel-module/{module.c,Makefile,test.sh} .
../out-of-tree --log-level=debug kernel list-remote --distro=${{ matrix.os.distro }} --ver=${{ matrix.os.release }}
../out-of-tree --log-level=debug kernel autogen --max=1 --shuffle
../out-of-tree --log-level=debug pew --qemu-timeout=20m --include-internal-errors
- name: Archive logs
if: always()
uses: actions/upload-artifact@v4
with:
name: test-end-to-end-${{ matrix.type }}-${{ matrix.os.distro }}-${{ matrix.os.release }}-logs
path: ~/.out-of-tree/logs

4
.gitignore vendored
View File

@ -10,3 +10,7 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
out-of-tree
*.cache
result

12
.readthedocs.yaml Normal file
View File

@ -0,0 +1,12 @@
version: 2
build:
os: ubuntu-22.04
tools:
python: latest
sphinx:
configuration: docs/conf.py
formats:
- pdf

View File

@ -1,30 +0,0 @@
language: go
go:
- 1.x
- master
os:
- linux
dist:
- bionic
addons:
apt:
packages:
- qemu
services:
- docker
env:
- GO111MODULE=on
install: true
before_script:
- ./tools/qemu-debian-img/bootstrap.sh
script:
- go test -parallel 1 -v ./...

View File

@ -4,6 +4,105 @@
[Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [2.1.0]
### Added
- Graceful shutdown on ^C while kernels generation.
- Flag to set the container runtime command.
- out-of-tree image --dry-run for printing full qemu command.
### Changed
- No exit at the end of the retries, will continue with the other
kernels.
- All temporary files moved to ~/.out-of-tree/tmp/.
### Fixed
- Discrepancies between /lib/modules and /boot should no longer lead
to fatal errors.
- Podman support on macOS.
## [2.0.0]
### Breaking
- Layers with kernels in containers have been abandoned in favor of
installation to mounted volumes.
- Command line interface has been changed to alecthomas/kong.
### Added
- Command `kernel install` to install specific kernel.
- Command `containers` to manage containers.
- Command `image edit` to edit qemu image.
- Flag `--force` to force reinstallation of the kernel.
- Flag `--artifact-config` to specify the path to .out-of-tree.toml.
- Flag `--no-headers` flag to install kernel and initrd only.
- Flag `--shuffle` to randomize the order of kernels for
installation/testing.
- Support make targets in artifact config.
- Support patches in artifact config.
- Support for copying standard modules to qemu.
- Script artifact type for various automation and information gathering.
- Add TestFiles to artifact config, transfers additional test files to VM.
- Improved logging, with logfile at ~/.out-of-tree/logs/out-of-tree.log
- Kernel installation will retry (10 times by default) in case of
network problems.
- Stdout trace (with --log-level=trace, and always to logfile) for
qemu and container execution.
- Compatibility with Podman.
- Support for Ubuntu 22.04.
## [1.4.0]
### Added
- Parameter `--docker-timeout` may also be set in the artifact
configuration file.
- Preload modules before inserting module or run exploit. Modules can
be specified by git repository path in the `repo` parameter of
section `[[preload]]`. Also, there is a `path` parameter for local
projects. Note that `repo` is using a cache that uses last commit
hash to check if the project needs to be rebuilt, so it's not suitable
for local development (except if you will commit each time before
run out-of-tree).
- Flag `--disable-preload` to ignore `[[preload]]` section of
configuration file.
- Now `out-of-tree log dump` will show the last log if no ID
specified.
## [1.3.0] 2020-05-30
### Added
- Support for Ubuntu 20.04 and CentOS 8.
## [1.2.1] 2019-12-25
### Fixed
@ -156,7 +255,7 @@
- Temporary files are moved to `~/.out-of-tree/tmp/` to avoid docker
mounting issues on some systems.
## [0.2.0] - 2019-12-01
## [0.2.0] - 2018-12-01
The main purpose of the release is to simplify installation.
@ -178,7 +277,7 @@ The main purpose of the release is to simplify installation.
- No more warnings if test.sh does not exist.
## [0.1.0] - 2019-11-20
## [0.1.0] - 2018-11-20
Initial release that was never tagged.

105
README.md
View File

@ -1,100 +1,59 @@
[![Codacy Badge](https://api.codacy.com/project/badge/Grade/aba4aad2046b4d1a9a99cf98e22c018b)](https://app.codacy.com/app/jollheef/out-of-tree?utm_source=github.com&utm_medium=referral&utm_content=jollheef/out-of-tree&utm_campaign=Badge_Grade_Dashboard)
[![Build Status](https://travis-ci.com/jollheef/out-of-tree.svg?branch=master)](https://travis-ci.com/jollheef/out-of-tree)
[![Go Report Card](https://goreportcard.com/badge/code.dumpstack.io/tools/out-of-tree)](https://goreportcard.com/report/code.dumpstack.io/tools/out-of-tree)
[![Ubuntu](https://github.com/out-of-tree/out-of-tree/actions/workflows/ubuntu.yml/badge.svg)](https://github.com/out-of-tree/out-of-tree/actions/workflows/ubuntu.yml)
[![E2E](https://github.com/out-of-tree/out-of-tree/actions/workflows/e2e.yml/badge.svg)](https://github.com/out-of-tree/out-of-tree/actions/workflows/e2e.yml)
[![Documentation Status](https://readthedocs.org/projects/out-of-tree/badge/?version=latest)](https://out-of-tree.readthedocs.io/en/latest/?badge=latest)
[![Donate](https://img.shields.io/badge/donate-paypal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=R8W2UQPZ5X5JE&source=url)
[![Donate](https://img.shields.io/badge/donate-bitcoin-green.svg)](https://blockchair.com/bitcoin/address/bc1q23fyuq7kmngrgqgp6yq9hk8a5q460f39m8nv87)
# [out-of-tree](https://out-of-tree.io)
out-of-tree kernel {module, exploit} development tool
*out-of-tree* is the kernel {module, exploit} development tool.
out-of-tree is for automating some routine actions for creating development environments for debugging kernel modules and exploits, generating reliability statistics for exploits, and also provides the ability to easily integrate into CI (Continuous Integration).
*out-of-tree* was created to reduce the complexity of the environment for developing, testing and debugging Linux kernel exploits and out-of-tree kernel modules (hence the name "out-of-tree").
![Screenshot](https://cloudflare-ipfs.com/ipfs/Qmb88fgdDjbWkxz91sWsgmoZZNfVThnCtj37u3mF2s3T3T)
## Requirements
## Installation
[Qemu](https://www.qemu.org), [docker](https://docker.com) and [golang](https://golang.org) are required.
### GNU/Linux (with [Nix](https://nixos.org/nix/))
Also do not forget to set GOPATH and PATH e.g.:
sudo apt install podman || sudo dnf install podman
$ echo 'export GOPATH=$HOME' >> ~/.bashrc
$ echo 'export PATH=$PATH:$HOME/bin' >> ~/.bashrc
$ source ~/.bashrc
curl -L https://nixos.org/nix/install | sh
mkdir -p ~/.config/nix
echo "experimental-features = nix-command flakes" >> ~/.config/nix/nix.conf
### Gentoo
# stable
nix profile install nixpkgs#out-of-tree
# emerge app-emulation/qemu app-emulation/docker dev-lang/go
# latest
nix profile install git+https://code.dumpstack.io/tools/out-of-tree
### macOS
$ brew install go qemu
$ brew cask install docker
Note: case-sensitive FS is required for the ~/.out-of-tree directory.
### Fedora
$ brew install podman
$ podman machine stop || true
$ podman machine rm || true
$ podman machine init --cpus=4 --memory=4096 -v $HOME:$HOME
$ podman machine start
$ brew tap out-of-tree/repo
$ brew install out-of-tree
$ sudo dnf install go qemu moby-engine
Also check out [docker post-installation steps](https://docs.docker.com/install/linux/linux-postinstall/).
## Build from source
$ go get -u code.dumpstack.io/tools/out-of-tree
Then you can check it on kernel module example:
$ cd $GOPATH/src/code.dumpstack.io/tools/out-of-tree/examples/kernel-module
$ out-of-tree kernel autogen # generate kernels based on .out-of-tree.toml
$ out-of-tree pew
Read [documentation](https://out-of-tree.readthedocs.io) for further info.
## Examples
Run by absolute path
Generate all Ubuntu 22.04 kernels:
$ out-of-tree --path /path/to/exploit/directory pew
$ out-of-tree kernel genall --distro=Ubuntu --ver=22.04
Test only with one kernel:
Run tests based on .out-of-tree.toml definitions:
$ out-of-tree pew --kernel='Ubuntu:4.10.0-30-generic'
$ out-of-tree pew
Test with a specific kernel:
$ out-of-tree pew --kernel='Ubuntu:5.4.0-29-generic'
Run debug environment:
$ out-of-tree debug --kernel='Ubuntu:4.10.0-30-generic'
Test binary module/exploit with implicit defined test ($BINARY_test)
$ out-of-tree pew --binary /path/to/exploit
Test binary module/exploit with explicit defined test
$ out-of-tree pew --binary /path/to/exploit --test /path/to/exploit_test
Guess work kernels:
$ out-of-tree pew --guess
Use custom kernels config
$ out-of-tree --kernels /path/to/kernels.toml pew
Generate all kernels
$ out-of-tree kernel genall --distro Ubuntu --ver 16.04
## Troubleshooting
If anything happens that you cannot solve -- just remove `$HOME/.out-of-tree`.
But it would be better if you filed a bug report.
## Development
Read [Qemu API](qemu/README.md).
### Generate images
$ cd $GOPATH/src/code.dumpstack.io/tools/out-of-tree/tools/qemu-debian-img/
$ docker run --privileged -v $(pwd):/shared -e IMAGE=/shared/ubuntu1404.img -e RELEASE=trusty -t gen-ubuntu1804-image
$ docker run --privileged -v $(pwd):/shared -e IMAGE=/shared/ubuntu1604.img -e RELEASE=xenial -t gen-ubuntu1804-image
$ out-of-tree debug --kernel='Ubuntu:5.4.0-29-generic'

191
api/api.go Normal file
View File

@ -0,0 +1,191 @@
package api
import (
"bytes"
"encoding/gob"
"errors"
"fmt"
"net"
"reflect"
"time"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/distro"
"github.com/google/uuid"
)
// ErrInvalid is a local sentinel: Resp.Decode assigns it to Resp.Err
// because error values do not survive the gob wire format (only the
// Error string does). It must never be treated as a real failure.
var ErrInvalid = errors.New("")

// Status is the lifecycle state of a Job.
type Status string

const (
	StatusNew     Status = "new"
	StatusWaiting Status = "waiting"
	StatusRunning Status = "running"
	StatusSuccess Status = "success"
	StatusFailure Status = "failure"
)

// Command names an API operation carried in Req.Command.
type Command string

const (
	RawMode   Command = "rawmode"
	AddJob    Command = "add_job"
	ListJobs  Command = "list_jobs"
	JobLogs   Command = "job_logs"
	JobStatus Command = "job_status"
	AddRepo   Command = "add_repo"
	ListRepos Command = "list_repos"
	Kernels   Command = "kernels"
)
// Job is a single unit of daemon work: build and test one artifact
// (RepoName at Commit) against one target kernel.
type Job struct {
	ID        int64
	UpdatedAt time.Time

	// Job UUID
	UUID string
	// Group UUID (jobs submitted together share a group)
	Group string

	RepoName string
	Commit   string

	Description string

	Artifact artifact.Artifact
	Target   distro.KernelInfo

	Created  time.Time
	Started  time.Time
	Finished time.Time

	Status Status
}

// GenUUID assigns a fresh random UUID to the job.
func (job *Job) GenUUID() {
	job.UUID = uuid.New().String()
}
// ListJobsParams is the parameters for ListJobs command; zero-value
// fields act as "no filter".
type ListJobsParams struct {
	// Group UUID
	Group string

	// Repo name
	Repo string

	// Commit hash
	Commit string

	// Status of the job
	Status Status

	// UpdatedAfter filters on Job.UpdatedAt; presumably a unix
	// timestamp — TODO confirm against the daemon implementation.
	UpdatedAfter int64
}

// Repo is a source repository registered with the daemon.
type Repo struct {
	ID   int64
	Name string
	Path string
}

// JobLog is one named log produced by a job.
type JobLog struct {
	Name string
	Text string
}

// Req is the client->daemon request envelope: a command plus an
// optional gob-encoded payload tagged with its Go type string.
type Req struct {
	Command Command

	Type string
	Data []byte
}
// SetData gob-encodes data into r.Data and records its dynamic type
// in r.Type so the receiving side can verify it before decoding.
func (r *Req) SetData(data any) error {
	r.Type = fmt.Sprintf("%v", reflect.TypeOf(data))

	buf := new(bytes.Buffer)
	err := gob.NewEncoder(buf).Encode(data)
	r.Data = buf.Bytes()
	return err
}
// GetData decodes r.Data into data. An empty payload is not an
// error; a payload whose recorded type differs from data's is.
func (r *Req) GetData(data any) error {
	if len(r.Data) == 0 {
		return nil
	}

	want := fmt.Sprintf("%v", reflect.TypeOf(data))
	if r.Type != want {
		return fmt.Errorf("type mismatch (%v != %v)", r.Type, want)
	}

	return gob.NewDecoder(bytes.NewReader(r.Data)).Decode(data)
}
// Encode writes the gob-serialized request to conn.
func (r *Req) Encode(conn net.Conn) (err error) {
	return gob.NewEncoder(conn).Encode(r)
}

// Decode reads a gob-serialized request from conn.
func (r *Req) Decode(conn net.Conn) (err error) {
	return gob.NewDecoder(conn).Decode(r)
}
// Resp is the daemon's reply. Error carries the failure as a string
// because gob cannot ship arbitrary error values; Err is local-only
// (excluded from JSON, reset to the ErrInvalid sentinel on Decode).
type Resp struct {
	UUID string

	Error string
	Err   error `json:"-"`

	Type string
	Data []byte
}

// NewResp returns a response with a fresh random UUID.
func NewResp() (resp Resp) {
	resp.UUID = uuid.New().String()
	return
}
// SetData gob-encodes data into r.Data and records its dynamic type
// in r.Type so GetData can verify it on the other side.
func (r *Resp) SetData(data any) error {
	r.Type = fmt.Sprintf("%v", reflect.TypeOf(data))

	buf := new(bytes.Buffer)
	err := gob.NewEncoder(buf).Encode(data)
	r.Data = buf.Bytes()
	return err
}

// GetData decodes r.Data into data. An empty payload is not an
// error; a payload whose recorded type differs from data's is.
func (r *Resp) GetData(data any) error {
	if len(r.Data) == 0 {
		return nil
	}

	want := fmt.Sprintf("%v", reflect.TypeOf(data))
	if r.Type != want {
		return fmt.Errorf("type mismatch (%v != %v)", r.Type, want)
	}

	return gob.NewDecoder(bytes.NewReader(r.Data)).Decode(data)
}
// Encode flattens r.Err into the transmittable Error string (unless
// it is the ErrInvalid sentinel or Error is already set) and writes
// the gob-serialized response to conn.
func (r *Resp) Encode(conn net.Conn) (err error) {
	if r.Err != nil && r.Err != ErrInvalid && r.Error == "" {
		r.Error = fmt.Sprintf("%v", r.Err)
	}
	return gob.NewEncoder(conn).Encode(r)
}

// Decode reads a response from conn. Err is set to the ErrInvalid
// sentinel because only the Error string crosses the wire; callers
// must inspect r.Error, not r.Err.
func (r *Resp) Decode(conn net.Conn) (err error) {
	err = gob.NewDecoder(conn).Decode(r)
	r.Err = ErrInvalid
	return
}

443
artifact/artifact.go Normal file
View File

@ -0,0 +1,443 @@
package artifact
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/naoina/toml"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// Kernel selects kernel releases for a target by regular expression.
type Kernel struct {
	// TODO
	// Version string
	// From string
	// To string

	// Regex (prev. ReleaseMask) must match the kernel release;
	// ExcludeRegex, when non-empty, must not match it.
	Regex        string
	ExcludeRegex string
}

// Target defines the kernel
type Target struct {
	Distro distro.Distro

	Kernel Kernel
}
// DockerName returns a stable container name for the target's
// distro/release, e.g. "out_of_tree_ubuntu_18__04" (dots in the
// release are replaced with "__").
func (km Target) DockerName() string {
	id := strings.ToLower(km.Distro.ID.String())
	release := strings.ReplaceAll(km.Distro.Release, ".", "__")
	return "out_of_tree_" + id + "_" + release
}
// ArtifactType is the kernel module or exploit
type ArtifactType int

const (
	// KernelModule is any kind of kernel module
	KernelModule ArtifactType = iota
	// KernelExploit is the privilege escalation exploit
	KernelExploit
	// Script for information gathering or automation
	Script
)

// String returns the display/TOML name; it panics for values outside
// the three declared constants (index out of range).
func (at ArtifactType) String() string {
	return [...]string{"module", "exploit", "script"}[at]
}
// UnmarshalTOML decodes an artifact type from its TOML string form
// for github.com/naoina/toml support. Matching is case-insensitive
// and substring-based ("module", "exploit", "script").
func (at *ArtifactType) UnmarshalTOML(data []byte) (err error) {
	raw := strings.Trim(string(data), `"`)

	switch lower := strings.ToLower(raw); {
	case strings.Contains(lower, "module"):
		*at = KernelModule
	case strings.Contains(lower, "exploit"):
		*at = KernelExploit
	case strings.Contains(lower, "script"):
		*at = Script
	default:
		err = fmt.Errorf("type %s is unsupported", raw)
	}
	return
}
// MarshalTOML encodes the artifact type as a quoted TOML string for
// github.com/naoina/toml support; unknown values yield an error and
// an empty quoted string.
func (at ArtifactType) MarshalTOML() (data []byte, err error) {
	names := map[ArtifactType]string{
		KernelModule:  "module",
		KernelExploit: "exploit",
		Script:        "script",
	}

	s, ok := names[at]
	if !ok {
		err = fmt.Errorf("cannot marshal %d", at)
	}
	data = []byte(`"` + s + `"`)
	return
}
// Duration type with toml unmarshalling support
type Duration struct {
	time.Duration
}

// UnmarshalTOML parses a quoted Go duration string (e.g. "1m30s").
func (d *Duration) UnmarshalTOML(data []byte) (err error) {
	duration := strings.Replace(string(data), "\"", "", -1)
	d.Duration, err = time.ParseDuration(duration)
	return
}

// MarshalTOML emits the duration as a quoted string.
func (d Duration) MarshalTOML() (data []byte, err error) {
	data = []byte(`"` + d.Duration.String() + `"`)
	return
}
// PreloadModule is one [[preload]] config entry: a kernel module to
// load into the VM before the artifact itself. Repo entries are
// cached by commit; Path entries are always rebuilt.
type PreloadModule struct {
	Repo             string
	Path             string
	TimeoutAfterLoad Duration
}

// Extra test files to copy over
type FileTransfer struct {
	User   string
	Local  string
	Remote string
}

// Patch is one source patch: an on-disk diff (Path), an inline diff
// (Source), and/or a shell Script to execute.
type Patch struct {
	Path   string
	Source string
	Script string
}
// Artifact is for .out-of-tree.toml
type Artifact struct {
	Name string
	Type ArtifactType

	// SourcePath is the project root; SourceFiles, when non-empty,
	// restricts the build copy to an explicit file list.
	SourcePath  string
	SourceFiles []string

	// TestFiles are extra files uploaded to the VM before testing.
	TestFiles []FileTransfer

	Targets []Target

	// Script is the entry point for Type == Script, relative to
	// SourcePath.
	Script string

	Qemu struct {
		Cpus              int
		Memory            int
		Timeout           Duration
		AfterStartTimeout Duration
	}

	Docker struct {
		Timeout Duration
	}

	// Mitigations disables kernel hardening features in qemu.
	Mitigations struct {
		DisableSmep  bool
		DisableSmap  bool
		DisableKaslr bool
		DisableKpti  bool
	}

	Patches []Patch

	Make struct {
		Target string
	}

	// StandardModules copies the distro's standard kernel modules
	// into the VM before testing.
	StandardModules bool

	Preload []PreloadModule
}
// Read loads an artifact definition from a .out-of-tree.toml file at
// path and validates that the artifact name is a single token (it is
// used to derive file and make-target names).
func (Artifact) Read(path string) (ka Artifact, err error) {
	f, err := os.Open(path)
	if err != nil {
		return
	}
	defer f.Close()

	buf, err := io.ReadAll(f)
	if err != nil {
		return
	}

	err = toml.Unmarshal(buf, &ka)
	if err != nil {
		// Bug fix: previously a decode failure left ka.Name empty,
		// so the name check below overwrote err with a misleading
		// "name should not contain spaces" error.
		return
	}

	if len(strings.Fields(ka.Name)) != 1 {
		err = errors.New("artifact name should not contain spaces")
	}
	return
}
// checkSupport reports whether kernel ki matches one target: same
// distro (release is compared only when the target pins one), kernel
// release matches Regex and does not match ExcludeRegex.
func (ka Artifact) checkSupport(ki distro.KernelInfo, target Target) (
	supported bool, err error) {

	if target.Distro.Release == "" {
		// No release pinned: compare distro ID only.
		if ki.Distro.ID != target.Distro.ID {
			return
		}
	} else {
		if !ki.Distro.Equal(target.Distro) {
			return
		}
	}

	r, err := regexp.Compile(target.Kernel.Regex)
	if err != nil {
		return
	}

	// An empty ExcludeRegex compiles to a match-everything pattern,
	// hence the explicit "" check before using it below.
	exr, err := regexp.Compile(target.Kernel.ExcludeRegex)
	if err != nil {
		return
	}

	if !r.MatchString(ki.KernelRelease) {
		return
	}

	if target.Kernel.ExcludeRegex != "" && exr.MatchString(ki.KernelRelease) {
		return
	}

	supported = true
	return
}
// Supported reports whether the kernel ki matches at least one of
// the artifact's targets; the error (if any) comes from the last
// target checked (regex compilation).
func (ka Artifact) Supported(ki distro.KernelInfo) (supported bool, err error) {
	for _, target := range ka.Targets {
		supported, err = ka.checkSupport(ki, target)
		if supported {
			return
		}
	}
	return
}
// Process builds the artifact (or reuses the pre-built cBinary),
// boots a qemu VM for kernel ki, uploads the artifact and its test
// files, runs the test and reports the outcome through dump. When
// endless is true the result is dumped once and the test is then
// re-run forever (optionally with cEndlessStress running in the
// background and cEndlessTimeout between iterations).
func (ka Artifact) Process(slog zerolog.Logger, ki distro.KernelInfo,
	endless bool, cBinary,
	cEndlessStress string, cEndlessTimeout time.Duration,
	dump func(q *qemu.System, ka Artifact, ki distro.KernelInfo,
		result *Result)) {

	slog.Info().Msg("start")
	testStart := time.Now()
	defer func() {
		slog.Debug().Str("test_duration",
			time.Since(testStart).String()).
			Msg("")
	}()

	kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		slog.Error().Err(err).Msg("qemu init")
		return
	}
	q.Log = slog

	// Artifact config overrides qemu defaults only when set.
	if ka.Qemu.Timeout.Duration != 0 {
		q.Timeout = ka.Qemu.Timeout.Duration
	}
	if ka.Qemu.Cpus != 0 {
		q.Cpus = ka.Qemu.Cpus
	}
	if ka.Qemu.Memory != 0 {
		q.Memory = ka.Qemu.Memory
	}

	// Mitigations stay enabled unless explicitly disabled.
	q.SetKASLR(!ka.Mitigations.DisableKaslr)
	q.SetSMEP(!ka.Mitigations.DisableSmep)
	q.SetSMAP(!ka.Mitigations.DisableSmap)
	q.SetKPTI(!ka.Mitigations.DisableKpti)

	if ki.CPU.Model != "" {
		q.CPU.Model = ki.CPU.Model
	}

	if len(ki.CPU.Flags) != 0 {
		q.CPU.Flags = ki.CPU.Flags
	}

	if endless {
		// Endless runs are never killed by the qemu timeout.
		q.Timeout = 0
	}

	qemuStart := time.Now()

	slog.Debug().Msgf("qemu start %v", qemuStart)
	err = q.Start()
	if err != nil {
		slog.Error().Err(err).Msg("qemu start")
		return
	}
	defer q.Stop()

	slog.Debug().Msgf("wait %v", ka.Qemu.AfterStartTimeout)
	time.Sleep(ka.Qemu.AfterStartTimeout.Duration)

	// Periodic liveness trace while the VM is running.
	go func() {
		time.Sleep(time.Minute)
		for !q.Died {
			slog.Debug().Msg("still alive")
			time.Sleep(time.Minute)
		}
	}()

	tmp, err := os.MkdirTemp(dotfiles.Dir("tmp"), "")
	if err != nil {
		slog.Error().Err(err).Msg("making tmp directory")
		return
	}
	defer os.RemoveAll(tmp)

	result := Result{}
	if !endless {
		// Report the result exactly once, on any exit path.
		defer dump(q, ka, ki, &result)
	}

	var cTest string

	if ka.Type == Script {
		// Scripts are not compiled; the script itself is the test.
		result.BuildDir = ka.SourcePath
		result.Build.Ok = true
		ka.Script = filepath.Join(ka.SourcePath, ka.Script)
		cTest = ka.Script
	} else if cBinary == "" {
		// TODO: build should return structure
		start := time.Now()
		result.BuildDir, result.BuildArtifact, result.Build.Output, err =
			Build(slog, tmp, ka, ki, ka.Docker.Timeout.Duration)
		slog.Debug().Str("duration", time.Since(start).String()).
			Msg("build done")
		if err != nil {
			log.Error().Err(err).Msg("build")
			return
		}
		result.Build.Ok = true
	} else {
		// Pre-built binary supplied by the caller; skip the build.
		result.BuildArtifact = cBinary
		result.Build.Ok = true
	}

	if cTest == "" {
		// Implicit test convention: <artifact>_test next to the
		// binary, falling back to test.sh from the source copy.
		cTest = result.BuildArtifact + "_test"
		if _, err := os.Stat(cTest); err != nil {
			slog.Debug().Msgf("%s does not exist", cTest)
			cTest = tmp + "/source/" + "test.sh"
		} else {
			slog.Debug().Msgf("%s exist", cTest)
		}
	}

	if ka.Qemu.Timeout.Duration == 0 {
		ka.Qemu.Timeout.Duration = time.Minute
	}
	err = q.WaitForSSH(ka.Qemu.Timeout.Duration)
	if err != nil {
		result.InternalError = err
		return
	}
	slog.Debug().Str("qemu_startup_duration",
		time.Since(qemuStart).String()).
		Msg("ssh is available")

	remoteTest, err := copyTest(q, cTest, ka)
	if err != nil {
		result.InternalError = err
		slog.Error().Err(err).Msg("copy test script")
		return
	}

	if ka.StandardModules {
		// Module depends on one of the standard modules
		start := time.Now()
		err = CopyStandardModules(q, ki)
		if err != nil {
			result.InternalError = err
			slog.Error().Err(err).Msg("copy standard modules")
			return
		}
		slog.Debug().Str("duration", time.Since(start).String()).
			Msg("copy standard modules")
	}

	err = PreloadModules(q, ka, ki, ka.Docker.Timeout.Duration)
	if err != nil {
		result.InternalError = err
		slog.Error().Err(err).Msg("preload modules")
		return
	}

	start := time.Now()
	copyArtifactAndTest(slog, q, ka, &result, remoteTest)
	slog.Debug().Str("duration", time.Since(start).String()).
		Msgf("test completed (success: %v)", result.Test.Ok)

	if !endless {
		return
	}

	// Endless mode: dump once, then loop the test until a failure.
	dump(q, ka, ki, &result)

	if !result.Build.Ok || !result.Run.Ok || !result.Test.Ok {
		return
	}

	slog.Info().Msg("start endless tests")

	if cEndlessStress != "" {
		slog.Debug().Msg("copy and run endless stress script")
		err = q.CopyAndRunAsync("root", cEndlessStress)
		if err != nil {
			q.Stop()
			//f.Sync()
			slog.Fatal().Err(err).Msg("cannot copy/run stress")
			return
		}
	}

	for {
		output, err := q.Command("root", remoteTest)
		if err != nil {
			q.Stop()
			//f.Sync()
			slog.Fatal().Err(err).Msg(output)
			return
		}
		slog.Debug().Msg(output)

		slog.Info().Msg("test success")

		slog.Debug().Msgf("wait %v", cEndlessTimeout)
		time.Sleep(cEndlessTimeout)
	}
}

36
artifact/artifact_test.go Normal file
View File

@ -0,0 +1,36 @@
package artifact
import (
"testing"
"code.dumpstack.io/tools/out-of-tree/distro"
"github.com/naoina/toml"
)
// TestMarshalUnmarshal checks that an Artifact config survives a
// TOML encode/decode round trip without errors.
func TestMarshalUnmarshal(t *testing.T) {
	artifactCfg := Artifact{
		Name: "Put name here",
		Type: KernelModule,
	}
	artifactCfg.Targets = append(artifactCfg.Targets,
		Target{
			Distro: distro.Distro{
				ID:      distro.Ubuntu,
				Release: "18.04",
			},
			Kernel: Kernel{
				Regex: ".*",
			},
		})

	buf, err := toml.Marshal(&artifactCfg)
	if err != nil {
		t.Fatal(err)
	}

	var artifactCfgNew Artifact
	err = toml.Unmarshal(buf, &artifactCfgNew)
	if err != nil {
		t.Fatal(err)
	}
}

175
artifact/preload.go Normal file
View File

@ -0,0 +1,175 @@
// Copyright 2020 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package artifact
import (
"crypto/sha1"
"encoding/hex"
"errors"
"os"
"path/filepath"
"time"
"github.com/go-git/go-git/v5"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// PreloadModules builds and loads every [[preload]] entry of the
// artifact into the running VM, in order, stopping at the first
// failure. dockerTimeout bounds containerized builds.
func PreloadModules(q *qemu.System, ka Artifact, ki distro.KernelInfo,
	dockerTimeout time.Duration) (err error) {

	for _, module := range ka.Preload {
		if err = preload(q, ki, module, dockerTimeout); err != nil {
			return
		}
	}
	return
}
// preload builds one preload module and insmods it into the VM. A
// git `repo` entry goes through the commit-keyed build cache; a
// local `path` entry is always rebuilt. After loading it waits
// TimeoutAfterLoad to let the module settle.
func preload(q *qemu.System, ki distro.KernelInfo, pm PreloadModule,
	dockerTimeout time.Duration) (err error) {

	var workPath, cache string
	if pm.Path != "" {
		log.Print("Use non-git path for preload module (no cache)")
		workPath = pm.Path
	} else if pm.Repo != "" {
		workPath, cache, err = cloneOrPull(pm.Repo, ki)
		if err != nil {
			return
		}
	} else {
		err = errors.New("no repo/path in preload entry")
		return
	}

	err = buildAndInsmod(workPath, q, ki, dockerTimeout, cache)
	if err != nil {
		return
	}

	time.Sleep(pm.TimeoutAfterLoad.Duration)
	return
}
// buildAndInsmod builds the module at workPath (or reuses the cached
// artifact at cache if it exists), saves a fresh build back into the
// cache, and insmods the result in the VM.
func buildAndInsmod(workPath string, q *qemu.System, ki distro.KernelInfo,
	dockerTimeout time.Duration, cache string) (err error) {

	tmp, err := tempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	var af string
	if pathExists(cache) {
		af = cache
	} else {
		af, err = buildPreload(workPath, tmp, ki, dockerTimeout)
		if err != nil {
			return
		}
		// Only git-based preloads carry a cache path; local paths
		// pass cache == "".
		if cache != "" {
			err = CopyFile(af, cache)
			if err != nil {
				return
			}
		}
	}

	output, err := q.CopyAndInsmod(af)
	if err != nil {
		log.Print(output)
		return
	}
	return
}
// buildPreload builds the preload module from workPath, deriving a
// minimal artifact config (its own .out-of-tree.toml is optional —
// a read failure is only a warning) pinned to exactly this kernel
// release. The module's own docker timeout, when set, overrides the
// caller's.
func buildPreload(workPath, tmp string, ki distro.KernelInfo,
	dockerTimeout time.Duration) (af string, err error) {

	ka, err := Artifact{}.Read(workPath + "/.out-of-tree.toml")
	if err != nil {
		log.Warn().Err(err).Msg("preload")
	}

	ka.SourcePath = workPath

	km := Target{
		Distro: ki.Distro,
		Kernel: Kernel{Regex: ki.KernelRelease},
	}
	ka.Targets = []Target{km}

	if ka.Docker.Timeout.Duration != 0 {
		dockerTimeout = ka.Docker.Timeout.Duration
	}

	_, af, _, err = Build(log.Logger, tmp, ka, ki, dockerTimeout)
	return
}
func pathExists(path string) bool {
if _, err := os.Stat(path); err != nil {
return false
}
return true
}
// tempDir creates a scratch directory under ~/.out-of-tree/tmp.
func tempDir() (string, error) {
	return os.MkdirTemp(dotfiles.Dir("tmp"), "")
}
// cloneOrPull ensures a local checkout of repo under
// <dotfiles>/preload/repos/<sha1(repo)> — cloning on first use,
// pulling afterwards (pull failures are logged but tolerated; a
// stale checkout is still usable) — and returns the checkout path
// plus a cache-file path keyed by repo URL, kernel path and HEAD
// commit, so a new commit invalidates the cache.
func cloneOrPull(repo string, ki distro.KernelInfo) (workPath, cache string,
	err error) {

	base := dotfiles.Dir("preload")
	workPath = filepath.Join(base, "/repos/", sha1sum(repo))

	var r *git.Repository
	if pathExists(workPath) {
		r, err = git.PlainOpen(workPath)
		if err != nil {
			return
		}

		var w *git.Worktree
		w, err = r.Worktree()
		if err != nil {
			return
		}

		// Best effort: keep going with the existing checkout even
		// if the pull fails (e.g. offline).
		err = w.Pull(&git.PullOptions{})
		if err != nil && err != git.NoErrAlreadyUpToDate {
			log.Print(repo, "pull error:", err)
		}
	} else {
		r, err = git.PlainClone(workPath, false, &git.CloneOptions{URL: repo})
		if err != nil {
			return
		}
	}

	ref, err := r.Head()
	if err != nil {
		return
	}

	cachedir := filepath.Join(base, "/cache/")
	os.MkdirAll(cachedir, 0700)

	filename := sha1sum(repo + ki.KernelPath + ref.Hash().String())
	cache = filepath.Join(cachedir, filename)
	return
}
// sha1sum returns the hex-encoded SHA-1 digest of data; used to
// derive stable repo/cache file names.
func sha1sum(data string) string {
	digest := sha1.Sum([]byte(data))
	return hex.EncodeToString(digest[:])
}

411
artifact/process.go Normal file
View File

@ -0,0 +1,411 @@
package artifact
import (
"bufio"
"errors"
"fmt"
"io"
"io/fs"
"math/rand"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/otiai10/copy"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// sh runs command via `sh -c` inside workdir, tracing every output
// line and returning the combined stdout+stderr. On failure the
// error includes the command and its output.
func sh(workdir, command string) (output string, err error) {
	flog := log.With().
		Str("workdir", workdir).
		Str("command", command).
		Logger()

	cmd := exec.Command("sh", "-c", "cd "+workdir+" && "+command)

	flog.Debug().Msgf("%v", cmd)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	cmd.Stderr = cmd.Stdout

	err = cmd.Start()
	if err != nil {
		return
	}

	// Bug fix: the output was previously accumulated in a separate
	// goroutine racing with cmd.Wait — a data race on `output`, and
	// os/exec requires pipe reads to finish before Wait is called.
	// Reading to EOF here (the pipe closes when the process exits)
	// is race-free and preserves the per-line trace logging.
	scanner := bufio.NewScanner(stdout)
	for scanner.Scan() {
		m := scanner.Text()
		output += m + "\n"
		flog.Trace().Str("stdout", m).Msg("")
	}

	err = cmd.Wait()
	if err != nil {
		err = fmt.Errorf("%v %v output: %v", cmd, err, output)
	}
	return
}
// applyPatches applies ka.Patches to the source copy at src, in
// order. Each entry may supply a diff — inline (Source) or from a
// file (Path), mutually exclusive — applied with patch(1), and/or a
// shell Script written next to the sources and executed there.
func applyPatches(src string, ka Artifact) (err error) {
	for i, patch := range ka.Patches {
		name := fmt.Sprintf("patch_%02d", i)

		path := src + "/" + name + ".diff"
		if patch.Source != "" && patch.Path != "" {
			err = errors.New("path and source are mutually exclusive")
			return
		} else if patch.Source != "" {
			err = os.WriteFile(path, []byte(patch.Source), 0644)
			if err != nil {
				return
			}
		} else if patch.Path != "" {
			err = copy.Copy(patch.Path, path)
			if err != nil {
				return
			}
		}

		if patch.Source != "" || patch.Path != "" {
			_, err = sh(src, "patch < "+path)
			if err != nil {
				return
			}
		}

		if patch.Script != "" {
			script := src + "/" + name + ".sh"
			err = os.WriteFile(script, []byte(patch.Script), 0755)
			if err != nil {
				return
			}
			_, err = sh(src, script)
			if err != nil {
				return
			}
		}
	}
	return
}
// Build compiles artifact ka for kernel ki and returns the build
// directory, the path of the produced artifact and the build output.
// The build runs inside the distro container when ki.ContainerName
// is set, otherwise directly on the host (killed after dockerTimeout).
func Build(flog zerolog.Logger, tmp string, ka Artifact,
	ki distro.KernelInfo, dockerTimeout time.Duration) (
	outdir, outpath, output string, err error) {

	target := strings.Replace(ka.Name, " ", "_", -1)
	if target == "" {
		// Unnamed artifact: fall back to a random target name.
		target = fmt.Sprintf("%d", rand.Int())
	}

	outdir = tmp + "/source"

	if len(ka.SourceFiles) == 0 {
		err = copy.Copy(ka.SourcePath, outdir)
	} else {
		err = CopyFiles(ka.SourcePath, ka.SourceFiles, outdir)
	}
	if err != nil {
		return
	}

	err = applyPatches(outdir, ka)
	if err != nil {
		return
	}

	outpath = outdir + "/" + target
	if ka.Type == KernelModule {
		outpath += ".ko"
	}

	if ki.KernelVersion == "" {
		ki.KernelVersion = ki.KernelRelease
	}

	kernel := "/lib/modules/" + ki.KernelVersion + "/build"
	if ki.KernelSource != "" {
		kernel = ki.KernelSource
	}

	buildCommand := "make KERNEL=" + kernel + " TARGET=" + target
	if ka.Make.Target != "" {
		buildCommand += " " + ka.Make.Target
	}

	if ki.ContainerName != "" {
		var c container.Container
		container.Timeout = dockerTimeout
		c, err = container.NewFromKernelInfo(ki)
		// Bug fix: the error must be checked before touching c;
		// previously c.Log was assigned on a possibly invalid
		// container first.
		if err != nil {
			log.Fatal().Err(err).Msg("container creation failure")
		}
		c.Log = flog

		output, err = c.Run(outdir, []string{
			buildCommand + " && chmod -R 777 /work",
		})
	} else {
		cmd := exec.Command("bash", "-c", "cd "+outdir+" && "+
			buildCommand)
		log.Debug().Msgf("%v", cmd)

		timer := time.AfterFunc(dockerTimeout, func() {
			// Bug fix: cmd.Process is nil until the command has
			// started; guard against the timer firing first.
			if p := cmd.Process; p != nil {
				p.Kill()
			}
		})
		defer timer.Stop()

		var raw []byte
		raw, err = cmd.CombinedOutput()
		if err != nil {
			e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
				err, buildCommand, string(raw))
			err = errors.New(e)
			return
		}
		output = string(raw)
	}
	return
}
// runScript executes the uploaded script as root and returns its
// output.
func runScript(q *qemu.System, script string) (output string, err error) {
	return q.Command("root", script)
}

// testKernelModule runs the test script as root; a non-zero exit
// status surfaces as err.
func testKernelModule(q *qemu.System, ka Artifact,
	test string) (output string, err error) {

	output, err = q.Command("root", test)
	// TODO generic checks for WARNING's and so on
	return
}
// testKernelExploit runs the exploit test as the unprivileged user:
// the test script gets the exploit path and a random path under
// /root as arguments, and success is verified by stat'ing that file
// as root — the exploit is expected to have created it, which an
// unprivileged user cannot do without escalation.
func testKernelExploit(q *qemu.System, ka Artifact,
	test, exploit string) (output string, err error) {

	output, err = q.Command("user", "chmod +x "+exploit)
	if err != nil {
		return
	}

	randFilePath := fmt.Sprintf("/root/%d", rand.Int())

	cmd := fmt.Sprintf("%s %s %s", test, exploit, randFilePath)
	output, err = q.Command("user", cmd)
	if err != nil {
		return
	}

	// Check that the "flag" file was actually created.
	_, err = q.Command("root", "stat "+randFilePath)
	if err != nil {
		return
	}

	return
}
// Result aggregates the outcome of one artifact run against one
// kernel: where it was built, what was produced, and per-stage
// (build/run/test) output and success flags.
type Result struct {
	BuildDir      string
	BuildArtifact string

	Build, Run, Test struct {
		Output string
		Ok     bool
	}

	// InternalError marks infrastructure failures (qemu, ssh, file
	// copy) as opposed to a failing build or test.
	InternalError       error
	InternalErrorString string
}
func CopyFiles(path string, files []string, dest string) (err error) {
err = os.MkdirAll(dest, os.ModePerm)
if err != nil {
return
}
for _, sf := range files {
if sf[0] == '/' {
err = CopyFile(sf, filepath.Join(dest, filepath.Base(sf)))
if err != nil {
return
}
continue
}
err = os.MkdirAll(filepath.Join(dest, filepath.Dir(sf)), os.ModePerm)
if err != nil {
return
}
err = CopyFile(filepath.Join(path, sf), filepath.Join(dest, sf))
if err != nil {
return
}
}
return
}
func CopyFile(sourcePath, destinationPath string) (err error) {
sourceFile, err := os.Open(sourcePath)
if err != nil {
return
}
defer sourceFile.Close()
destinationFile, err := os.Create(destinationPath)
if err != nil {
return err
}
if _, err := io.Copy(destinationFile, sourceFile); err != nil {
destinationFile.Close()
return err
}
return destinationFile.Close()
}
// copyArtifactAndTest uploads the artifact's extra test files and
// the built artifact into the VM, runs the type-specific test, and
// records output and success flags in res. A final ssh `echo`
// verifies the VM survived the test; if not, the test is demoted to
// failed.
func copyArtifactAndTest(slog zerolog.Logger, q *qemu.System, ka Artifact,
	res *Result, remoteTest string) (err error) {

	// Copy all test files to the remote machine
	for _, f := range ka.TestFiles {
		if f.Local[0] != '/' {
			// Relative paths are resolved against the build dir.
			if res.BuildDir != "" {
				f.Local = res.BuildDir + "/" + f.Local
			}
		}
		err = q.CopyFile(f.User, f.Local, f.Remote)
		if err != nil {
			res.InternalError = err
			slog.Error().Err(err).Msg("copy test file")
			return
		}
	}

	switch ka.Type {
	case KernelModule:
		res.Run.Output, err = q.CopyAndInsmod(res.BuildArtifact)
		if err != nil {
			slog.Error().Err(err).Msg(res.Run.Output)
			// TODO errors.As
			if strings.Contains(err.Error(), "connection refused") {
				// ssh-level failure, not a module failure
				res.InternalError = err
			}
			return
		}
		res.Run.Ok = true

		res.Test.Output, err = testKernelModule(q, ka, remoteTest)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		res.Test.Ok = true
	case KernelExploit:
		remoteExploit := fmt.Sprintf("/tmp/exploit_%d", rand.Int())
		err = q.CopyFile("user", res.BuildArtifact, remoteExploit)
		if err != nil {
			return
		}

		res.Test.Output, err = testKernelExploit(q, ka, remoteTest,
			remoteExploit)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		res.Run.Ok = true // not actually used for exploits
		res.Test.Ok = true
	case Script:
		res.Test.Output, err = runScript(q, remoteTest)
		if err != nil {
			slog.Error().Err(err).Msg(res.Test.Output)
			return
		}
		slog.Info().Msgf("\n%v\n", res.Test.Output)
		res.Run.Ok = true
		res.Test.Ok = true
	default:
		slog.Fatal().Msg("Unsupported artifact type")
	}

	// Make sure the machine is still reachable after the test.
	_, err = q.Command("root", "echo")
	if err != nil {
		slog.Error().Err(err).Msg("after-test ssh reconnect")
		res.Test.Ok = false
		return
	}
	return
}
// copyTest uploads the test script to a random path under /tmp in
// the VM. If the upload fails (e.g. no test file exists), a stub is
// generated instead: for exploits, a wrapper that runs the exploit
// so it touches the flag file; otherwise an empty always-succeeding
// script. Either way the result is made executable.
func copyTest(q *qemu.System, testPath string, ka Artifact) (
	remoteTest string, err error) {

	remoteTest = fmt.Sprintf("/tmp/test_%d", rand.Int())
	err = q.CopyFile("user", testPath, remoteTest)
	if err != nil {
		if ka.Type == KernelExploit {
			q.Command("user",
				"echo -e '#!/bin/sh\necho touch $2 | $1' "+
					"> "+remoteTest+
					" && chmod +x "+remoteTest)
		} else {
			q.Command("user", "echo '#!/bin/sh' "+
				"> "+remoteTest+" && chmod +x "+remoteTest)
		}
	}

	_, err = q.Command("root", "chmod +x "+remoteTest)
	return
}
// CopyStandardModules copies the distribution's standard kernel
// modules tree and the modules.* metadata files for kernel ki into
// the running VM, so that test modules can depend on them.
func CopyStandardModules(q *qemu.System, ki distro.KernelInfo) (err error) {
	_, err = q.Command("root", "mkdir -p /lib/modules/"+ki.KernelVersion)
	if err != nil {
		return
	}

	remotePath := "/lib/modules/" + ki.KernelVersion + "/"

	err = q.CopyDirectory("root", ki.ModulesPath+"/kernel", remotePath+"/kernel")
	if err != nil {
		return
	}

	files, err := os.ReadDir(ki.ModulesPath)
	if err != nil {
		return
	}

	for _, de := range files {
		var fi fs.FileInfo
		fi, err = de.Info()
		if err != nil {
			// Entry vanished between ReadDir and Info: skip it and
			// clear err so it cannot leak into the return value.
			err = nil
			continue
		}
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			continue
		}
		if !strings.HasPrefix(fi.Name(), "modules") {
			continue
		}

		err = q.CopyFile("root", ki.ModulesPath+"/"+fi.Name(), remotePath)
		if err != nil {
			// Bug fix: previously the loop continued and a later
			// success silently overwrote this error.
			return
		}
	}
	return
}

125
cache/cache.go vendored Normal file
View File

@ -0,0 +1,125 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cache
import (
"fmt"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"github.com/cavaliergopher/grab/v3"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// URL is the base address of the out-of-tree artifact storage
// (rootfs images, debian cache, containers, debian packages).
var URL = "https://out-of-tree.fra1.digitaloceanspaces.com/1.0.0/"

// unpackTar extracts archive into destination, preserving sparse
// files (-S).
func unpackTar(archive, destination string) (err error) {
	// NOTE: if you change anything in the tar command, check both
	// BSD tar and GNU tar (macOS ships BSD tar by default), and make
	// sure sparse files still extract correctly.
	cmd := exec.Command("tar", "-Sxf", archive)
	cmd.Dir = destination + "/"

	log.Debug().Msgf("%v", cmd)

	rawOutput, err := cmd.CombinedOutput()
	if err != nil {
		err = fmt.Errorf("%v: %s", err, rawOutput)
		return
	}

	return
}
// DownloadRootFS fetches <file>.tar.gz from the artifact storage
// into a temp dir, unpacks it into path and removes the downloaded
// archive. A download failure yields a hint to generate the image
// manually.
func DownloadRootFS(path, file string) (err error) {
	tmp, err := fs.TempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	fileurl, err := url.JoinPath(URL, file+".tar.gz")
	if err != nil {
		return
	}

	log.Debug().Msgf("download qemu image from %s", fileurl)

	resp, err := grab.Get(tmp, fileurl)
	if err != nil {
		err = fmt.Errorf("cannot download %s. It looks like you need "+
			"to generate it manually and place it "+
			"to ~/.out-of-tree/images/; "+
			"check documentation for additional information",
			fileurl)
		return
	}

	err = unpackTar(resp.Filename, path)
	if err != nil {
		return
	}

	return os.Remove(resp.Filename)
}
// DownloadDebianCache fetches the debian kernel cache file (named by
// the basename of cachePath) from the artifact storage and moves it
// into place at cachePath.
func DownloadDebianCache(cachePath string) (err error) {
	tmp, err := fs.TempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	file := filepath.Base(cachePath)

	fileurl, err := url.JoinPath(URL, file)
	if err != nil {
		return
	}

	log.Debug().Msgf("download debian cache from %s", fileurl)

	resp, err := grab.Get(tmp, fileurl)
	if err != nil {
		return
	}

	return os.Rename(resp.Filename, cachePath)
}
// PackageURL reports whether a pre-built Debian package matching
// orig (by basename) is available in the artifact storage, together
// with the URL it would be fetched from. Only distro.Debian is
// supported.
func PackageURL(dt distro.ID, orig string) (found bool, fileurl string) {
	if dt != distro.Debian {
		return
	}

	filename := filepath.Base(orig)

	fileurl, err := url.JoinPath(URL, "packages/debian", filename)
	if err != nil {
		return
	}

	resp, err := http.Head(fileurl)
	if err != nil {
		return
	}
	// Bug fix: close the (empty) HEAD response body so the transport
	// can reuse the connection; previously it leaked.
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return
	}

	found = true
	return
}
// ContainerURL returns the download URL of the cached container
// image tarball for the given container name.
func ContainerURL(name string) (path string) {
	path, _ = url.JoinPath(URL, "containers", name+".tar.gz")
	return
}

49
cache/cache_test.go vendored Normal file
View File

@ -0,0 +1,49 @@
package cache
import (
"os"
"path/filepath"
"testing"
"code.dumpstack.io/tools/out-of-tree/fs"
)
func TestDownloadRootFS(t *testing.T) {
tmp, err := os.MkdirTemp("", "out-of-tree_")
if err != nil {
return
}
defer os.RemoveAll(tmp)
file := "out_of_tree_ubuntu_12__04.img"
err = DownloadRootFS(tmp, file)
if err != nil {
t.Fatal(err)
}
if !fs.PathExists(filepath.Join(tmp, file)) {
t.Fatalf("%s does not exist", file)
}
}
// TestDownloadDebianCache downloads the Debian metadata cache from the
// mirror and verifies it was placed at the requested path.
func TestDownloadDebianCache(t *testing.T) {
	tmp, err := fs.TempDir()
	if err != nil {
		// Fix: fail loudly instead of silently returning, which
		// made the test pass without running.
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	file := "debian.cache"
	cachePath := filepath.Join(tmp, file)

	err = DownloadDebianCache(cachePath)
	if err != nil {
		t.Fatal(err)
	}

	if !fs.PathExists(filepath.Join(tmp, file)) {
		t.Fatalf("%s does not exist", file)
	}
}

262
client/client.go Normal file
View File

@ -0,0 +1,262 @@
package client
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"strconv"
"sync"
"github.com/davecgh/go-spew/spew"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// Client is a thin TLS client for the out-of-tree daemon API.
type Client struct {
	// RemoteAddr is the daemon address in host:port form.
	RemoteAddr string
}
// client dials the daemon over mutually-authenticated TLS. The daemon
// certificate serves both as the client credential and as the root CA,
// so {cert,key}.pem must be present in the local daemon dotfiles dir.
// All failures are fatal; the caller must close the returned connection.
func (c Client) client() *tls.Conn {
	if !fs.PathExists(dotfiles.File("daemon/cert.pem")) {
		log.Fatal().Msgf("no {cert,key}.pem at %s",
			dotfiles.Dir("daemon"))
	}

	cert, err := tls.LoadX509KeyPair(
		dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	cacert, err := os.ReadFile(dotfiles.File("daemon/cert.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}

	certpool := x509.NewCertPool()
	// Fix: AppendCertsFromPEM reports success via its return value;
	// previously a malformed cert.pem was silently ignored and the
	// handshake failed later with a confusing error.
	if !certpool.AppendCertsFromPEM(cacert) {
		log.Fatal().Msg("no valid CA certificates in cert.pem")
	}

	tlscfg := &tls.Config{
		RootCAs:      certpool,
		Certificates: []tls.Certificate{cert},
	}

	conn, err := tls.Dial("tcp", c.RemoteAddr, tlscfg)
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}
	return conn // caller closes
}
// request opens a fresh TLS connection to the daemon, sends a single
// command with an optional payload, and decodes the response.
// Transport errors and daemon-reported errors are fatal and terminate
// the process; err is only returned for the caller's convenience.
func (c Client) request(cmd api.Command, data any) (resp api.Resp, err error) {
	req := api.Req{Command: cmd}
	if data != nil {
		req.SetData(data)
	}
	// One connection per request; closed as soon as the reply is read.
	conn := c.client()
	defer conn.Close()
	req.Encode(conn)
	err = resp.Decode(conn)
	if err != nil {
		log.Fatal().Err(err).Msgf("request %v", req)
	}
	log.Debug().Msgf("resp: %v", resp)
	if resp.Error != "" {
		// Daemon-side failure carried inside the response.
		err = errors.New(resp.Error)
		log.Fatal().Err(err).Msg("")
	}
	return
}
// Jobs queries the daemon for jobs matching params.
func (c Client) Jobs(params api.ListJobsParams) (jobs []api.Job, err error) {
	// Fix: propagate the request error instead of discarding it with
	// a blank identifier (consistent with AddJob).
	resp, err := c.request(api.ListJobs, &params)
	if err != nil {
		return
	}
	err = resp.GetData(&jobs)
	if err != nil {
		log.Error().Err(err).Msg("")
	}
	return
}
// AddJob submits a new job to the daemon and returns its UUID.
func (c Client) AddJob(job api.Job) (uuid string, err error) {
	var resp api.Resp
	if resp, err = c.request(api.AddJob, &job); err != nil {
		return
	}
	err = resp.GetData(&uuid)
	return
}
// Repos fetches the list of repositories known to the daemon.
func (c Client) Repos() (repos []api.Repo, err error) {
	// Fix: propagate the request error instead of discarding it with
	// a blank identifier (consistent with AddJob).
	resp, err := c.request(api.ListRepos, nil)
	if err != nil {
		return
	}
	log.Debug().Msgf("resp: %v", spew.Sdump(resp))
	err = resp.GetData(&repos)
	if err != nil {
		log.Error().Err(err).Msg("")
	}
	return
}
// logWriter is an io.Writer that traces every chunk written through it
// at trace level, labeled with a direction tag (e.g. "send"/"recv").
type logWriter struct {
	// tag identifies the traffic direction in the trace output.
	tag string
}
// Write traces the chunk (quoted, so binary data stays readable) and
// reports it as fully consumed.
func (lw logWriter) Write(p []byte) (n int, err error) {
	log.Trace().Str("tag", lw.tag).Msgf("%v", strconv.Quote(string(p)))
	return len(p), nil
}
// handler proxies one accepted local git connection to the daemon: it
// switches a fresh daemon connection into raw mode, then shuttles bytes
// in both directions until either side closes. Traffic is mirrored
// through logWriter for trace logging.
func (c Client) handler(cConn net.Conn) {
	defer cConn.Close()
	dConn := c.client()
	defer dConn.Close()
	req := api.Req{Command: api.RawMode}
	req.Encode(dConn)
	// daemon -> client runs in the background; client -> daemon blocks
	// here and, by returning, triggers both deferred closes which in
	// turn terminate the background copy.
	go io.Copy(cConn, io.TeeReader(dConn, logWriter{"recv"}))
	io.Copy(dConn, io.TeeReader(cConn, logWriter{"send"}))
}
// ErrRepoNotFound is returned by GetRepo when no repository with the
// requested name exists on the daemon.
var ErrRepoNotFound = errors.New("repo not found")

// GetRepo virtual API call: it fetches the full repository list and
// selects the matching entry client-side.
func (c Client) GetRepo(name string) (repo api.Repo, err error) {
	// TODO add API call
	var repos []api.Repo
	repos, err = c.Repos()
	if err != nil {
		return
	}
	for i := range repos {
		if repos[i].Name == name {
			return repos[i], nil
		}
	}
	return repo, ErrRepoNotFound
}
// GitProxy accepts local git connections on addr and tunnels each one
// to the daemon. The caller must Lock ready before starting this
// goroutine; it is unlocked exactly once, when the listener is up.
func (c Client) GitProxy(addr string, ready *sync.Mutex) {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatal().Err(err).Msg("git proxy listen")
	}
	defer l.Close()

	log.Debug().Msgf("git proxy listen on %v", addr)

	// Fix: signal readiness once, before the accept loop. The previous
	// code called ready.Unlock() on every iteration, which panics with
	// "unlock of unlocked mutex" once several connections are accepted.
	ready.Unlock()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal().Err(err).Msg("accept")
		}
		log.Debug().Msgf("git proxy accept %s", conn.RemoteAddr())

		go c.handler(conn)
	}
}
// PushRepo force-pushes the local repository to the daemon through a
// temporary local git proxy.
func (c Client) PushRepo(repo api.Repo) (err error) {
	addr := qemu.GetFreeAddrPort()
	ready := &sync.Mutex{}
	ready.Lock()
	go c.GitProxy(addr, ready)
	// Blocks until GitProxy unlocks, i.e. the listener is accepting.
	ready.Lock()

	remote := fmt.Sprintf("git://%s/%s", addr, repo.Name)
	log.Debug().Msgf("git proxy remote: %v", remote)

	raw, err := exec.Command("git", "--work-tree", repo.Path, "push", "--force", remote).
		CombinedOutput()
	if err != nil {
		// Fix: include git's combined output in the returned error
		// instead of dropping it.
		err = fmt.Errorf("git push: %w\n%s", err, raw)
		return
	}
	log.Info().Msgf("push repo %v\n%v", repo, string(raw))
	return
}
// AddRepo registers a new repository with the daemon.
func (c Client) AddRepo(repo api.Repo) (err error) {
	if _, err = c.request(api.AddRepo, &repo); err != nil {
		return
	}
	log.Info().Msgf("add repo %v", repo)
	return
}
// Kernels fetches the list of kernels available on the daemon.
func (c Client) Kernels() (kernels []distro.KernelInfo, err error) {
	var resp api.Resp
	if resp, err = c.request(api.Kernels, nil); err != nil {
		return
	}
	if err = resp.GetData(&kernels); err != nil {
		log.Error().Err(err).Msg("")
	}
	log.Info().Msgf("got %d kernels", len(kernels))
	return
}
// JobStatus fetches the current status of the job identified by uuid.
func (c Client) JobStatus(uuid string) (st api.Status, err error) {
	var resp api.Resp
	if resp, err = c.request(api.JobStatus, &uuid); err != nil {
		return
	}
	if err = resp.GetData(&st); err != nil {
		log.Error().Err(err).Msg("")
	}
	return
}
// JobLogs fetches all log entries for the job identified by uuid.
func (c Client) JobLogs(uuid string) (logs []api.JobLog, err error) {
	var resp api.Resp
	if resp, err = c.request(api.JobLogs, &uuid); err != nil {
		return
	}
	if err = resp.GetData(&logs); err != nil {
		log.Error().Err(err).Msg("")
	}
	return
}

94
cmd/container.go Normal file
View File

@ -0,0 +1,94 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os/exec"
"path/filepath"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/container"
)
// ContainerCmd groups the container management subcommands and the
// name filter shared by all of them.
type ContainerCmd struct {
	// Filter, when non-empty, restricts operations to images whose
	// name contains this substring.
	Filter string `help:"filter by name"`
	List ContainerListCmd `cmd:"" help:"list containers"`
	Save ContainerSaveCmd `cmd:"" help:"save containers"`
	Cleanup ContainerCleanupCmd `cmd:"" help:"cleanup containers"`
}
// Containers returns the names of local container images, restricted
// to those matching the Filter substring when one is set.
func (cmd ContainerCmd) Containers() (names []string) {
	images, err := container.Images()
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}
	for _, img := range images {
		if cmd.Filter == "" || strings.Contains(img.Name, cmd.Filter) {
			names = append(names, img.Name)
		}
	}
	return
}
// ContainerListCmd lists container images.
type ContainerListCmd struct{}

// Run prints one matching container name per line.
func (cmd ContainerListCmd) Run(containerCmd *ContainerCmd) (err error) {
	names := containerCmd.Containers()
	for i := range names {
		fmt.Println(names[i])
	}
	return
}
// ContainerSaveCmd exports container images to tarballs on disk.
type ContainerSaveCmd struct {
	OutDir string `help:"directory to save containers" default:"./" type:"existingdir"`
}

// Run saves each matching container to OutDir as <name>.tar and then
// compresses it to <name>.tar.gz with the external gzip tool.
func (cmd ContainerSaveCmd) Run(containerCmd *ContainerCmd) (err error) {
	for _, name := range containerCmd.Containers() {
		nlog := log.With().Str("name", name).Logger()

		output := filepath.Join(cmd.OutDir, name+".tar")
		nlog.Info().Msgf("saving to %v", output)
		if err = container.Save(name, output); err != nil {
			return
		}

		compressed := output + ".gz"
		nlog.Info().Msgf("compressing to %v", compressed)
		raw, gzerr := exec.Command("gzip", output).CombinedOutput()
		if gzerr != nil {
			err = gzerr
			nlog.Error().Err(err).Msg(string(raw))
			return
		}
		nlog.Info().Msg("done")
	}
	return
}
// ContainerCleanupCmd removes container images from local storage.
type ContainerCleanupCmd struct{}

// Run deletes each matching image via the container runtime CLI,
// stopping at the first failure.
func (cmd ContainerCleanupCmd) Run(containerCmd *ContainerCmd) (err error) {
	for _, name := range containerCmd.Containers() {
		var out []byte
		out, err = exec.Command(container.Runtime, "image", "rm", name).
			CombinedOutput()
		if err != nil {
			log.Error().Err(err).Str("output", string(out)).Msg("")
			return
		}
	}
	return
}

123
cmd/daemon.go Normal file
View File

@ -0,0 +1,123 @@
// Copyright 2024 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"encoding/json"
"fmt"
"time"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/client"
)
// daemonCmd holds the flags and subcommands shared by the platform
// specific DaemonCmd variants (see daemon_linux.go / daemon_macos.go).
type daemonCmd struct {
	// Addr is the daemon address used by client subcommands.
	Addr string `default:":63527"`
	Job DaemonJobCmd `cmd:"" aliases:"jobs" help:"manage jobs"`
	Repo DaemonRepoCmd `cmd:"" aliases:"repos" help:"manage repositories"`
}
// DaemonJobCmd groups the job-related daemon subcommands.
type DaemonJobCmd struct {
	List DaemonJobsListCmd `cmd:"" help:"list jobs"`
	Status DaemonJobsStatusCmd `cmd:"" help:"show job status"`
	Log DaemonJobsLogsCmd `cmd:"" help:"job logs"`
}
// DaemonJobsListCmd lists daemon jobs, optionally filtered by group,
// repo, commit, status, or last-update time.
type DaemonJobsListCmd struct {
	Group string `help:"group uuid"`
	Repo string `help:"repo name"`
	Commit string `help:"commit sha"`
	Status string `help:"job status"`
	After time.Time `help:"updated after" format:"2006-01-02 15:04:05"`
}
// Run queries matching jobs from the daemon and prints them as
// indented JSON.
func (cmd *DaemonJobsListCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}
	params := api.ListJobsParams{
		Group:  cmd.Group,
		Repo:   cmd.Repo,
		Commit: cmd.Commit,
		Status: api.Status(cmd.Status),
	}
	if !cmd.After.IsZero() {
		params.UpdatedAfter = cmd.After.Unix()
	}
	jobs, err := c.Jobs(params)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}
	b, err := json.MarshalIndent(jobs, "", " ")
	if err != nil {
		// Fix: abort on marshal failure; previously the error was
		// logged but an empty string was printed anyway.
		log.Error().Err(err).Msg("")
		return
	}
	fmt.Println(string(b))
	return
}
// DaemonJobsStatusCmd shows the status of a single job.
type DaemonJobsStatusCmd struct {
	UUID string `arg:""`
}

// Run fetches and prints the status of the job identified by UUID.
func (cmd *DaemonJobsStatusCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}
	var st api.Status
	if st, err = c.JobStatus(cmd.UUID); err != nil {
		log.Error().Err(err).Msg("")
		return
	}
	fmt.Println(st)
	return
}
// DaemonJobsLogsCmd prints the logs of a single job.
type DaemonJobsLogsCmd struct {
	UUID string `arg:""`
}

// Run fetches all log entries for the job identified by UUID and
// prints each one, preceded by its name.
func (cmd *DaemonJobsLogsCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}
	var logs []api.JobLog
	if logs, err = c.JobLogs(cmd.UUID); err != nil {
		log.Error().Err(err).Msg("")
		return
	}
	for i := range logs {
		log.Info().Msg(logs[i].Name)
		fmt.Println(logs[i].Text)
	}
	return
}
// DaemonRepoCmd groups the repository-related daemon subcommands.
type DaemonRepoCmd struct {
	List DaemonRepoListCmd `cmd:"" help:"list repos"`
}
// DaemonRepoListCmd lists repositories known to the daemon.
type DaemonRepoListCmd struct{}

// Run fetches the repository list and prints it as indented JSON.
func (cmd *DaemonRepoListCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	c := client.Client{RemoteAddr: g.RemoteAddr}
	repos, err := c.Repos()
	if err != nil {
		return
	}
	b, err := json.MarshalIndent(repos, "", " ")
	if err != nil {
		// Fix: abort on marshal failure; previously the error was
		// logged but an empty string was printed anyway.
		log.Error().Err(err).Msg("")
		return
	}
	fmt.Println(string(b))
	return
}

47
cmd/daemon_linux.go Normal file
View File

@ -0,0 +1,47 @@
//go:build linux
// +build linux
package cmd
import (
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/daemon"
)
// DaemonCmd (Linux) extends the common daemon subcommands with the
// server-side options and the serve subcommand.
type DaemonCmd struct {
	daemonCmd
	// Threads overrides the daemon worker count when > 0.
	Threads int `help:"number of threads to use"`
	// OvercommitMemory/OvercommitCPU scale the schedulable resource
	// pools when > 0.
	OvercommitMemory float64 `help:"overcommit memory factor"`
	OvercommitCPU float64 `help:"overcommit CPU factor"`
	Serve DaemonServeCmd `cmd:"" help:"start daemon"`
}
// DaemonServeCmd starts the daemon server.
type DaemonServeCmd struct{}

// Run initializes the daemon from the kernels config, applies the
// thread/overcommit overrides, starts the scheduler, and listens on
// the configured address.
func (cmd *DaemonServeCmd) Run(dm *DaemonCmd, g *Globals) (err error) {
	d, err := daemon.Init(g.Config.Kernels)
	if err != nil {
		log.Fatal().Err(err).Msg("")
	}
	defer d.Kill()

	if dm.Threads > 0 {
		d.Threads = dm.Threads
	}

	// Fix: --overcommit-memory previously adjusted the CPU pool
	// (copy/paste bug); it now applies to the memory pool.
	if dm.OvercommitMemory > 0 {
		d.Resources.Memory.SetOvercommit(dm.OvercommitMemory)
	}
	if dm.OvercommitCPU > 0 {
		d.Resources.CPU.SetOvercommit(dm.OvercommitCPU)
	}

	go d.Daemon()
	d.Listen(dm.Addr)
	return
}

8
cmd/daemon_macos.go Normal file
View File

@ -0,0 +1,8 @@
//go:build darwin
// +build darwin
package cmd
// DaemonCmd (macOS) exposes only the client-side daemon subcommands;
// serving the daemon itself is Linux-only (see daemon_linux.go).
type DaemonCmd struct {
	daemonCmd
}

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
package cmd
import (
"database/sql"
@ -12,12 +12,13 @@ import (
_ "github.com/mattn/go-sqlite3"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// Change on ANY database update
const currentDatabaseVersion = 2
const currentDatabaseVersion = 3
const versionField = "db_version"
@ -27,9 +28,9 @@ type logEntry struct {
Timestamp time.Time
qemu.System
config.Artifact
config.KernelInfo
phasesResult
artifact.Artifact
distro.KernelInfo
artifact.Result
}
func createLogTable(db *sql.DB) (err error) {
@ -46,6 +47,8 @@ func createLogTable(db *sql.DB) (err error) {
distro_release TEXT,
kernel_release TEXT,
internal_err TEXT,
build_output TEXT,
build_ok BOOLEAN,
@ -120,18 +123,19 @@ func getVersion(db *sql.DB) (version int, err error) {
return
}
func addToLog(db *sql.DB, q *qemu.System, ka config.Artifact,
ki config.KernelInfo, res *phasesResult, tag string) (err error) {
func addToLog(db *sql.DB, q *qemu.System, ka artifact.Artifact,
ki distro.KernelInfo, res *artifact.Result, tag string) (err error) {
stmt, err := db.Prepare("INSERT INTO log (name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_output, build_ok, " +
"run_output, run_ok, " +
"test_output, test_ok, " +
"qemu_stdout, qemu_stderr, " +
"kernel_panic, timeout_kill) " +
"VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, " +
"$10, $11, $12, $13, $14, $15, $16);")
"$10, $11, $12, $13, $14, $15, $16, $17);")
if err != nil {
return
}
@ -140,7 +144,8 @@ func addToLog(db *sql.DB, q *qemu.System, ka config.Artifact,
_, err = stmt.Exec(
ka.Name, ka.Type, tag,
ki.DistroType, ki.DistroRelease, ki.KernelRelease,
ki.Distro.ID, ki.Distro.Release, ki.KernelRelease,
res.InternalErrorString,
res.Build.Output, res.Build.Ok,
res.Run.Output, res.Run.Ok,
res.Test.Output, res.Test.Ok,
@ -157,6 +162,7 @@ func addToLog(db *sql.DB, q *qemu.System, ka config.Artifact,
func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_ok, run_ok, test_ok, kernel_panic, " +
"timeout_kill FROM log ORDER BY datetime(time) DESC " +
"LIMIT $1")
@ -172,10 +178,12 @@ func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
defer rows.Close()
for rows.Next() {
var internalErr sql.NullString
le := logEntry{}
err = rows.Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.DistroType, &le.DistroRelease, &le.KernelRelease,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.KernelPanic, &le.KilledByTimeout,
)
@ -183,6 +191,8 @@ func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
return
}
le.InternalErrorString = internalErr.String
if tag == "" || tag == le.Tag {
les = append(les, le)
}
@ -191,11 +201,12 @@ func getAllLogs(db *sql.DB, tag string, num int) (les []logEntry, err error) {
return
}
func getAllArtifactLogs(db *sql.DB, tag string, num int, ka config.Artifact) (
func getAllArtifactLogs(db *sql.DB, tag string, num int, ka artifact.Artifact) (
les []logEntry, err error) {
stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_ok, run_ok, test_ok, kernel_panic, " +
"timeout_kill FROM log WHERE name=$1 AND type=$2 " +
"ORDER BY datetime(time) DESC LIMIT $3")
@ -211,10 +222,12 @@ func getAllArtifactLogs(db *sql.DB, tag string, num int, ka config.Artifact) (
defer rows.Close()
for rows.Next() {
var internalErr sql.NullString
le := logEntry{}
err = rows.Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.DistroType, &le.DistroRelease, &le.KernelRelease,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.KernelPanic, &le.KilledByTimeout,
)
@ -222,6 +235,8 @@ func getAllArtifactLogs(db *sql.DB, tag string, num int, ka config.Artifact) (
return
}
le.InternalErrorString = internalErr.String
if tag == "" || tag == le.Tag {
les = append(les, le)
}
@ -233,6 +248,7 @@ func getAllArtifactLogs(db *sql.DB, tag string, num int, ka config.Artifact) (
func getLogByID(db *sql.DB, id int) (le logEntry, err error) {
stmt, err := db.Prepare("SELECT id, time, name, type, tag, " +
"distro_type, distro_release, kernel_release, " +
"internal_err, " +
"build_ok, run_ok, test_ok, " +
"build_output, run_output, test_output, " +
"qemu_stdout, qemu_stderr, " +
@ -243,14 +259,48 @@ func getLogByID(db *sql.DB, id int) (le logEntry, err error) {
}
defer stmt.Close()
var internalErr sql.NullString
err = stmt.QueryRow(id).Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.DistroType, &le.DistroRelease, &le.KernelRelease,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.Build.Output, &le.Run.Output, &le.Test.Output,
&le.Stdout, &le.Stderr,
&le.KernelPanic, &le.KilledByTimeout,
)
if err != nil {
return
}
le.InternalErrorString = internalErr.String
return
}
func getLastLog(db *sql.DB) (le logEntry, err error) {
var internalErr sql.NullString
err = db.QueryRow("SELECT MAX(id), time, name, type, tag, "+
"distro_type, distro_release, kernel_release, "+
"internal_err, "+
"build_ok, run_ok, test_ok, "+
"build_output, run_output, test_output, "+
"qemu_stdout, qemu_stderr, "+
"kernel_panic, timeout_kill "+
"FROM log").Scan(&le.ID, &le.Timestamp,
&le.Name, &le.Type, &le.Tag,
&le.Distro.ID, &le.Distro.Release, &le.KernelRelease,
&internalErr,
&le.Build.Ok, &le.Run.Ok, &le.Test.Ok,
&le.Build.Output, &le.Run.Output, &le.Test.Output,
&le.Stdout, &le.Stderr,
&le.KernelPanic, &le.KilledByTimeout,
)
if err != nil {
return
}
le.InternalErrorString = internalErr.String
return
}
@ -305,10 +355,23 @@ func openDatabase(path string) (db *sql.DB, err error) {
}
version = 2
} else if version == 2 {
_, err = db.Exec(`ALTER TABLE log ADD internal_err TEXT`)
if err != nil {
return
}
err = metaSetValue(db, versionField, "3")
if err != nil {
return
}
version = 3
}
if version != currentDatabaseVersion {
err = fmt.Errorf("Database is not supported (%d instead of %d)",
err = fmt.Errorf("database is not supported (%d instead of %d)",
version, currentDatabaseVersion)
return
}

287
cmd/debug.go Normal file
View File

@ -0,0 +1,287 @@
// Copyright 2018 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/rs/zerolog/log"
"gopkg.in/logrusorgru/aurora.v2"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// DebugCmd boots a kernel under qemu with a gdb stub attached for
// interactive artifact debugging. The Kaslr/Smep/Smap/Kpti flag pairs
// explicitly enable or disable each mitigation, overriding both the
// defaults and the artifact configuration.
type DebugCmd struct {
	Kernel string `help:"regexp (first match)" required:""`
	Gdb string `help:"gdb listen address" default:"tcp::1234"`
	SshAddr string `help:"ssh address to listen" default:"127.0.0.1"`
	SshPort int `help:"ssh port to listen" default:"50022"`
	ArtifactConfig string `help:"path to artifact config" type:"path"`
	Kaslr bool `help:"Enable KASLR"`
	Smep bool `help:"Enable SMEP"`
	Smap bool `help:"Enable SMAP"`
	Kpti bool `help:"Enable KPTI"`
	NoKaslr bool `help:"Disable KASLR"`
	NoSmep bool `help:"Disable SMEP"`
	NoSmap bool `help:"Disable SMAP"`
	NoKpti bool `help:"Disable KPTI"`
}
// TODO: merge with pew.go

// Run boots the first supported kernel under qemu with a gdb stub,
// builds (or copies) the artifact, uploads it together with any test
// files, prints the ssh/gdb command lines, and drops into an
// interactive session.
func (cmd *DebugCmd) Run(g *Globals) (err error) {
	kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		// Non-fatal: an empty kernel config still fails later in
		// firstSupported with a clearer error.
		log.Print(err)
	}

	// Artifact config defaults to .out-of-tree.toml in the work dir.
	var configPath string
	if cmd.ArtifactConfig == "" {
		configPath = g.WorkDir + "/.out-of-tree.toml"
	} else {
		configPath = cmd.ArtifactConfig
	}
	ka, err := artifact.Artifact{}.Read(configPath)
	if err != nil {
		return
	}
	if ka.SourcePath == "" {
		ka.SourcePath = g.WorkDir
	}

	ki, err := firstSupported(kcfg, ka, cmd.Kernel)
	if err != nil {
		return
	}

	kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		return
	}
	err = q.SetSSHAddrPort(cmd.SshAddr, cmd.SshPort)
	if err != nil {
		return
	}

	// Artifact config may override qemu resources and docker timeout.
	if ka.Qemu.Cpus != 0 {
		q.Cpus = ka.Qemu.Cpus
	}
	if ka.Qemu.Memory != 0 {
		q.Memory = ka.Qemu.Memory
	}
	if ka.Docker.Timeout.Duration != 0 {
		g.Config.Docker.Timeout.Duration = ka.Docker.Timeout.Duration
	}

	q.SetKASLR(false) // set KASLR to false by default because of gdb
	q.SetSMEP(!ka.Mitigations.DisableSmep)
	q.SetSMAP(!ka.Mitigations.DisableSmap)
	q.SetKPTI(!ka.Mitigations.DisableKpti)

	// Explicit command-line flags win over both defaults and the
	// artifact configuration.
	if cmd.Kaslr {
		q.SetKASLR(true)
	} else if cmd.NoKaslr {
		q.SetKASLR(false)
	}
	if cmd.Smep {
		q.SetSMEP(true)
	} else if cmd.NoSmep {
		q.SetSMEP(false)
	}
	if cmd.Smap {
		q.SetSMAP(true)
	} else if cmd.NoSmap {
		q.SetSMAP(false)
	}
	if cmd.Kpti {
		q.SetKPTI(true)
	} else if cmd.NoKpti {
		q.SetKPTI(false)
	}

	// Render mitigation state as green (enabled) / red (disabled).
	redgreen := func(name string, enabled bool) aurora.Value {
		if enabled {
			return aurora.BgGreen(aurora.Black(name))
		}
		return aurora.BgRed(aurora.White(name))
	}
	fmt.Printf("[*] %s %s %s %s\n",
		redgreen("KASLR", q.GetKASLR()),
		redgreen("SMEP", q.GetSMEP()),
		redgreen("SMAP", q.GetSMAP()),
		redgreen("KPTI", q.GetKPTI()))
	fmt.Printf("[*] SMP: %d CPUs\n", q.Cpus)
	fmt.Printf("[*] Memory: %d MB\n", q.Memory)

	q.Debug(cmd.Gdb)
	coloredGdbAddress := aurora.BgGreen(aurora.Black(cmd.Gdb))
	fmt.Printf("[*] gdb is listening on %s\n", coloredGdbAddress)

	err = q.Start()
	if err != nil {
		return
	}
	defer q.Stop()

	tmp, err := fs.TempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	err = q.WaitForSSH(time.Minute)
	if err != nil {
		return
	}

	if ka.StandardModules {
		// Module depends on one of the standard modules
		err = artifact.CopyStandardModules(q, ki)
		if err != nil {
			log.Print(err)
			return
		}
	}

	err = artifact.PreloadModules(q, ka, ki, g.Config.Docker.Timeout.Duration)
	if err != nil {
		log.Print(err)
		return
	}

	// Scripts are copied as-is; modules/exploits are built first and
	// uploaded to /tmp on the guest.
	var buildDir, outFile, output, remoteFile string
	if ka.Type == artifact.Script {
		err = q.CopyFile("root", ka.Script, ka.Script)
		if err != nil {
			return
		}
	} else {
		buildDir, outFile, output, err = artifact.Build(log.Logger, tmp, ka, ki, g.Config.Docker.Timeout.Duration)
		if err != nil {
			log.Print(err, output)
			return
		}
		remoteFile = "/tmp/" + strings.Replace(ka.Name, " ", "_", -1)
		if ka.Type == artifact.KernelModule {
			remoteFile += ".ko"
		}
		err = q.CopyFile("user", outFile, remoteFile)
		if err != nil {
			return
		}
	}

	// Copy all test files to the remote machine
	for _, f := range ka.TestFiles {
		if f.Local[0] != '/' {
			// Relative paths resolve against the build directory.
			if buildDir != "" {
				f.Local = buildDir + "/" + f.Local
			}
		}
		err = q.CopyFile(f.User, f.Local, f.Remote)
		if err != nil {
			log.Print("error copy err:", err, f.Local, f.Remote)
			return
		}
	}

	coloredRemoteFile := aurora.BgGreen(aurora.Black(remoteFile))
	fmt.Printf("[*] build result copied to %s\n", coloredRemoteFile)

	fmt.Printf("\n%s\n", q.GetSSHCommand())
	fmt.Printf("gdb %s -ex 'target remote %s'\n\n", ki.VmlinuxPath, cmd.Gdb)
	// TODO set substitute-path /build/.../linux-... /path/to/linux-source

	err = interactive(q)
	return
}
// firstSupported returns the first configured kernel, matching the
// given mask, that the artifact supports.
func firstSupported(kcfg config.KernelConfig, ka artifact.Artifact,
	kernel string) (ki distro.KernelInfo, err error) {

	km, err := kernelMask(kernel)
	if err != nil {
		return
	}
	ka.Targets = []artifact.Target{km}

	for _, k := range kcfg.Kernels {
		ki = k
		var ok bool
		ok, err = ka.Supported(ki)
		if err != nil || ok {
			return
		}
	}
	err = errors.New("no supported kernel found")
	return
}
// handleLine reads one interactive command from stdin and executes it.
// A returned error ends the session.
func handleLine(q *qemu.System) (err error) {
	fmt.Print("out-of-tree> ")
	rawLine := "help"
	fmt.Scanf("%s", &rawLine)
	params := strings.Fields(rawLine)
	// Robustness: never index params[0] on an empty token list.
	if len(params) == 0 {
		return
	}
	cmd := params[0]

	switch cmd {
	case "h", "help":
		fmt.Printf("help\t: print this help message\n")
		fmt.Printf("log\t: print qemu log\n")
		fmt.Printf("clog\t: print qemu log and cleanup buffer\n")
		fmt.Printf("cleanup\t: cleanup qemu log buffer\n")
		fmt.Printf("ssh\t: print arguments to ssh command\n")
		fmt.Printf("quit\t: quit\n")
	case "l", "log":
		fmt.Println(q.Stdout)
	case "cl", "clog":
		fmt.Println(q.Stdout)
		q.Stdout = ""
	case "c", "cleanup":
		q.Stdout = ""
	case "s", "ssh":
		fmt.Println(q.GetSSHCommand())
	case "q", "quit":
		return errors.New("end of session")
	default:
		fmt.Println("No such command")
	}
	return
}
// interactive runs the debug REPL until a command signals termination.
func interactive(q *qemu.System) (err error) {
	for err == nil {
		err = handleLine(q)
	}
	return
}

216
cmd/distro.go Normal file
View File

@ -0,0 +1,216 @@
package cmd
import (
"context"
"fmt"
"math"
"os"
"path/filepath"
"regexp"
"time"
"github.com/cavaliergopher/grab/v3"
"github.com/davecgh/go-spew/spew"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/distro/debian"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// DistroCmd groups distro-related subcommands.
type DistroCmd struct {
	List DistroListCmd `cmd:"" help:"list available distros"`
	// Debian is hidden: maintenance tooling for the Debian metadata cache.
	Debian DebianCmd `cmd:"" hidden:""`
}
// DebianCmd groups Debian cache maintenance subcommands and the
// limit/regex filters they share.
type DebianCmd struct {
	Cache DebianCacheCmd `cmd:"" help:"populate cache"`
	Fetch DebianFetchCmd `cmd:"" help:"download deb packages"`
	// Limit of 0 means unlimited (expanded to MaxInt32 at run time).
	Limit int `help:"limit amount of kernels to fetch"`
	Regex string `help:"match deb pkg names by regex" default:".*"`
}
// DebianCacheCmd populates the local Debian kernels metadata cache.
type DebianCacheCmd struct {
	Path string `help:"path to cache"`
	Refetch int `help:"days before refetch versions without deb package" default:"7"`
	UpdateRelease bool `help:"update release data"`
	UpdateKbuild bool `help:"update kbuild package"`
	Dump bool `help:"dump cache"`
}
// Run populates the Debian kernels metadata cache and, with --dump,
// prints the cached entries whose image name matches the regex.
func (cmd *DebianCacheCmd) Run(dcmd *DebianCmd) (err error) {
	if cmd.Path != "" {
		debian.CachePath = cmd.Path
	}
	debian.RefetchDays = cmd.Refetch
	log.Info().Msg("Fetching kernels...")
	if dcmd.Limit == 0 {
		// No explicit limit means fetch everything.
		dcmd.Limit = math.MaxInt32
	}
	// Build the update-mode bitmask from the flags.
	mode := debian.NoMode
	if cmd.UpdateRelease {
		mode |= debian.UpdateRelease
	}
	if cmd.UpdateKbuild {
		mode |= debian.UpdateKbuild
	}
	kernels, err := debian.GetKernelsWithLimit(dcmd.Limit, mode)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}
	if cmd.Dump {
		// Local err shadow is fine: a bad regex is fatal anyway.
		re, err := regexp.Compile(dcmd.Regex)
		if err != nil {
			log.Fatal().Err(err).Msg("regex")
		}
		for _, kernel := range kernels {
			if !re.MatchString(kernel.Image.Deb.Name) {
				continue
			}
			fmt.Println(spew.Sdump(kernel))
		}
	}
	log.Info().Msg("Success")
	return
}
// DebianFetchCmd downloads Debian kernel deb packages in parallel.
type DebianFetchCmd struct {
	Path string `help:"path to download directory" type:"existingdir" default:"./"`
	IgnoreMirror bool `help:"ignore check if packages on the mirror"`
	Max int `help:"do not download more than X" default:"100500"`
	Threads int `help:"parallel download threads" default:"8"`
	Timeout time.Duration `help:"timeout for each download" default:"1m"`
	// swg bounds the number of concurrent fetch goroutines.
	swg sizedwaitgroup.SizedWaitGroup
	// hasResults records whether at least one package was downloaded.
	hasResults bool
}
// fetch downloads one deb package into cmd.Path, skipping packages
// that already exist locally or (unless --ignore-mirror) are available
// on the mirror. Runs as a goroutine bounded by cmd.swg.
//
// NOTE(review): fetch runs on multiple goroutines, but the writes to
// cmd.hasResults and cmd.Max below are unsynchronized shared state
// (cmd.Max is also read in Run's dispatch loop) — looks like a data
// race; confirm and guard with sync/atomic or a mutex.
func (cmd *DebianFetchCmd) fetch(pkg snapshot.Package) {
	flog := log.With().
		Str("pkg", pkg.Deb.Name).
		Logger()
	defer cmd.swg.Done()
	if !cmd.IgnoreMirror {
		flog.Debug().Msg("check mirror")
		found, _ := cache.PackageURL(distro.Debian, pkg.Deb.URL)
		if found {
			flog.Debug().Msg("found on the mirror")
			return
		}
	}
	target := filepath.Join(cmd.Path, filepath.Base(pkg.Deb.URL))
	if fs.PathExists(target) {
		flog.Debug().Msg("already exists")
		return
	}
	// Download to a temp dir first, rename into place on success.
	tmp, err := os.MkdirTemp(cmd.Path, "tmp-")
	if err != nil {
		flog.Fatal().Err(err).Msg("mkdir")
		return
	}
	defer os.RemoveAll(tmp)
	flog.Info().Msg("fetch")
	flog.Debug().Msg(pkg.Deb.URL)
	ctx, cancel := context.WithTimeout(context.Background(), cmd.Timeout)
	defer cancel()
	req, err := grab.NewRequest(tmp, pkg.Deb.URL)
	if err != nil {
		flog.Warn().Err(err).Msg("cannot create request")
		return
	}
	req = req.WithContext(ctx)
	resp := grab.DefaultClient.Do(req)
	if err := resp.Err(); err != nil {
		flog.Warn().Err(err).Msg("request cancelled")
		return
	}
	err = os.Rename(resp.Filename, target)
	if err != nil {
		flog.Fatal().Err(err).Msg("mv")
	}
	cmd.hasResults = true
	cmd.Max--
}
// Run collects the deb packages matching the regex from the cached
// kernel metadata and downloads up to cmd.Max of them in parallel.
func (cmd *DebianFetchCmd) Run(dcmd *DebianCmd) (err error) {
	re, err := regexp.Compile(dcmd.Regex)
	if err != nil {
		log.Fatal().Err(err).Msg("regex")
	}
	log.Info().Msg("will not download packages that exist on the mirror")
	log.Info().Msg("use --ignore-mirror if you really need it")
	if dcmd.Limit == 0 {
		dcmd.Limit = math.MaxInt32
	}
	kernels, err := debian.GetKernelsWithLimit(dcmd.Limit, debian.NoMode)
	if err != nil {
		log.Error().Err(err).Msg("")
		return
	}
	var packages []snapshot.Package
	for _, kernel := range kernels {
		for _, pkg := range kernel.Packages() {
			if !re.MatchString(pkg.Deb.Name) {
				continue
			}
			packages = append(packages, pkg)
		}
	}
	cmd.swg = sizedwaitgroup.New(cmd.Threads)
	for _, pkg := range packages {
		// NOTE(review): cmd.Max is decremented concurrently by the
		// fetch goroutines without synchronization — confirm and
		// make this counter atomic.
		if cmd.Max <= 0 {
			break
		}
		cmd.swg.Add()
		go cmd.fetch(pkg)
	}
	cmd.swg.Wait()
	if !cmd.hasResults {
		log.Fatal().Msg("no packages found to download")
	}
	return
}
// DistroListCmd lists all supported distros.
type DistroListCmd struct{}

// Run prints every supported distro as "<id> <release>".
func (cmd *DistroListCmd) Run() (err error) {
	list := distro.List()
	for i := range list {
		fmt.Println(list[i].ID, list[i].Release)
	}
	return
}

57
cmd/gen.go Normal file
View File

@ -0,0 +1,57 @@
// Copyright 2018 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"fmt"
"github.com/naoina/toml"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// GenCmd prints an example artifact configuration to stdout.
type GenCmd struct {
	// Type selects which example to generate: "module" or "exploit".
	Type string `enum:"module,exploit" required:"" help:"module/exploit"`
}
// Run generates the example config for the selected artifact type.
func (cmd *GenCmd) Run(g *Globals) (err error) {
	switch cmd.Type {
	case "exploit":
		err = genConfig(artifact.KernelExploit)
	case "module":
		err = genConfig(artifact.KernelModule)
	}
	return
}
// genConfig prints an example artifact configuration of the given
// type as TOML on stdout.
func genConfig(at artifact.ArtifactType) (err error) {
	a := artifact.Artifact{
		Name: "Put name here",
		Type: at,
		Targets: []artifact.Target{
			{
				Distro: distro.Distro{ID: distro.Ubuntu, Release: "18.04"},
				Kernel: artifact.Kernel{Regex: ".*"},
			},
			{
				Distro: distro.Distro{ID: distro.Debian, Release: "8"},
				Kernel: artifact.Kernel{Regex: ".*"},
			},
		},
		Preload: []artifact.PreloadModule{
			{Repo: "Repo name (e.g. https://github.com/openwall/lkrg)"},
		},
		Patches: []artifact.Patch{
			{Path: "/path/to/profiling.patch"},
		},
	}
	var buf []byte
	buf, err = toml.Marshal(&a)
	if err != nil {
		return
	}
	fmt.Print(string(buf))
	return
}

18
cmd/globals.go Normal file
View File

@ -0,0 +1,18 @@
package cmd
import (
"net/url"
"code.dumpstack.io/tools/out-of-tree/config"
)
// Globals holds options shared by all subcommands.
type Globals struct {
	Config config.OutOfTree `help:"path to out-of-tree configuration" default:"~/.out-of-tree/out-of-tree.toml"`
	WorkDir string `help:"path to work directory" default:"./" type:"path" existingdir:""`
	// CacheURL overrides the default mirror URL when set.
	CacheURL url.URL
	// Remote/RemoteAddr select execution on a remote daemon.
	Remote bool `help:"run at remote server"`
	RemoteAddr string `default:"localhost:63527"`
}

114
cmd/images.go Normal file
View File

@ -0,0 +1,114 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// ImageCmd groups qemu rootfs image subcommands.
type ImageCmd struct {
	List ImageListCmd `cmd:"" help:"list images"`
	Edit ImageEditCmd `cmd:"" help:"edit image"`
}
// ImageListCmd lists the locally available rootfs images.
type ImageListCmd struct{}

// Run prints the name of every file in ~/.out-of-tree/images.
func (cmd *ImageListCmd) Run(g *Globals) (err error) {
	entries, err := os.ReadDir(dotfiles.Dir("images"))
	if err != nil {
		return
	}
	for i := range entries {
		fmt.Println(entries[i].Name())
	}
	return
}
// ImageEditCmd boots an image in a mutable qemu instance so it can be
// modified over ssh.
type ImageEditCmd struct {
	Name string `help:"image name" required:""`
	DryRun bool `help:"do nothing, just print commands"`
}
// Run boots the named image in a mutable qemu instance for editing over
// ssh, or with --dry-run just prints the qemu and ssh command lines.
func (cmd *ImageEditCmd) Run(g *Globals) (err error) {
	image := filepath.Join(dotfiles.Dir("images"), cmd.Name)
	if !fs.PathExists(image) {
		// Fix: previously only printed a message and kept going with
		// a nonexistent image.
		return errors.New("image does not exist")
	}

	kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		return
	}
	if len(kcfg.Kernels) == 0 {
		return errors.New("no kernels found")
	}

	// Pick the configured kernel that boots from this image, if any.
	ki := distro.KernelInfo{}
	for _, k := range kcfg.Kernels {
		if k.RootFS == image {
			ki = k
			break
		}
	}

	kernel := qemu.Kernel{
		KernelPath: ki.KernelPath,
		InitrdPath: ki.InitrdPath,
	}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		// Fix: this error was previously ignored.
		return
	}
	// Mutable: changes made inside the guest persist in the image.
	q.Mutable = true

	if cmd.DryRun {
		// Print the exact qemu invocation, quoting args that would
		// otherwise be split or misparsed by a shell.
		s := q.Executable()
		for _, arg := range q.Args() {
			if strings.Contains(arg, " ") ||
				strings.Contains(arg, ",") {
				s += fmt.Sprintf(` "%s"`, arg)
			} else {
				s += fmt.Sprintf(" %s", arg)
			}
		}
		fmt.Println(s)
		fmt.Println(q.GetSSHCommand())
		return
	}

	err = q.Start()
	if err != nil {
		fmt.Println("Qemu start error:", err)
		return
	}
	defer q.Stop()

	fmt.Print("ssh command:\n\n\t")
	fmt.Println(q.GetSSHCommand())

	fmt.Print("\npress enter to stop")
	fmt.Scanln()

	q.Command("root", "poweroff")

	// Wait for the guest to shut down cleanly before returning.
	for !q.Died {
		time.Sleep(time.Second)
	}
	return
}

448
cmd/kernel.go Normal file
View File

@ -0,0 +1,448 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/cavaliergopher/grab/v3"
"github.com/naoina/toml"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/fs"
"code.dumpstack.io/tools/out-of-tree/kernel"
)
// KernelCmd is the "kernel" command group: generation, installation
// and listing of kernels for the supported distros.
type KernelCmd struct {
	NoDownload bool `help:"do not download qemu image while kernel generation"`
	UseHost bool `help:"also use host kernels"`
	Force bool `help:"force reinstall kernel"`
	NoHeaders bool `help:"do not install kernel headers"`
	Shuffle bool `help:"randomize kernels installation order"`
	Retries int `help:"amount of tries for each kernel" default:"2"`
	Threads int `help:"threads for parallel installation" default:"1"`
	Update bool `help:"update container"`
	ContainerCache bool `help:"try prebuilt container images first" default:"true" negatable:""`
	Max int `help:"maximum kernels to download" default:"100500"`
	NoPrune bool `help:"do not remove dangling or unused images from local storage after build"`
	NoCfgRegen bool `help:"do not update kernels.toml"`

	ContainerTimeout time.Duration `help:"container timeout"`

	List KernelListCmd `cmd:"" help:"list kernels"`
	ListRemote KernelListRemoteCmd `cmd:"" help:"list remote kernels"`
	Autogen KernelAutogenCmd `cmd:"" help:"generate kernels based on the current config"`
	Genall KernelGenallCmd `cmd:"" help:"generate all kernels for distro"`
	Install KernelInstallCmd `cmd:"" help:"install specific kernel"`
	ConfigRegen KernelConfigRegenCmd `cmd:"" help:"regenerate config"`

	// shutdown is set by the SIGINT handler (kernel.SetSigintHandler)
	// and polled between installation steps.
	shutdown bool
	// kcfg is the kernel config loaded by Generate.
	kcfg config.KernelConfig

	// stats counts attempted (overall) vs successful installations.
	stats struct {
		overall int
		success int
	}
}
// UpdateConfig rewrites kernels.toml from the set of currently
// installed kernels (plus host kernels when requested).
func (cmd KernelCmd) UpdateConfig() (err error) {
	failed := cmd.stats.overall - cmd.stats.success
	if failed != 0 {
		log.Warn().Msgf("%d kernels failed to install", failed)
	}

	if cmd.NoCfgRegen {
		log.Info().Msgf("kernels.toml is not updated")
		return
	}

	log.Info().Msgf("updating kernels.toml")

	updated := config.KernelConfig{}

	if cmd.UseHost {
		// Get host kernels
		updated.Kernels, err = kernel.GenHostKernels(!cmd.NoDownload)
		if err != nil {
			return
		}
	}

	// Collect the kernels known to every distro backend.
	for _, dist := range distro.List() {
		var kernels []distro.KernelInfo
		kernels, err = dist.Kernels()
		if err != nil {
			return
		}
		updated.Kernels = append(updated.Kernels, kernels...)
	}

	buf, err := toml.Marshal(&updated)
	if err != nil {
		return
	}

	err = os.WriteFile(dotfiles.File("kernels.toml"), buf, os.ModePerm)
	if err != nil {
		return
	}

	log.Info().Msgf("kernels.toml successfully updated")
	return
}
// GenKernel installs a single kernel package for the target distro,
// retrying up to cmd.Retries times, and updates the counters.
func (cmd *KernelCmd) GenKernel(km artifact.Target, pkg string) {
	flog := log.With().
		Str("kernel", pkg).
		Str("distro", km.Distro.String()).
		Logger()

	// Is this package already present in the kernel config?
	reinstall := false
	for _, kinfo := range cmd.kcfg.Kernels {
		if !km.Distro.Equal(kinfo.Distro) {
			continue
		}

		var found bool
		switch kinfo.Distro.ID {
		case distro.Debian: // FIXME
			found = pkg == kinfo.Package
		case distro.OpenSUSE:
			found = strings.Contains(pkg, kinfo.KernelRelease)
		default:
			found = strings.Contains(pkg, kinfo.KernelVersion)
		}

		if !found {
			continue
		}
		if !cmd.Force {
			flog.Info().Msg("already installed")
			return
		}
		reinstall = true
		break
	}

	msg := "install"
	if reinstall {
		msg = "reinstall"
	}
	flog.Info().Msg(msg)

	cmd.stats.overall += 1

	for attempt := 1; ; attempt++ {
		if cmd.shutdown {
			return
		}

		err := km.Distro.Install(pkg, !cmd.NoHeaders)
		if err == nil {
			cmd.stats.success += 1
			flog.Info().Msg("success")
			return
		}

		// Out of retries: give up on this package.
		if attempt >= cmd.Retries {
			flog.Error().Err(err).Msg("install kernel")
			flog.Debug().Msg("skip")
			return
		}

		flog.Warn().Err(err).Msg("install kernel")
		time.Sleep(time.Second)
		flog.Info().Msg("retry")
	}
}
// fetchContainerCache tries to fetch a prebuilt container image from
// the out-of-tree cache instead of building it locally.
func (cmd *KernelCmd) fetchContainerCache(c container.Container) {
	// Best effort: any failure below simply falls back to a
	// local build, so errors are ignored.
	if !cmd.ContainerCache || c.Exist() {
		return
	}

	tmp, err := fs.TempDir()
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	resp, err := grab.Get(tmp, cache.ContainerURL(c.Name()))
	if err != nil {
		return
	}
	defer os.Remove(resp.Filename)

	if container.Load(resp.Filename, c.Name()) == nil {
		log.Info().Msgf("use prebuilt container %s", c.Name())
	}
}
// Generate installs every kernel package matching the target km,
// running up to cmd.Threads installations in parallel.
func (cmd *KernelCmd) Generate(g *Globals, km artifact.Target) (err error) {
	// Log the final outcome regardless of where we bail out.
	defer func() {
		if err != nil {
			log.Warn().Err(err).Msg("")
		} else {
			log.Debug().Err(err).Msg("")
		}
	}()

	if cmd.Update {
		container.UseCache = false
	}
	if cmd.NoPrune {
		container.Prune = false
	}

	// A missing/unreadable kernels.toml is not fatal here: it only
	// means no kernels are known yet.
	cmd.kcfg, err = config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		log.Debug().Err(err).Msg("read kernels config")
	}

	container.Commands = g.Config.Docker.Commands
	container.Registry = g.Config.Docker.Registry
	container.Timeout = g.Config.Docker.Timeout.Duration
	if cmd.ContainerTimeout != 0 {
		// CLI flag overrides the config file value.
		container.Timeout = cmd.ContainerTimeout
	}

	log.Info().Msgf("Generating for target %v", km)

	_, err = kernel.GenRootfsImage(km.Distro.RootFS(), !cmd.NoDownload)
	if err != nil || cmd.shutdown {
		return
	}

	c, err := container.New(km.Distro)
	if err != nil || cmd.shutdown {
		return
	}

	// Try a prebuilt container image before building locally.
	cmd.fetchContainerCache(c)

	pkgs, err := kernel.MatchPackages(km)
	if err != nil || cmd.shutdown {
		return
	}

	if cmd.Shuffle {
		pkgs = kernel.ShuffleStrings(pkgs)
	}

	swg := sizedwaitgroup.New(cmd.Threads)

	for i, pkg := range pkgs {
		if cmd.shutdown {
			err = nil
			return
		}

		swg.Add()

		// Re-check after Add: it may have blocked for a while.
		if cmd.shutdown {
			err = nil
			swg.Done()
			return
		}

		if cmd.stats.success >= cmd.Max {
			log.Print("Max is reached")
			swg.Done()
			break
		}

		log.Info().Msgf("%d/%d %s", i+1, len(pkgs), pkg)

		// pkg is passed as an argument to pin its value for
		// the goroutine.
		go func(p string) {
			defer swg.Done()
			cmd.GenKernel(km, p)
		}(pkg)
	}
	swg.Wait()

	return
}
// KernelListCmd prints every kernel from kernels.toml.
type KernelListCmd struct{}

// Run lists distro id, release and kernel release per config entry.
func (cmd *KernelListCmd) Run(g *Globals) (err error) {
	kcfg, err := config.ReadKernelConfig(g.Config.Kernels)
	if err != nil {
		// Non-fatal: an empty config is reported just below.
		log.Debug().Err(err).Msg("read kernel config")
	}

	if len(kcfg.Kernels) == 0 {
		return errors.New("no kernels found")
	}

	for _, ki := range kcfg.Kernels {
		fmt.Println(ki.Distro.ID, ki.Distro.Release, ki.KernelRelease)
	}
	return
}
// KernelListRemoteCmd lists the kernel packages available for a
// distro/version without installing them.
type KernelListRemoteCmd struct {
	Distro string `required:"" help:"distribution"`
	Ver string `help:"distro version"`
}

// Run prints every package matching the distro/version target.
func (cmd *KernelListRemoteCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	if kernelCmd.Update {
		container.UseCache = false
	}
	if kernelCmd.NoPrune {
		container.Prune = false
	}

	distroType, err := distro.NewID(cmd.Distro)
	if err != nil {
		return
	}

	// Match every kernel of the chosen distro/release.
	km := artifact.Target{
		Distro: distro.Distro{ID: distroType, Release: cmd.Ver},
		Kernel: artifact.Kernel{Regex: ".*"},
	}

	_, err = kernel.GenRootfsImage(km.Distro.RootFS(), false)
	if err != nil {
		return
	}

	container.Registry = g.Config.Docker.Registry
	container.Commands = g.Config.Docker.Commands

	c, err := container.New(km.Distro)
	if err != nil {
		return
	}

	// Try a prebuilt container image before building locally.
	kernelCmd.fetchContainerCache(c)

	pkgs, err := kernel.MatchPackages(km)
	// error check skipped on purpose
	// NOTE(review): err is a named return, so a MatchPackages
	// error is still returned to the caller after the packages
	// fetched so far are printed — confirm that is intended.
	for _, k := range pkgs {
		fmt.Println(k)
	}
	return
}
// KernelAutogenCmd generates kernels for every target listed in the
// artifact's .out-of-tree.toml.
type KernelAutogenCmd struct{}

// Run installs the kernels for each artifact target, then refreshes
// kernels.toml.
func (cmd *KernelAutogenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	ka, err := artifact.Artifact{}.Read(g.WorkDir + "/.out-of-tree.toml")
	if err != nil {
		return
	}

	kernel.SetSigintHandler(&kernelCmd.shutdown)

	for _, target := range ka.Targets {
		// Autogen needs a concrete release per target.
		if target.Distro.Release == "" {
			return errors.New("please set distro_release")
		}
		if err = kernelCmd.Generate(g, target); err != nil {
			return
		}
		if kernelCmd.shutdown {
			break
		}
	}

	return kernelCmd.UpdateConfig()
}
// KernelGenallCmd generates every kernel for one distro (or for all
// distros when no filter is given).
type KernelGenallCmd struct {
	Distro string `help:"distribution"`
	Ver string `help:"distro version"`
}

// Run walks all known distros, generating kernels for those that
// pass the optional distro/version filters.
func (cmd *KernelGenallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	distroType, err := distro.NewID(cmd.Distro)
	if err != nil {
		return
	}

	kernel.SetSigintHandler(&kernelCmd.shutdown)

	for _, dist := range distro.List() {
		if kernelCmd.shutdown {
			break
		}

		// Skip distros that do not match the optional filters.
		if (distroType != distro.None && distroType != dist.ID) ||
			(cmd.Ver != "" && dist.Release != cmd.Ver) {
			continue
		}

		target := artifact.Target{
			Distro: dist,
			Kernel: artifact.Kernel{Regex: ".*"},
		}

		// A failing distro is not fatal: keep going with the
		// remaining ones.
		err = kernelCmd.Generate(g, target)
		if err != nil {
			continue
		}
	}

	return kernelCmd.UpdateConfig()
}
// KernelInstallCmd installs the kernels matching a release mask for
// a single distro/version.
type KernelInstallCmd struct {
	Distro string `required:"" help:"distribution"`
	Ver string `required:"" help:"distro version"`
	Kernel string `required:"" help:"kernel release mask"`
}

// Run generates/installs the requested kernels, then refreshes
// kernels.toml.
func (cmd *KernelInstallCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	id, err := distro.NewID(cmd.Distro)
	if err != nil {
		return
	}

	kernel.SetSigintHandler(&kernelCmd.shutdown)

	target := artifact.Target{
		Distro: distro.Distro{ID: id, Release: cmd.Ver},
		Kernel: artifact.Kernel{Regex: cmd.Kernel},
	}

	if err = kernelCmd.Generate(g, target); err != nil {
		return
	}

	return kernelCmd.UpdateConfig()
}
// KernelConfigRegenCmd regenerates kernels.toml from the currently
// installed kernels.
type KernelConfigRegenCmd struct{}

// Run rewrites kernels.toml via KernelCmd.UpdateConfig.
func (cmd *KernelConfigRegenCmd) Run(kernelCmd *KernelCmd, g *Globals) (err error) {
	return kernelCmd.UpdateConfig()
}

322
cmd/log.go Normal file
View File

@ -0,0 +1,322 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"database/sql"
"encoding/json"
"fmt"
"math"
"os"
"github.com/olekukonko/tablewriter"
"github.com/rs/zerolog/log"
"gopkg.in/logrusorgru/aurora.v2"
"code.dumpstack.io/tools/out-of-tree/artifact"
)
// LogCmd is the "log" command group for inspecting test results
// stored in the database.
type LogCmd struct {
	Query LogQueryCmd `cmd:"" help:"query logs"`
	Dump LogDumpCmd `cmd:"" help:"show all info for log entry with ID"`
	Json LogJsonCmd `cmd:"" help:"generate json statistics"`
	Markdown LogMarkdownCmd `cmd:"" help:"generate markdown statistics"`
}

// LogQueryCmd prints recent log entries and their success rate.
type LogQueryCmd struct {
	Num int `help:"how much lines" default:"50"`
	Rate bool `help:"show artifact success rate"`
	Tag string `help:"filter tag"`
}
// Run prints the last cmd.Num log entries (filtered by tag and, when
// a .out-of-tree.toml is present, by artifact) and the success rate.
func (cmd *LogQueryCmd) Run(g *Globals) (err error) {
	db, err := openDatabase(g.Config.Database)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var les []logEntry

	ka, kaErr := artifact.Artifact{}.Read(g.WorkDir + "/.out-of-tree.toml")
	if kaErr == nil {
		log.Print(".out-of-tree.toml found, filter by artifact name")
		les, err = getAllArtifactLogs(db, cmd.Tag, cmd.Num, ka)
	} else {
		les, err = getAllLogs(db, cmd.Tag, cmd.Num)
	}
	if err != nil {
		return
	}

	// s is the prefix of the final "%success rate" Printf below:
	// "\nS" renders "Success rate", the --rate variant renders
	// "{[type] name} Overall success rate".
	s := "\nS"
	if cmd.Rate {
		if kaErr != nil {
			err = kaErr
			return
		}

		s = fmt.Sprintf("{[%s] %s} Overall s", ka.Type, ka.Name)

		// Rate is computed over the complete history, not just
		// the last cmd.Num entries.
		les, err = getAllArtifactLogs(db, cmd.Tag, math.MaxInt64, ka)
		if err != nil {
			return
		}
	} else {
		for _, l := range les {
			logLogEntry(l)
		}
	}

	// Previously an empty result produced a NaN rate (0/0).
	if len(les) == 0 {
		return fmt.Errorf("no log entries found")
	}

	success := 0
	for _, l := range les {
		if l.Test.Ok {
			success++
		}
	}

	overall := float64(success) / float64(len(les))
	fmt.Printf("%success rate: %.04f (~%.0f%%)\n",
		s, overall, overall*100)

	return
}
// LogDumpCmd prints everything recorded for one log entry.
type LogDumpCmd struct {
	// ID of the entry; the default -1 selects the latest entry.
	ID int `help:"id" default:"-1"`
}

// Run dumps the metadata, build/run/test results and the qemu
// stdout/stderr of the selected log entry.
func (cmd *LogDumpCmd) Run(g *Globals) (err error) {
	db, err := openDatabase(g.Config.Database)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var l logEntry
	if cmd.ID > 0 {
		l, err = getLogByID(db, cmd.ID)
	} else {
		// Fall back to the most recent entry.
		l, err = getLastLog(db)
	}
	if err != nil {
		return
	}

	fmt.Println("ID:", l.ID)
	fmt.Println("Date:", l.Timestamp.Format("2006-01-02 15:04"))
	fmt.Println("Tag:", l.Tag)
	fmt.Println()

	fmt.Println("Type:", l.Type.String())
	fmt.Println("Name:", l.Name)
	fmt.Println()

	fmt.Println("Distro:", l.Distro.ID.String(), l.Distro.Release)
	fmt.Println("Kernel:", l.KernelRelease)
	fmt.Println()

	fmt.Println("Build ok:", l.Build.Ok)
	// Insmod only applies to kernel modules.
	if l.Type == artifact.KernelModule {
		fmt.Println("Insmod ok:", l.Run.Ok)
	}
	fmt.Println("Test ok:", l.Test.Ok)
	fmt.Println()

	fmt.Printf("Build output:\n%s\n", l.Build.Output)
	fmt.Println()

	if l.Type == artifact.KernelModule {
		fmt.Printf("Insmod output:\n%s\n", l.Run.Output)
		fmt.Println()
	}

	fmt.Printf("Test output:\n%s\n", l.Test.Output)
	fmt.Println()

	fmt.Printf("Qemu stdout:\n%s\n", l.Stdout)
	fmt.Println()

	fmt.Printf("Qemu stderr:\n%s\n", l.Stderr)
	fmt.Println()

	return
}
// LogJsonCmd emits per-kernel statistics for one tag as JSON.
type LogJsonCmd struct {
	Tag string `required:"" help:"filter tag"`
}

// Run marshals the aggregated statistics and prints them to stdout.
func (cmd *LogJsonCmd) Run(g *Globals) (err error) {
	db, err := openDatabase(g.Config.Database)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	distros, err := getStats(db, g.WorkDir, cmd.Tag)
	if err != nil {
		return
	}

	var raw []byte
	raw, err = json.Marshal(&distros)
	if err != nil {
		return
	}

	fmt.Println(string(raw))
	return
}
// LogMarkdownCmd renders per-kernel reliability statistics as a
// markdown table.
type LogMarkdownCmd struct {
	Tag string `required:"" help:"filter tag"`
}

// Run prints one table row per distro/release/kernel combination.
func (cmd *LogMarkdownCmd) Run(g *Globals) (err error) {
	db, err := openDatabase(g.Config.Database)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	distros, err := getStats(db, g.WorkDir, cmd.Tag)
	if err != nil {
		return
	}

	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Distro", "Release", "Kernel", "Reliability"})
	table.SetBorders(tablewriter.Border{
		Left: true, Top: false, Right: true, Bottom: false})
	table.SetCenterSeparator("|")

	for name, releases := range distros {
		for release, kernels := range releases {
			for kern, stat := range kernels {
				total := float64(stat.All)
				passed := float64(stat.TestOK)
				rate := fmt.Sprintf("%6.02f%%", (passed/total)*100)
				table.Append([]string{name, release, kern, rate})
			}
		}
	}

	table.Render()
	return
}
// center pads s on both sides with spaces to width w.
func center(s string, w int) string {
	// Right-pad point: half the total padding goes to the left.
	padded := fmt.Sprintf("%*s", (w+len(s))/2, s)
	// Negative width left-justifies, filling the right side.
	return fmt.Sprintf("%*s", -w, padded)
}
// genOkFailCentered renders name centered in a 10-char field, green
// on success and bold red on failure.
func genOkFailCentered(name string, ok bool) (aurv aurora.Value) {
	padded := center(name, 10)
	if !ok {
		return aurora.BgRed(aurora.White(aurora.Bold(padded)))
	}
	return aurora.BgGreen(aurora.Black(padded))
}
// logLogEntry prints one colorized summary line for a log entry:
// id, tag, timestamp, artifact, distro/kernel and a status badge.
func logLogEntry(l logEntry) {
	distroInfo := fmt.Sprintf("%s-%s {%s}", l.Distro.ID,
		l.Distro.Release, l.KernelRelease)

	artifactInfo := fmt.Sprintf("{[%s] %s}", l.Type, l.Name)

	timestamp := l.Timestamp.Format("2006-01-02 15:04")

	var status aurora.Value
	if l.InternalErrorString != "" {
		// Tool-side failure, not an artifact result.
		status = genOkFailCentered("INTERNAL", false)
	} else if l.Type == artifact.KernelExploit {
		// Exploits: only build + LPE matter.
		if l.Build.Ok {
			status = genOkFailCentered("LPE", l.Test.Ok)
		} else {
			status = genOkFailCentered("BUILD", l.Build.Ok)
		}
	} else {
		// Modules/scripts: the badge shows the first failed
		// stage (build -> insmod -> test).
		if l.Build.Ok {
			if l.Run.Ok {
				status = genOkFailCentered("TEST", l.Test.Ok)
			} else {
				status = genOkFailCentered("INSMOD", l.Run.Ok)
			}
		} else {
			status = genOkFailCentered("BUILD", l.Build.Ok)
		}
	}

	additional := ""
	if l.KernelPanic {
		additional = "(panic)"
	} else if l.KilledByTimeout {
		additional = "(timeout)"
	}

	colored := aurora.Sprintf("[%4d %4s] [%s] %s %-70s: %s %s",
		l.ID, l.Tag, timestamp, artifactInfo, distroInfo, status,
		additional)

	fmt.Println(colored)
}
// runstat aggregates per-kernel counters for the statistics
// commands (total entries plus per-stage/outcome counts).
type runstat struct {
	All, BuildOK, RunOK, TestOK, Timeout, Panic int
}
// getStats aggregates log entries into nested maps keyed by
// distro -> release -> kernel release. When path contains a
// .out-of-tree.toml the entries are filtered by that artifact.
func getStats(db *sql.DB, path, tag string) (
	distros map[string]map[string]map[string]runstat, err error) {

	var les []logEntry

	ka, kaErr := artifact.Artifact{}.Read(path + "/.out-of-tree.toml")
	if kaErr == nil {
		les, err = getAllArtifactLogs(db, tag, -1, ka)
	} else {
		les, err = getAllLogs(db, tag, -1)
	}
	if err != nil {
		return
	}

	distros = make(map[string]map[string]map[string]runstat)

	for _, l := range les {
		id := l.Distro.ID.String()
		release := l.Distro.Release

		// Lazily create the nested maps.
		if distros[id] == nil {
			distros[id] = make(map[string]map[string]runstat)
		}
		if distros[id][release] == nil {
			distros[id][release] = make(map[string]runstat)
		}

		rs := distros[id][release][l.KernelRelease]
		rs.All++
		if l.Build.Ok {
			rs.BuildOK++
		}
		if l.Run.Ok {
			rs.RunOK++
		}
		if l.Test.Ok {
			rs.TestOK++
		}
		if l.KernelPanic {
			rs.Panic++
		}
		if l.KilledByTimeout {
			rs.Timeout++
		}
		distros[id][release][l.KernelRelease] = rs
	}

	return
}

88
cmd/pack.go Normal file
View File

@ -0,0 +1,88 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"fmt"
"os"
"time"
"code.dumpstack.io/tools/out-of-tree/fs"
"github.com/rs/zerolog/log"
)
// PackCmd runs the test suite over every artifact found in the
// subdirectories of the work dir, under a single shared tag.
type PackCmd struct {
	Autogen bool `help:"kernel autogeneration"`
	UseHost bool `help:"also use host kernels"`
	NoDownload bool `help:"do not download qemu image while kernel generation"`
	ExploitRuns int64 `default:"4" help:"amount of runs of each exploit"`
	KernelRuns int64 `default:"1" help:"amount of runs of each kernel"`
	Max int `help:"download random kernels from set defined by regex in release_mask, but no more than X for each of release_mask" default:"1"`

	Threads int `help:"threads" default:"4"`

	Tag string `help:"filter tag"`

	Timeout time.Duration `help:"timeout after tool will not spawn new tests"`
	QemuTimeout time.Duration `help:"timeout for qemu"`
	DockerTimeout time.Duration `help:"timeout for docker"`
}
// Run iterates over all subdirectories of the work dir containing a
// .out-of-tree.toml, optionally autogenerates kernels for each, and
// runs the test suite (PewCmd) against every artifact under one tag.
func (cmd *PackCmd) Run(g *Globals) (err error) {
	tag := fmt.Sprintf("pack_run_%d", time.Now().Unix())
	log.Print("Tag:", tag)

	files, err := os.ReadDir(g.WorkDir)
	if err != nil {
		return
	}

	for _, f := range files {
		workPath := g.WorkDir + "/" + f.Name()

		// Only directories with an artifact config count.
		if !fs.PathExists(workPath + "/.out-of-tree.toml") {
			continue
		}

		if cmd.Autogen {
			autogen := KernelAutogenCmd{}
			err = autogen.Run(
				&KernelCmd{
					NoDownload: cmd.NoDownload,
					UseHost:    cmd.UseHost,
					Max:        cmd.Max,
				},
				&Globals{
					Config:  g.Config,
					WorkDir: workPath,
				},
			)
			if err != nil {
				return
			}
		}

		log.Print(f.Name())

		pew := PewCmd{
			Max:           cmd.KernelRuns,
			Runs:          cmd.ExploitRuns,
			Threads:       cmd.Threads,
			Tag:           tag,
			Timeout:       cmd.Timeout,
			QemuTimeout:   cmd.QemuTimeout,
			DockerTimeout: cmd.DockerTimeout,
			Dist:          pathDevNull,
		}

		// The result was silently discarded before; log the
		// failure but keep going with the remaining artifacts.
		perr := pew.Run(&Globals{
			Config:  g.Config,
			WorkDir: workPath,
		})
		if perr != nil {
			log.Error().Err(perr).Msg(f.Name())
		}
	}
	return
}

601
cmd/pew.go Normal file
View File

@ -0,0 +1,601 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package cmd
import (
"database/sql"
"errors"
"fmt"
"io"
"math/rand"
"os"
"os/exec"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/google/uuid"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"gopkg.in/logrusorgru/aurora.v2"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/client"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// pathDevNull as a Dist value disables on-disk build artifacts.
const pathDevNull = "/dev/null"

// LevelWriter forwards log events at or above Level to the wrapped
// io.Writer and silently drops everything below it.
type LevelWriter struct {
	io.Writer
	Level zerolog.Level
}
// WriteLevel writes p only when l is at or above lw.Level; dropped
// events still report success so zerolog keeps going.
func (lw *LevelWriter) WriteLevel(l zerolog.Level, p []byte) (n int, err error) {
	if l < lw.Level {
		return len(p), nil
	}
	return lw.Writer.Write(p)
}
// ConsoleWriter and FileWriter are the global log sinks used by
// testArtifact; LogLevel mirrors the configured verbosity.
var ConsoleWriter, FileWriter LevelWriter
var LogLevel zerolog.Level

// runstate accumulates test outcomes across all runs of a pew
// invocation.
type runstate struct {
	// Overall counts finished runs; Success the passing ones.
	Overall, Success float64
	InternalErrors int
}

var (
	state runstate
)

// successRate returns Success/Overall (NaN when nothing ran yet).
func successRate(state runstate) float64 {
	return state.Success / state.Overall
}
// PewCmd is the main test command: it builds and runs an artifact
// against a set of kernels, locally or via a remote server.
type PewCmd struct {
	Max int64 `help:"test no more than X kernels" default:"100500"`
	Runs int64 `help:"runs per each kernel" default:"1"`
	Kernel string `help:"override kernel regex"`
	RootFS string `help:"override rootfs image" type:"existingfile"`
	Guess bool `help:"try all defined kernels"`
	Shuffle bool `help:"randomize kernels test order"`
	Binary string `help:"use binary, do not build"`
	Test string `help:"override path for test"`
	Dist string `help:"build result path" default:"/dev/null"`
	Threads int `help:"threads" default:"1"`
	Tag string `help:"log tagging"`
	Timeout time.Duration `help:"timeout after tool will not spawn new tests"`

	ArtifactConfig string `help:"path to artifact config" type:"path"`

	QemuTimeout time.Duration `help:"timeout for qemu"`
	QemuAfterStartTimeout time.Duration `help:"timeout after starting of the qemu vm before tests"`
	DockerTimeout time.Duration `help:"timeout for docker"`

	Threshold float64 `help:"reliablity threshold for exit code" default:"1.00"`
	IncludeInternalErrors bool `help:"count internal errors as part of the success rate"`

	Endless bool `help:"endless tests"`
	EndlessTimeout time.Duration `help:"timeout between tests" default:"1m"`
	EndlessStress string `help:"endless stress script" type:"existingfile"`

	DB *sql.DB `kong:"-" json:"-"`
	Kcfg config.KernelConfig `kong:"-" json:"-"`
	TimeoutDeadline time.Time `kong:"-" json:"-"`

	Watch bool `help:"watch job status"`

	// repoName/commit identify the pushed worktree for remote runs.
	repoName string
	commit string

	// useRemote/remoteAddr are copied from Globals in Run.
	useRemote bool
	remoteAddr string

	// UUID of the job set
	groupUUID string
}
// getRepoName derives a stable repo name from the artifact name and
// the first seven characters of the repository's root commit.
func (cmd *PewCmd) getRepoName(worktree string, ka artifact.Artifact) {
	args := []string{
		"--work-tree=" + worktree,
		"rev-list", "--max-parents=0", "HEAD",
	}
	raw, err := exec.Command("git", args...).CombinedOutput()
	if err != nil {
		log.Error().Err(err).Msg(string(raw))
		return
	}

	cmd.repoName = fmt.Sprintf("%s-%s", ka.Name, string(raw[:7]))
}
// syncRepo pushes the local worktree to the remote out-of-tree
// server, registering the repo first if it does not exist yet.
func (cmd *PewCmd) syncRepo(worktree string, ka artifact.Artifact) (err error) {
	c := client.Client{RemoteAddr: cmd.remoteAddr}

	cmd.getRepoName(worktree, ka)

	// Record the exact commit remote jobs should run against.
	raw, err := exec.Command("git", "--work-tree="+worktree,
		"rev-parse", "HEAD").CombinedOutput()
	if err != nil {
		return
	}
	cmd.commit = strings.TrimSuffix(string(raw), "\n")

	_, err = c.GetRepo(cmd.repoName)
	if err != nil && err != client.ErrRepoNotFound {
		log.Error().Err(err).Msg("GetRepo API error")
		return
	}

	// First push of this repo: register it on the server.
	if err == client.ErrRepoNotFound {
		log.Warn().Msg("repo not found")
		log.Info().Msg("add repo")
		log.Warn().Msgf("%v", spew.Sdump(ka))
		err = c.AddRepo(api.Repo{Name: cmd.repoName})
		if err != nil {
			return
		}
	}

	err = c.PushRepo(api.Repo{Name: cmd.repoName, Path: worktree})
	if err != nil {
		log.Error().Err(err).Msg("push repo error")
		return
	}

	return
}
// Run is the entry point of "pew": it loads the kernel and artifact
// configuration, applies CLI overrides, then runs the tests either
// locally or via a remote out-of-tree server, and finally checks the
// measured success rate against the threshold.
func (cmd *PewCmd) Run(g *Globals) (err error) {
	cmd.groupUUID = uuid.New().String()
	log.Info().Str("group", cmd.groupUUID).Msg("")

	cmd.useRemote = g.Remote
	cmd.remoteAddr = g.RemoteAddr

	// Kernel list comes from the server in remote mode, otherwise
	// from the local kernels.toml.
	if cmd.useRemote {
		c := client.Client{RemoteAddr: cmd.remoteAddr}
		cmd.Kcfg.Kernels, err = c.Kernels()
		if err != nil {
			log.Fatal().Err(err).Msg("read kernels config")
		}
	} else {
		cmd.Kcfg, err = config.ReadKernelConfig(
			g.Config.Kernels)
		if err != nil {
			log.Fatal().Err(err).Msg("read kernels config")
		}
	}

	if cmd.Timeout != 0 {
		log.Info().Msgf("Set global timeout to %s", cmd.Timeout)
		cmd.TimeoutDeadline = time.Now().Add(cmd.Timeout)
	}

	cmd.DB, err = openDatabase(g.Config.Database)
	if err != nil {
		log.Fatal().Err(err).
			Msgf("Cannot open database %s", g.Config.Database)
	}
	defer cmd.DB.Close()

	var configPath string
	if cmd.ArtifactConfig == "" {
		configPath = g.WorkDir + "/.out-of-tree.toml"
	} else {
		configPath = cmd.ArtifactConfig
	}
	ka, err := artifact.Artifact{}.Read(configPath)
	if err != nil {
		return
	}

	if cmd.useRemote {
		err = cmd.syncRepo(g.WorkDir, ka)
		if err != nil {
			return
		}
	}

	// No explicit targets (or --guess): try every known distro.
	if len(ka.Targets) == 0 || cmd.Guess {
		log.Debug().Msg("will use all available targets")

		for _, dist := range distro.List() {
			ka.Targets = append(ka.Targets, artifact.Target{
				Distro: dist,
				Kernel: artifact.Kernel{
					Regex: ".*",
				},
			})
		}
	}

	if ka.SourcePath == "" {
		ka.SourcePath = g.WorkDir
	}

	// --kernel distroType:regex overrides the artifact targets.
	if cmd.Kernel != "" {
		var km artifact.Target
		km, err = kernelMask(cmd.Kernel)
		if err != nil {
			return
		}

		ka.Targets = []artifact.Target{km}
	}

	// TODO there was a lib for merge structures

	// Config file values first, then CLI overrides below.
	ka.Qemu.Timeout.Duration = g.Config.Qemu.Timeout.Duration
	ka.Docker.Timeout.Duration = g.Config.Docker.Timeout.Duration

	if cmd.QemuTimeout != 0 {
		log.Info().Msgf("Set qemu timeout to %s", cmd.QemuTimeout)
		g.Config.Qemu.Timeout.Duration = cmd.QemuTimeout
		ka.Qemu.Timeout.Duration = cmd.QemuTimeout
	}

	if cmd.DockerTimeout != 0 {
		log.Info().Msgf("Set docker timeout to %s", cmd.DockerTimeout)
		g.Config.Docker.Timeout.Duration = cmd.DockerTimeout
		ka.Docker.Timeout.Duration = cmd.DockerTimeout
	}

	if cmd.Tag == "" {
		cmd.Tag = fmt.Sprintf("%d", time.Now().Unix())
	}
	if !cmd.useRemote {
		log.Info().Str("tag", cmd.Tag).Msg("")
	}

	err = cmd.performCI(ka)
	if err != nil {
		return
	}

	// Remote submissions do not produce local stats.
	if cmd.useRemote {
		return
	}

	if state.InternalErrors > 0 {
		s := "not counted towards success rate"
		if cmd.IncludeInternalErrors {
			s = "included in success rate"
		}
		log.Warn().Msgf("%d internal errors "+
			"(%s)", state.InternalErrors, s)
	}

	if cmd.IncludeInternalErrors {
		state.Overall += float64(state.InternalErrors)
	}

	msg := fmt.Sprintf("Success rate: %.02f (%d/%d), Threshold: %.02f",
		successRate(state),
		int(state.Success), int(state.Overall),
		cmd.Threshold)
	if successRate(state) < cmd.Threshold {
		log.Error().Msg(msg)
		err = errors.New("reliability threshold not met")
	} else {
		log.Info().Msg(msg)
	}

	return
}
// watchJob polls the remote server once per second until the job
// with the given uuid reaches a terminal status, then logs it.
func (cmd PewCmd) watchJob(swg *sizedwaitgroup.SizedWaitGroup,
	slog zerolog.Logger, uuid string) {

	defer swg.Done() // FIXME

	c := client.Client{RemoteAddr: cmd.remoteAddr}

	var err error
	var st api.Status

	for {
		st, err = c.JobStatus(uuid)
		if err != nil {
			slog.Error().Err(err).Msg("")
			// Back off on API errors too; previously this
			// continued immediately and a persistent error
			// became a busy loop.
			time.Sleep(time.Second)
			continue
		}

		if st == api.StatusSuccess || st == api.StatusFailure {
			break
		}

		time.Sleep(time.Second)
	}

	switch st {
	case api.StatusSuccess:
		slog.Info().Msg("success")
	case api.StatusFailure:
		slog.Warn().Msg("failure")
	}
}
// remote submits one job (artifact + kernel) to the remote server
// and, with --watch, spawns a watcher goroutine for its status.
func (cmd PewCmd) remote(swg *sizedwaitgroup.SizedWaitGroup,
	ka artifact.Artifact, ki distro.KernelInfo) {
	defer swg.Done()

	slog := log.With().
		Str("distro_type", ki.Distro.ID.String()).
		Str("distro_release", ki.Distro.Release).
		Str("kernel", ki.KernelRelease).
		Logger()

	job := api.Job{}
	job.Group = cmd.groupUUID
	job.RepoName = cmd.repoName
	job.Commit = cmd.commit

	job.Artifact = ka
	job.Target = ki

	c := client.Client{RemoteAddr: cmd.remoteAddr}
	uuid, err := c.AddJob(job)
	// uuid may be empty if AddJob failed; it is logged as-is.
	slog = slog.With().Str("uuid", uuid).Logger()
	if err != nil {
		slog.Error().Err(err).Msg("cannot add job")
		return
	}
	slog.Info().Msg("add")

	if cmd.Watch {
		// FIXME dummy (almost)
		// NOTE(review): watchJob also calls swg.Done, so with
		// --watch Done runs twice for a single Add — confirm
		// the waitgroup accounting is intended.
		go cmd.watchJob(swg, slog, uuid)
	}
}
// testArtifact runs the local pipeline (build/insmod/test in qemu)
// for one artifact/kernel pair, logging to the console, the global
// file writer and a per-kernel log file under logs/<tag>/.
func (cmd PewCmd) testArtifact(swg *sizedwaitgroup.SizedWaitGroup,
	ka artifact.Artifact, ki distro.KernelInfo) {

	defer swg.Done()

	logdir := "logs/" + cmd.Tag
	err := os.MkdirAll(logdir, os.ModePerm)
	if err != nil {
		log.Error().Err(err).Msgf("mkdir %s", logdir)
		return
	}

	logfile := fmt.Sprintf("logs/%s/%s-%s-%s.log",
		cmd.Tag,
		ki.Distro.ID.String(),
		ki.Distro.Release,
		ki.KernelRelease,
	)
	f, err := os.Create(logfile)
	if err != nil {
		log.Error().Err(err).Msgf("create %s", logfile)
		return
	}
	defer f.Close()

	slog := zerolog.New(zerolog.MultiLevelWriter(
		&ConsoleWriter,
		&FileWriter,
		&zerolog.ConsoleWriter{
			Out: f,
			// These fields are already encoded in the
			// log file name.
			FieldsExclude: []string{
				"distro_release",
				"distro_type",
				"kernel",
			},
			NoColor: true,
		},
	))

	// Caller info only at trace/debug verbosity.
	switch LogLevel {
	case zerolog.TraceLevel, zerolog.DebugLevel:
		slog = slog.With().Caller().Logger()
	}

	slog = slog.With().Timestamp().
		Str("distro_type", ki.Distro.ID.String()).
		Str("distro_release", ki.Distro.Release).
		Str("kernel", ki.KernelRelease).
		Logger()

	// The callback persists each result to the database/dist dir.
	ka.Process(slog, ki,
		cmd.Endless, cmd.Binary, cmd.EndlessStress, cmd.EndlessTimeout,
		func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo, result *artifact.Result) {
			dumpResult(q, ka, ki, result, cmd.Dist, cmd.Tag, cmd.Binary, cmd.DB)
		},
	)
}
// shuffleKernels randomizes the kernel order in place and returns
// the same slice.
func shuffleKernels(kernels []distro.KernelInfo) []distro.KernelInfo {
	// FisherYates shuffle
	for idx := len(kernels) - 1; idx > 0; idx-- {
		swap := rand.Intn(idx + 1)
		kernels[idx], kernels[swap] = kernels[swap], kernels[idx]
	}
	return kernels
}
// process dispatches one artifact/kernel pair either to the remote
// API server or to a local qemu test run.
func (cmd PewCmd) process(swg *sizedwaitgroup.SizedWaitGroup,
	ka artifact.Artifact, kernel distro.KernelInfo) {

	if !cmd.useRemote {
		go cmd.testArtifact(swg, ka, kernel)
		return
	}
	go cmd.remote(swg, ka, kernel)
}
// performCI schedules up to cmd.Runs test runs for every supported
// kernel (bounded by cmd.Max and cmd.TimeoutDeadline), with at most
// cmd.Threads runs in flight at once.
func (cmd PewCmd) performCI(ka artifact.Artifact) (err error) {
	found := false
	max := cmd.Max

	threadCounter := 0

	swg := sizedwaitgroup.New(cmd.Threads)
	if cmd.Shuffle {
		cmd.Kcfg.Kernels = shuffleKernels(cmd.Kcfg.Kernels)
	}
	for _, kernel := range cmd.Kcfg.Kernels {
		if max <= 0 {
			break
		}

		var supported bool
		supported, err = ka.Supported(kernel)
		if err != nil {
			return
		}

		if kernel.Blocklisted {
			log.Debug().Str("kernel", kernel.KernelVersion).
				Msgf("skip (blocklisted)")
			continue
		}

		// Optional rootfs override from the CLI.
		if cmd.RootFS != "" {
			kernel.RootFS = cmd.RootFS
		}

		if supported {
			found = true
			max--
			for i := int64(0); i < cmd.Runs; i++ {
				// Stop spawning once the global deadline
				// has passed.
				if !cmd.TimeoutDeadline.IsZero() &&
					time.Now().After(cmd.TimeoutDeadline) {

					break
				}
				swg.Add()
				// Stagger the first cmd.Threads starts
				// by one second each.
				if threadCounter < cmd.Threads {
					time.Sleep(time.Second)
					threadCounter++
				}
				go cmd.process(&swg, ka, kernel)
			}
		}
	}
	swg.Wait()

	if !found {
		err = errors.New("no supported kernels found")
	}

	return
}
// kernelMask parses a "distroType:regex" CLI value into a target.
func kernelMask(kernel string) (km artifact.Target, err error) {
	fields := strings.Split(kernel, ":")
	if len(fields) != 2 {
		return km, errors.New("kernel is not 'distroType:regex'")
	}

	id, err := distro.NewID(fields[0])
	if err != nil {
		return
	}

	km = artifact.Target{
		Distro: distro.Distro{ID: id},
		Kernel: artifact.Kernel{Regex: fields[1]},
	}
	return
}
// genOkFail renders an optional stage name plus SUCCESS/FAILURE as a
// green or bold-red badge.
func genOkFail(name string, ok bool) (aurv aurora.Value) {
	label := ""
	if name != "" {
		label = " " + name
	}
	if ok {
		return aurora.BgGreen(aurora.Black(label + " SUCCESS "))
	}
	return aurora.BgRed(aurora.White(aurora.Bold(label + " FAILURE ")))
}
// dumpResult records one test result: updates the global counters,
// prints a colorized status line, stores the entry in the database
// and optionally copies the build artifact into dist.
func dumpResult(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
	res *artifact.Result, dist, tag, binary string, db *sql.DB) {

	// TODO refactor

	if res.InternalError != nil {
		// Tool-side failure: not counted as a run unless
		// --include-internal-errors is set (handled in Run).
		q.Log.Warn().Err(res.InternalError).
			Str("panic", fmt.Sprintf("%v", q.KernelPanic)).
			Str("timeout", fmt.Sprintf("%v", q.KilledByTimeout)).
			Msg("internal")
		res.InternalErrorString = res.InternalError.Error()
		state.InternalErrors += 1
	} else {
		colored := ""

		state.Overall += 1
		if res.Test.Ok {
			state.Success += 1
		}

		// Badge layout depends on the artifact type.
		switch ka.Type {
		case artifact.KernelExploit:
			colored = aurora.Sprintf("%s %s",
				genOkFail("BUILD", res.Build.Ok),
				genOkFail("LPE", res.Test.Ok))
		case artifact.KernelModule:
			colored = aurora.Sprintf("%s %s %s",
				genOkFail("BUILD", res.Build.Ok),
				genOkFail("INSMOD", res.Run.Ok),
				genOkFail("TEST", res.Test.Ok))
		case artifact.Script:
			colored = aurora.Sprintf("%s",
				genOkFail("", res.Test.Ok))
		}

		additional := ""
		if q.KernelPanic {
			additional = "(panic)"
		} else if q.KilledByTimeout {
			additional = "(timeout)"
		}

		if additional != "" {
			q.Log.Info().Msgf("%v %v", colored, additional)
		} else {
			q.Log.Info().Msgf("%v", colored)
		}
	}

	err := addToLog(db, q, ka, ki, res, tag)
	if err != nil {
		q.Log.Warn().Err(err).Msgf("[db] addToLog (%v)", ka)
	}

	// Keep the build artifact only when built here and a real
	// dist path was given.
	if binary == "" && dist != pathDevNull {
		err = os.MkdirAll(dist, os.ModePerm)
		if err != nil {
			log.Warn().Err(err).Msgf("os.MkdirAll (%v)", ka)
		}

		path := fmt.Sprintf("%s/%s-%s-%s", dist, ki.Distro.ID,
			ki.Distro.Release, ki.KernelRelease)
		if ka.Type != artifact.KernelExploit {
			path += ".ko"
		}

		err = artifact.CopyFile(res.BuildArtifact, path)
		if err != nil {
			log.Warn().Err(err).Msgf("copy file (%v)", ka)
		}
	}
}

View File

@ -5,242 +5,17 @@
package config
import (
"errors"
"fmt"
"io/ioutil"
"io"
"os"
"regexp"
"strconv"
"strings"
"time"
"code.dumpstack.io/tools/out-of-tree/distro"
"github.com/naoina/toml"
)
// kernel holds per-component kernel version constraints.
// NOTE(review): the fields are unused in this view — presumably each
// slice lists allowed values for that version component; confirm
// against the consumers.
type kernel struct {
	Version []int
	Major []int
	Minor []int
	Patch []int
}

// KernelMask defines the kernel
type KernelMask struct {
	DistroType DistroType
	DistroRelease string // 18.04/7.4.1708/9.1

	// ReleaseMask is a regex matched against the kernel release.
	ReleaseMask string

	// Overrides ReleaseMask
	Kernel kernel
}
// DockerName returns a stable name for the docker container, built
// from the lowercased distro type and the release with dots
// replaced by double underscores.
func (km KernelMask) DockerName() string {
	return fmt.Sprintf("out_of_tree_%s_%s",
		strings.ToLower(km.DistroType.String()),
		strings.Replace(km.DistroRelease, ".", "__", -1))
}
// ArtifactType is the kernel module or exploit
type ArtifactType int

const (
	// KernelModule is any kind of kernel module
	KernelModule ArtifactType = iota
	// KernelExploit is the privilege escalation exploit
	KernelExploit
)

// String renders the artifact type; panics on unknown values.
func (at ArtifactType) String() string {
	names := [...]string{"module", "exploit"}
	return names[at]
}

// UnmarshalTOML is for support github.com/naoina/toml
func (at *ArtifactType) UnmarshalTOML(data []byte) (err error) {
	stype := strings.Trim(string(data), `"`)
	stypelower := strings.ToLower(stype)
	switch {
	case strings.Contains(stypelower, "module"):
		*at = KernelModule
	case strings.Contains(stypelower, "exploit"):
		*at = KernelExploit
	default:
		err = fmt.Errorf("Type %s is unsupported", stype)
	}
	return
}

// MarshalTOML is for support github.com/naoina/toml
func (at ArtifactType) MarshalTOML() (data []byte, err error) {
	s := ""
	switch at {
	case KernelModule:
		s = "module"
	case KernelExploit:
		s = "exploit"
	default:
		err = fmt.Errorf("Cannot marshal %d", at)
	}
	data = []byte(`"` + s + `"`)
	return
}
// Duration type with toml unmarshalling support
type Duration struct {
	time.Duration
}

// UnmarshalTOML parses a quoted Go duration string (e.g. "1m30s").
func (d *Duration) UnmarshalTOML(data []byte) (err error) {
	unquoted := strings.Replace(string(data), "\"", "", -1)
	d.Duration, err = time.ParseDuration(unquoted)
	return
}

// MarshalTOML renders the duration back as a quoted string.
func (d Duration) MarshalTOML() (data []byte, err error) {
	return []byte(`"` + d.Duration.String() + `"`), nil
}
// Artifact is for .out-of-tree.toml
type Artifact struct {
	Name string
	Type ArtifactType
	SourcePath string
	SupportedKernels []KernelMask

	// Qemu holds VM resource limits and the run timeout.
	Qemu struct {
		Cpus int
		Memory int
		Timeout Duration
	}

	// Mitigations lists kernel hardening features to disable
	// for the run.
	Mitigations struct {
		DisableSmep bool
		DisableSmap bool
		DisableKaslr bool
		DisableKpti bool
	}
}
// checkSupport reports whether a kernel matches one support mask:
// the distro type must match, the release must match when set, and
// the kernel release must match the mask regex.
func (ka Artifact) checkSupport(ki KernelInfo, km KernelMask) (
	supported bool, err error) {

	if ki.DistroType != km.DistroType {
		return false, nil
	}

	// DistroRelease is optional
	if km.DistroRelease != "" && ki.DistroRelease != km.DistroRelease {
		return false, nil
	}

	return regexp.MatchString(km.ReleaseMask, ki.KernelRelease)
}
// Supported returns true if given kernel is supported by artifact
func (ka Artifact) Supported(ki KernelInfo) (supported bool, err error) {
	// First matching mask wins; otherwise the result of the last
	// check (including its error, if any) is returned.
	for _, mask := range ka.SupportedKernels {
		if supported, err = ka.checkSupport(ki, mask); supported {
			return
		}
	}
	return
}
// DistroType is enum with all supported distros
type DistroType int

const (
	// Ubuntu https://ubuntu.com/
	Ubuntu DistroType = iota
	// CentOS https://www.centos.org/
	CentOS
	// Debian https://www.debian.org/
	Debian
)

// DistroTypeStrings is the string version of enum DistroType
var DistroTypeStrings = [...]string{"Ubuntu", "CentOS", "Debian"}

// NewDistroType is create new Distro object
func NewDistroType(dType string) (dt DistroType, err error) {
	err = dt.UnmarshalTOML([]byte(dType))
	return
}

// String returns the canonical distro name; values outside the enum
// range panic, as with direct array indexing.
func (dt DistroType) String() string {
	return DistroTypeStrings[dt]
}

// UnmarshalTOML is for support github.com/naoina/toml
func (dt *DistroType) UnmarshalTOML(data []byte) (err error) {
	sDistro := strings.Trim(string(data), `"`)
	// Case-insensitive match against the known names; the index in
	// DistroTypeStrings equals the enum value by construction.
	for i, name := range DistroTypeStrings {
		if strings.EqualFold(sDistro, name) {
			*dt = DistroType(i)
			return
		}
	}
	return fmt.Errorf("Distro %s is unsupported", sDistro)
}

// MarshalTOML is for support github.com/naoina/toml
func (dt DistroType) MarshalTOML() (data []byte, err error) {
	s := ""
	if dt >= Ubuntu && dt <= Debian {
		s = DistroTypeStrings[dt]
	} else {
		err = fmt.Errorf("Cannot marshal %d", dt)
	}
	// On error data is still the quoted empty string, matching the
	// original switch-based implementation.
	data = []byte(`"` + s + `"`)
	return
}
// ByRootFS is sorting by .RootFS lexicographically
type ByRootFS []KernelInfo

// Len is part of sort.Interface.
func (a ByRootFS) Len() int { return len(a) }

// Swap is part of sort.Interface.
func (a ByRootFS) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less orders kernels by their root filesystem image path.
func (a ByRootFS) Less(i, j int) bool { return a[i].RootFS < a[j].RootFS }
// KernelInfo defines kernels.toml entries
type KernelInfo struct {
	DistroType    DistroType
	DistroRelease string // 18.04/7.4.1708/9.1

	// Must be *exactly* same as in `uname -r`
	KernelRelease string

	// Build-time information
	KernelSource  string // module/exploit will be build on host
	ContainerName string

	// Runtime information
	KernelPath string // kernel image (vmlinuz) path
	InitrdPath string // initial ramdisk path
	RootFS     string // root filesystem image path

	// Debug symbols
	VmlinuxPath string
}
// KernelConfig is the ~/.out-of-tree/kernels.toml configuration description
type KernelConfig struct {
Kernels []KernelInfo
Kernels []distro.KernelInfo
}
func readFileAll(path string) (buf []byte, err error) {
@ -250,7 +25,7 @@ func readFileAll(path string) (buf []byte, err error) {
}
defer f.Close()
buf, err = ioutil.ReadAll(f)
buf, err = io.ReadAll(f)
return
}
@ -268,94 +43,3 @@ func ReadKernelConfig(path string) (kernelCfg KernelConfig, err error) {
return
}
// rangeRegexp returns a regexp alternation matching any integer in
// the inclusive range [start, end], e.g. rangeRegexp(1, 3) == "(1|2|3)".
func rangeRegexp(start, end int) string {
	// strings.Builder avoids quadratic string += concatenation for
	// wide ranges (e.g. a hundred-plus patch levels).
	var b strings.Builder
	b.WriteByte('(')
	for i := start; i <= end; i++ {
		b.WriteString(strconv.Itoa(i))
		if i != end {
			b.WriteByte('|')
		}
	}
	b.WriteByte(')')
	return b.String()
}

// versionRegexp converts a one-element (exact value) or two-element
// (inclusive range) version component into its regexp form.
func versionRegexp(l []int) (s string, err error) {
	switch len(l) {
	case 1:
		s = strconv.Itoa(l[0])
	case 2:
		s = rangeRegexp(l[0], l[1])
	default:
		err = errors.New("version must contain one value or range")
	}
	return
}
// genReleaseMask builds a kernel release regexp from the structured
// version/major/minor/patch ranges in km, e.g. "4[.]4[.]0-(1|2)-.*".
func genReleaseMask(km kernel) (mask string, err error) {
	s, err := versionRegexp(km.Version)
	if err != nil {
		return
	}
	// "[.]" matches a literal dot between version components
	mask += s + "[.]"

	s, err = versionRegexp(km.Major)
	if err != nil {
		return
	}
	mask += s + "[.]"

	s, err = versionRegexp(km.Minor)
	if err != nil {
		return
	}
	mask += s

	// The patch level is optional; when present it is wrapped in
	// dashes (distro releases look like 4.4.0-116-generic).
	switch len(km.Patch) {
	case 0:
		// ok
	case 1:
		mask += "-" + strconv.Itoa(km.Patch[0]) + "-"
	case 2:
		mask += "-" + rangeRegexp(km.Patch[0], km.Patch[1]) + "-"
	default:
		err = errors.New("version must contain one value or range")
		return
	}

	mask += ".*"
	return
}
// ReadArtifactConfig is for read .out-of-tree.toml
func ReadArtifactConfig(path string) (ka Artifact, err error) {
	buf, err := readFileAll(path)
	if err != nil {
		return
	}

	err = toml.Unmarshal(buf, &ka)
	if err != nil {
		return
	}

	// `for i := range` instead of `for i, _ := range` (go vet / S1005);
	// a pointer is taken so the mask is mutated in place.
	for i := range ka.SupportedKernels {
		km := &ka.SupportedKernels[i]

		// Exactly one way to specify the kernel version is
		// allowed: a structured Kernel range or a raw ReleaseMask.
		if len(km.Kernel.Version) != 0 && km.ReleaseMask != "" {
			s := "Only one way to define kernel version is allowed"
			err = errors.New(s)
			return
		}

		// Derive the release mask from the structured version.
		if km.ReleaseMask == "" {
			km.ReleaseMask, err = genReleaseMask(km.Kernel)
			if err != nil {
				return
			}
		}
	}
	return
}

View File

@ -1,65 +0,0 @@
// Copyright 2018 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package config
import (
"testing"
"github.com/naoina/toml"
)
// TestMarshalUnmarshal checks that an Artifact round-trips through
// the naoina/toml encoder and decoder without errors.
func TestMarshalUnmarshal(t *testing.T) {
	artifactCfg := Artifact{
		Name: "Put name here",
		Type: KernelModule,
	}
	artifactCfg.SupportedKernels = append(artifactCfg.SupportedKernels,
		KernelMask{Ubuntu, "18.04", ".*", kernel{}})
	buf, err := toml.Marshal(&artifactCfg)
	if err != nil {
		t.Fatal(err)
	}
	var artifactCfgNew Artifact
	err = toml.Unmarshal(buf, &artifactCfgNew)
	if err != nil {
		t.Fatal(err)
	}
}

// TestKernelRegex pins genReleaseMask output for a patch-level range
// (4.4.0-1 .. 4.4.0-116) and for a version without a patch level.
func TestKernelRegex(t *testing.T) {
	mask := "4[.]4[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58|59|60|61|62|63|64|65|66|67|68|69|70|71|72|73|74|75|76|77|78|79|80|81|82|83|84|85|86|87|88|89|90|91|92|93|94|95|96|97|98|99|100|101|102|103|104|105|106|107|108|109|110|111|112|113|114|115|116)-.*"
	k := kernel{
		Version: []int{4},
		Major:   []int{4},
		Minor:   []int{0},
		Patch:   []int{1, 116},
	}
	gmask, err := genReleaseMask(k)
	if err != nil {
		t.Fatal(err)
	}
	if mask != gmask {
		t.Fatal("Got", gmask, "instead of", mask)
	}

	// No patch level: the mask ends with ".*" directly after minor.
	mask = "4[.]4[.]0.*"
	k = kernel{
		Version: []int{4},
		Major:   []int{4},
		Minor:   []int{0},
	}
	gmask, err = genReleaseMask(k)
	if err != nil {
		t.Fatal(err)
	}
	if mask != gmask {
		t.Fatal("Got", gmask, "instead of", mask)
	}
}

View File

@ -0,0 +1,47 @@
package dotfiles
import (
"os"
"os/user"
"path/filepath"
"github.com/rs/zerolog/log"
)
// Directory for config files
var Directory string

// directory lazily resolves the dotfiles base path, caching it in
// Directory; the default is ~/.out-of-tree.
func directory() string {
	if Directory == "" {
		usr, err := user.Current()
		if err != nil {
			log.Fatal().Err(err).Msg("get current user")
		}
		Directory = filepath.Join(usr.HomeDir, ".out-of-tree")
	}
	return Directory
}
// Dir that exist relative to config directory
func Dir(s ...string) (dir string) {
	parts := append([]string{directory()}, s...)
	dir = filepath.Join(parts...)
	// Create the full path so callers can rely on its existence.
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		log.Fatal().Err(err).Msg("mkdir")
	}
	return
}
// File in existing dir relative to config directory
func File(s ...string) (file string) {
	parts := append([]string{directory()}, s...)
	file = filepath.Join(parts...)
	// Only the parent directory is created; the file itself is not.
	if err := os.MkdirAll(filepath.Dir(file), os.ModePerm); err != nil {
		log.Fatal().Err(err).Msg("mkdir")
	}
	return
}

View File

@ -0,0 +1,113 @@
package dotfiles
import (
"os"
"path/filepath"
"testing"
)
// TestDirectory checks that a preset Directory overrides resolution.
func TestDirectory(t *testing.T) {
	testdir := "test"
	Directory = testdir
	if directory() != testdir {
		t.Fatalf("%s != %s", directory(), testdir)
	}
}

// TestDir checks that Dir joins path elements under the base
// directory and creates the resulting directory tree.
func TestDir(t *testing.T) {
	tmpdir, err := os.MkdirTemp("", "out-of-tree_")
	if err != nil {
		return
	}
	defer os.RemoveAll(tmpdir)
	Directory = tmpdir
	// Single nested-path argument.
	for _, testdir := range []string{"a", "a/b", "a/b/c"} {
		expected := filepath.Join(tmpdir, testdir)
		t.Log(testdir, "->", expected)
		resdir := Dir(testdir)
		if resdir != expected {
			t.Fatalf("%s != %s", resdir, expected)
		}
		fi, err := os.Stat(expected)
		if err != nil {
			t.Fatal(err)
		}
		if !fi.IsDir() {
			t.Fatal("not a directory")
		}
	}
	// Variadic form: each element is a path component.
	testdir := []string{"a", "b", "c", "d"}
	expected := filepath.Join(append([]string{tmpdir}, testdir...)...)
	t.Log(testdir, "->", expected)
	resdir := Dir(testdir...)
	if resdir != expected {
		t.Fatalf("%s != %s", resdir, expected)
	}
	fi, err := os.Stat(expected)
	if err != nil {
		t.Fatal(err)
	}
	if !fi.IsDir() {
		t.Fatal("not a directory")
	}
}

// TestFile checks that File creates only the parent directory, never
// the file itself.
func TestFile(t *testing.T) {
	tmpdir, err := os.MkdirTemp("", "out-of-tree_")
	if err != nil {
		return
	}
	defer os.RemoveAll(tmpdir)
	Directory = tmpdir
	for _, testfile := range []string{"a", "a/b", "a/b/c"} {
		expected := filepath.Join(tmpdir, testfile)
		t.Log(testfile, "->", expected)
		resfile := File(testfile)
		if resfile != expected {
			t.Fatalf("%s != %s", resfile, expected)
		}
		// The file must not exist — only its parent directory.
		_, err := os.Stat(expected)
		if err == nil {
			t.Fatal("should not exist")
		}
		fi, err := os.Stat(filepath.Dir(expected))
		if err != nil {
			t.Fatal(err)
		}
		if !fi.IsDir() {
			t.Fatal("not a directory")
		}
	}
	// NOTE(review): this tail calls Dir, not File — it repeats the
	// variadic Dir check from TestDir; confirm that was intended.
	testfile := []string{"a", "b", "c"}
	expected := filepath.Join(append([]string{tmpdir}, testfile...)...)
	t.Log(testfile, "->", expected)
	resdir := Dir(testfile...)
	if resdir != expected {
		t.Fatalf("%s != %s", resdir, expected)
	}
	fi, err := os.Stat(expected)
	if err != nil {
		t.Fatal(err)
	}
	if !fi.IsDir() {
		t.Fatal("not a directory")
	}
}

View File

@ -5,36 +5,66 @@
package config
import (
"os/user"
"errors"
"os"
"time"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
"github.com/alecthomas/kong"
"github.com/mitchellh/go-homedir"
"github.com/naoina/toml"
)
type DockerCommand struct {
DistroType DistroType
Command string
}
type OutOfTree struct {
// Directory for all files if not explicitly specified
Directory string
Kernels string
UserKernels string
Database string
Qemu struct {
Timeout string
Timeout artifact.Duration
}
Docker struct {
Timeout string
Timeout artifact.Duration
Registry string
// Commands that will be executed before
// the base layer of Dockerfile
Commands []DockerCommand
Commands []distro.Command
}
}
// Decode implements kong's custom decoding for the config flag: it
// expands ~ in the given path, requires a non-default path to exist,
// and loads the configuration from it.
func (c *OutOfTree) Decode(ctx *kong.DecodeContext) (err error) {
	// Value already set: nothing to do.
	if ctx.Value.Set {
		return
	}

	s, err := homedir.Expand(ctx.Scan.Pop().String())
	if err != nil {
		return
	}

	defaultValue, err := homedir.Expand(ctx.Value.Default)
	if err != nil {
		return
	}

	// A missing file is only an error when the user explicitly
	// pointed at a non-default location; the default config may be
	// absent (ReadOutOfTreeConf fills in defaults).
	_, err = os.Stat(s)
	if s != defaultValue && errors.Is(err, os.ErrNotExist) {
		return errors.New("'" + s + "' does not exist")
	}

	*c, err = ReadOutOfTreeConf(s)
	return
}
func ReadOutOfTreeConf(path string) (c OutOfTree, err error) {
buf, err := readFileAll(path)
if err == nil {
@ -48,29 +78,30 @@ func ReadOutOfTreeConf(path string) (c OutOfTree, err error) {
err = nil
}
usr, err := user.Current()
if err != nil {
return
if c.Directory != "" {
dotfiles.Directory = c.Directory
} else {
c.Directory = dotfiles.Dir("")
}
if c.Kernels == "" {
c.Kernels = usr.HomeDir + "/.out-of-tree/kernels.toml"
c.Kernels = dotfiles.File("kernels.toml")
}
if c.UserKernels == "" {
c.UserKernels = usr.HomeDir + "/.out-of-tree/kernels.user.toml"
c.UserKernels = dotfiles.File("kernels.user.toml")
}
if c.Database == "" {
c.Database = usr.HomeDir + "/.out-of-tree/db.sqlite"
c.Database = dotfiles.File("db.sqlite")
}
if c.Qemu.Timeout == "" {
c.Qemu.Timeout = "1m"
if c.Qemu.Timeout.Duration == 0 {
c.Qemu.Timeout.Duration = time.Minute
}
if c.Docker.Timeout == "" {
c.Docker.Timeout = "1m"
if c.Docker.Timeout.Duration == 0 {
c.Docker.Timeout.Duration = 8 * time.Minute
}
return

540
container/container.go Normal file
View File

@ -0,0 +1,540 @@
// Copyright 2023 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package container
import (
"bufio"
"errors"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// Runtime is the container engine binary to invoke (e.g. docker or
// podman — see the localhost/ re-tag in Load).
var Runtime = "docker"

// Registry, when non-empty, is prepended to base image names in Build.
var Registry = ""

// Timeout limits a single container Run; zero means no timeout.
var Timeout time.Duration

// Commands are extra RUN directives inserted right after FROM.
var Commands []distro.Command

// UseCache skips rebuilds/reloads when the image already exists.
var UseCache = true

// Prune removes dangling images after each build.
var Prune = true

// Image pairs a container image name with the distro it provides.
type Image struct {
	Name   string
	Distro distro.Distro
}
// Images lists all out_of_tree_* images known to the container
// runtime, reconstructing distro ID and release from the image name.
func Images() (diis []Image, err error) {
	cmd := exec.Command(Runtime, "images")
	log.Debug().Msgf("%v", cmd)

	rawOutput, err := cmd.CombinedOutput()
	if err != nil {
		return
	}

	r, err := regexp.Compile("out_of_tree_.*")
	if err != nil {
		return
	}

	containers := r.FindAll(rawOutput, -1)
	for _, c := range containers {
		// First whitespace-separated field is the image name.
		containerName := strings.Fields(string(c))[0]

		// Names are out_of_tree_<distro>_<release> with dots in
		// the release encoded as "__" (see New); decode and split.
		s := strings.Replace(containerName, "__", ".", -1)
		values := strings.Split(s, "_")
		// values: ["out", "of", "tree", <distro>, <release>]
		distroName, ver := values[3], values[4]

		dii := Image{
			Name: containerName,
		}

		dii.Distro.Release = ver
		dii.Distro.ID, err = distro.NewID(distroName)
		if err != nil {
			return
		}

		diis = append(diis, dii)
	}
	return
}
// Load imports an image archive (produced by `save`) from localpath
// under the given name. No-op when the image already exists and
// UseCache is enabled.
func Load(localpath string, name string) (err error) {
	exist := Container{name: name}.Exist()
	if exist && UseCache {
		return
	}

	cmd := exec.Command(Runtime, "load", "-i", localpath)
	log.Debug().Msgf("%v", cmd)

	raw, err := cmd.CombinedOutput()
	if err != nil {
		log.Debug().Err(err).Msg(string(raw))
		return
	}

	// podman prefixes loaded images with "localhost/"; re-tag so
	// the image is reachable by its plain name on either runtime.
	cmd = exec.Command(Runtime, "tag", "localhost/"+name, name)
	log.Debug().Msgf("%v", cmd)

	raw, err = cmd.CombinedOutput()
	if err != nil {
		log.Debug().Err(err).Msg(string(raw))
		return
	}
	return
}

// Import creates an image named name from a filesystem tarball at
// path. No-op when the image already exists and UseCache is enabled.
func Import(path, name string) (err error) {
	exist := Container{name: name}.Exist()
	if exist && UseCache {
		return
	}

	cmd := exec.Command(Runtime, "import", path, name)
	log.Debug().Msgf("%v", cmd)

	raw, err := cmd.CombinedOutput()
	if err != nil {
		log.Debug().Err(err).Msg(string(raw))
		return
	}
	return
}

// Save exports an existing image to an archive at path; it is an
// error if the image does not exist.
func Save(name, path string) (err error) {
	exist := Container{name: name}.Exist()
	if !exist {
		err = errors.New("container does not exist")
		log.Error().Err(err).Msg("")
		return
	}

	cmd := exec.Command(Runtime, "save", name, "-o", path)
	log.Debug().Msgf("%v", cmd)

	raw, err := cmd.CombinedOutput()
	if err != nil {
		log.Error().Err(err).Msg(string(raw))
		return
	}
	return
}
// Volume describes a bind mount from host (Src) into container (Dest).
type Volume struct {
	Src, Dest string
}

// Container wraps a single out-of-tree container image together with
// the volumes it is run with.
type Container struct {
	name string
	dist distro.Distro

	Volumes []Volume

	// Additional arguments
	Args []string

	Log zerolog.Logger
}

// New creates a Container for the given distro named
// out_of_tree_<distro>_<release> ("." encoded as "__", decoded back
// in Images) with persistent volumes under the dotfiles directory.
func New(dist distro.Distro) (c Container, err error) {
	distro := strings.ToLower(dist.ID.String())
	release := strings.Replace(dist.Release, ".", "__", -1)
	c.name = fmt.Sprintf("out_of_tree_%s_%s", distro, release)

	c.Log = log.With().
		Str("container", c.name).
		Logger()

	c.dist = dist

	c.Volumes = append(c.Volumes, Volume{
		Src:  dotfiles.Dir("volumes", c.name, "lib", "modules"),
		Dest: "/lib/modules",
	})

	c.Volumes = append(c.Volumes, Volume{
		Src:  dotfiles.Dir("volumes", c.name, "usr", "src"),
		Dest: "/usr/src",
	})

	c.Volumes = append(c.Volumes, Volume{
		Src:  dotfiles.Dir("volumes", c.name, "boot"),
		Dest: "/boot",
	})

	return
}

// NewFromKernelInfo builds a Container around an already-installed
// kernel, mounting that kernel's modules/source/boot paths.
func NewFromKernelInfo(ki distro.KernelInfo) (
	c Container, err error) {

	c.name = ki.ContainerName

	c.Log = log.With().
		Str("container", c.name).
		Logger()

	c.Volumes = append(c.Volumes, Volume{
		Src:  path.Dir(ki.ModulesPath),
		Dest: "/lib/modules",
	})

	c.Volumes = append(c.Volumes, Volume{
		Src:  filepath.Join(path.Dir(ki.KernelPath), "../usr/src"),
		Dest: "/usr/src",
	})

	c.Volumes = append(c.Volumes, Volume{
		Src:  path.Dir(ki.KernelPath),
		Dest: "/boot",
	})

	return
}

// Name returns the container image name.
func (c Container) Name() string {
	return c.name
}

// Exist reports whether the image is present in local storage
// (non-empty `images -q` output).
func (c Container) Exist() (yes bool) {
	cmd := exec.Command(Runtime, "images", "-q", c.name)

	c.Log.Debug().Msgf("run %v", cmd)

	raw, err := cmd.CombinedOutput()
	if err != nil {
		c.Log.Error().Err(err).Msg(string(raw))
		return false
	}

	yes = string(raw) != ""

	if yes {
		c.Log.Debug().Msg("exist")
	} else {
		c.Log.Debug().Msg("does not exist")
	}

	return
}
// Build generates the Dockerfile for this container (base image plus
// global Commands, envs, and runs) and builds the image. The build is
// skipped when the generated Dockerfile is unchanged on disk, the
// image exists, and UseCache is enabled.
func (c Container) Build(image string, envs, runs []string) (err error) {
	cdir := dotfiles.Dir("containers", c.name)
	cfile := filepath.Join(cdir, "Dockerfile")

	cf := "FROM "
	if Registry != "" {
		cf += Registry + "/"
	}
	cf += image + "\n"

	// Global pre-base-layer commands.
	for _, c := range Commands {
		// TODO check for distro type
		cf += "RUN " + c.Command + "\n"
	}

	for _, e := range envs {
		cf += "ENV " + e + "\n"
	}

	for _, c := range runs {
		cf += "RUN " + c + "\n"
	}

	// First build (or unreadable Dockerfile): write it out now; buf
	// stays empty so the cache check below fails and we build.
	buf, err := os.ReadFile(cfile)
	if err != nil {
		err = os.WriteFile(cfile, []byte(cf), os.ModePerm)
		if err != nil {
			return
		}
	}

	// Cache hit: identical Dockerfile and the image already exists.
	if string(buf) == cf && c.Exist() && UseCache {
		return
	}

	err = os.WriteFile(cfile, []byte(cf), os.ModePerm)
	if err != nil {
		return
	}

	if c.Exist() {
		c.Log.Info().Msg("update")
	} else {
		c.Log.Info().Msg("build")
	}

	output, err := c.build(cdir)
	if err != nil {
		c.Log.Error().Err(err).Msg(output)
		return
	}

	c.Log.Info().Msg("success")
	return
}

// prune removes dangling or unused images from local storage.
func (c Container) prune() error {
	c.Log.Debug().Msg("remove dangling or unused images from local storage")
	return exec.Command(Runtime, "image", "prune", "-f").Run()
}

// build runs `<Runtime> build` in imagePath, streaming each output
// line to the trace log while accumulating it for the caller.
func (c Container) build(imagePath string) (output string, err error) {
	if Prune {
		defer c.prune()
	}

	args := []string{"build"}
	if !UseCache {
		args = append(args, "--pull", "--no-cache")
	}
	args = append(args, "-t", c.name, imagePath)

	cmd := exec.Command(Runtime, args...)

	flog := c.Log.With().
		Str("command", fmt.Sprintf("%v", cmd)).
		Logger()

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	// stderr is merged into the same pipe.
	cmd.Stderr = cmd.Stdout

	err = cmd.Start()
	if err != nil {
		return
	}

	go func() {
		scanner := bufio.NewScanner(stdout)
		for scanner.Scan() {
			m := scanner.Text()
			output += m + "\n"
			flog.Trace().Str("stdout", m).Msg("")
		}
	}()

	err = cmd.Wait()
	return
}
// Run executes cmds inside the container (joined with && so the chain
// stops at the first failure), optionally bind-mounting workdir at
// /work. Returns the combined stdout+stderr output; on failure the
// error message embeds that output.
func (c Container) Run(workdir string, cmds []string) (out string, err error) {
	flog := c.Log.With().
		Str("workdir", workdir).
		Str("command", fmt.Sprintf("%v", cmds)).
		Logger()

	var args []string
	args = append(args, "run", "--rm")
	args = append(args, c.Args...)
	if workdir != "" {
		args = append(args, "-v", workdir+":/work")
	}

	for _, volume := range c.Volumes {
		mount := fmt.Sprintf("%s:%s", volume.Src, volume.Dest)
		args = append(args, "-v", mount)
	}

	// "true && cmd1 && cmd2 ..." keeps the chain valid even when
	// cmds is empty.
	command := "true"
	for _, c := range cmds {
		command += fmt.Sprintf(" && %s", c)
	}

	args = append(args, c.name, "bash", "-c")
	if workdir != "" {
		args = append(args, "cd /work && "+command)
	} else {
		args = append(args, command)
	}

	cmd := exec.Command(Runtime, args...)

	flog.Debug().Msgf("%v", cmd)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	// stderr is merged into the same pipe.
	cmd.Stderr = cmd.Stdout

	// Graceful timeout: SIGINT first, SIGKILL a minute later.
	if Timeout != 0 {
		timer := time.AfterFunc(Timeout, func() {
			flog.Info().Msg("killing container by timeout")

			flog.Debug().Msg("SIGINT")
			cmd.Process.Signal(os.Interrupt)

			time.Sleep(time.Minute)

			flog.Debug().Msg("SIGKILL")
			cmd.Process.Kill()
		})
		defer timer.Stop()
	}

	err = cmd.Start()
	if err != nil {
		return
	}

	// Stream output lines into both the trace log and `out`.
	go func() {
		scanner := bufio.NewScanner(stdout)
		for scanner.Scan() {
			m := scanner.Text()
			out += m + "\n"
			flog.Trace().Str("stdout", m).Msg("")
		}
	}()

	err = cmd.Wait()
	if err != nil {
		e := fmt.Sprintf("error `%v` for cmd `%v` with output `%v`",
			err, cmds, out)
		err = errors.New(e)
		return
	}
	return
}
func FindKernel(entries []os.DirEntry, kname string) (name string, err error) {
for _, e := range entries {
var fi os.FileInfo
fi, err = e.Info()
if err != nil {
return
}
if strings.HasPrefix(fi.Name(), "vmlinuz") {
if strings.Contains(fi.Name(), kname) {
name = fi.Name()
return
}
}
}
err = errors.New("cannot find kernel")
return
}
func FindInitrd(entries []os.DirEntry, kname string) (name string, err error) {
for _, e := range entries {
var fi os.FileInfo
fi, err = e.Info()
if err != nil {
return
}
if strings.HasPrefix(fi.Name(), "initrd") ||
strings.HasPrefix(fi.Name(), "initramfs") {
if strings.Contains(fi.Name(), kname) {
name = fi.Name()
return
}
}
}
err = errors.New("cannot find kernel")
return
}
// Kernels enumerates the kernels installed in this container's
// volumes: each directory under /lib/modules paired with matching
// vmlinuz/initrd files from /boot. Entries without a matching kernel
// or initrd are logged and skipped.
func (c Container) Kernels() (kernels []distro.KernelInfo, err error) {
	if !c.Exist() {
		return
	}

	// Resolve the host paths backing /lib/modules and /boot.
	var libmodules, boot string
	for _, volume := range c.Volumes {
		switch volume.Dest {
		case "/lib/modules":
			libmodules = volume.Src
		case "/boot":
			boot = volume.Src
		}
	}

	moddirs, err := os.ReadDir(libmodules)
	if err != nil {
		return
	}

	bootfiles, err := os.ReadDir(boot)
	if err != nil {
		return
	}

	for _, e := range moddirs {
		var krel os.FileInfo
		krel, err = e.Info()
		if err != nil {
			return
		}
		c.Log.Debug().Msgf("generate config entry for %s", krel.Name())

		var kernelFile, initrdFile string
		kernelFile, err = FindKernel(bootfiles, krel.Name())
		if err != nil {
			c.Log.Warn().Msgf("cannot find kernel %s", krel.Name())
			continue
		}

		initrdFile, err = FindInitrd(bootfiles, krel.Name())
		if err != nil {
			c.Log.Warn().Msgf("cannot find initrd %s", krel.Name())
			continue
		}

		ki := distro.KernelInfo{
			Distro:        c.dist,
			KernelVersion: krel.Name(),
			KernelRelease: krel.Name(),
			ContainerName: c.name,

			KernelPath:  filepath.Join(boot, kernelFile),
			InitrdPath:  filepath.Join(boot, initrdFile),
			ModulesPath: filepath.Join(libmodules, krel.Name()),

			RootFS: dotfiles.File("images", c.dist.RootFS()),
		}

		kernels = append(kernels, ki)
	}

	// Make everything under /boot world-readable so the kernel and
	// initrd files can be used from the host.
	for _, cmd := range []string{
		"find /boot -type f -exec chmod a+r {} \\;",
	} {
		_, err = c.Run(dotfiles.Dir("tmp"), []string{cmd})
		if err != nil {
			return
		}
	}
	return
}

302
daemon/commands.go Normal file
View File

@ -0,0 +1,302 @@
package daemon
import (
"database/sql"
"errors"
"fmt"
"io"
"net"
"os"
"os/exec"
"path/filepath"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/daemon/db"
)
// cmdenv carries per-connection daemon state into command handlers.
type cmdenv struct {
	// Conn is the client connection (used directly by rawMode).
	Conn net.Conn

	Log zerolog.Logger

	DB *sql.DB

	// WG delays daemon shutdown while commands are in flight.
	WG *sync.WaitGroup

	// KernelConfig is the kernels.toml path served by `kernels`.
	KernelConfig string
}

// command dispatches a decoded request to its handler and records
// the handler's error in the response.
func command(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	e.Log.Trace().Msgf("%v", spew.Sdump(req))
	defer e.Log.Trace().Msgf("%v", spew.Sdump(resp))

	// Keep the daemon from shutting down mid-command.
	e.WG.Add(1)
	defer e.WG.Done()

	e.Log.Debug().Msgf("%v", req.Command)

	switch req.Command {
	case api.RawMode:
		err = rawMode(req, e)
	case api.AddJob:
		err = addJob(req, resp, e)
	case api.ListJobs:
		err = listJobs(req, resp, e)
	case api.AddRepo:
		err = addRepo(req, resp, e)
	case api.ListRepos:
		err = listRepos(resp, e)
	case api.Kernels:
		err = kernels(resp, e)
	case api.JobStatus:
		err = jobStatus(req, resp, e)
	case api.JobLogs:
		err = jobLogs(req, resp, e)
	default:
		err = errors.New("unknown command")
	}

	resp.Err = err
	return
}

// logWriter is an io.Writer sink for proxied git traffic; the data is
// discarded (trace logging is left commented out to avoid noise).
type logWriter struct {
	log zerolog.Logger
}

// Write reports p as fully written without storing it.
func (lw logWriter) Write(p []byte) (n int, err error) {
	n = len(p)
	//lw.log.Trace().Msgf("%v", strconv.Quote(string(p)))
	return
}

// rawMode proxies the client connection to the local git daemon on
// port 9418, teeing both directions through logWriters; it blocks
// until the client side is done.
func rawMode(req *api.Req, e cmdenv) (err error) {
	uuid := uuid.New().String()

	lwsend := logWriter{log.With().Str("uuid", uuid).Str("git", "send").Logger()}
	lwrecv := logWriter{log.With().Str("uuid", uuid).Str("git", "recv").Logger()}

	conn, err := net.Dial("tcp", ":9418")
	if err != nil {
		log.Error().Err(err).Msg("dial")
		return
	}

	go io.Copy(e.Conn, io.TeeReader(conn, lwrecv))
	io.Copy(conn, io.TeeReader(e.Conn, lwsend))

	return
}
// listJobs returns jobs updated at or after params.UpdatedAfter,
// filtered in-memory by group/repo/commit/status when those fields
// are non-empty.
func listJobs(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var params api.ListJobsParams
	err = req.GetData(&params)
	if err != nil {
		return
	}

	// Time filtering happens in SQL; the rest is filtered below.
	jobs, err := db.Jobs(e.DB, "updated >= ?", params.UpdatedAfter)
	if err != nil {
		return
	}

	var result []api.Job
	for _, j := range jobs {
		// Empty filter fields match everything.
		if params.Group != "" && j.Group != params.Group {
			continue
		}

		if params.Repo != "" && j.RepoName != params.Repo {
			continue
		}

		if params.Commit != "" && j.Commit != params.Commit {
			continue
		}

		if params.Status != "" && j.Status != params.Status {
			continue
		}

		result = append(result, j)
	}

	resp.SetData(&result)
	return
}
// addJob validates and stores a new job, replying with its UUID.
func addJob(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var job api.Job
	err = req.GetData(&job)
	if err != nil {
		return
	}

	job.GenUUID()

	job.Created = time.Now()

	// Validate the request before touching the database: previously
	// an empty repo name fell through to the existence check first
	// and produced a misleading "repo does not exist" error.
	if job.RepoName == "" {
		err = errors.New("repo name cannot be empty")
		return
	}

	if job.Commit == "" {
		err = errors.New("invalid commit")
		return
	}

	var repos []api.Repo
	repos, err = db.Repos(e.DB)
	if err != nil {
		return
	}

	var found bool
	for _, r := range repos {
		if job.RepoName == r.Name {
			found = true
			break
		}
	}
	if !found {
		err = errors.New("repo does not exist")
		return
	}

	err = db.AddJob(e.DB, &job)
	if err != nil {
		return
	}

	resp.SetData(&job.UUID)
	return
}
// listRepos returns all repositories with their on-disk paths filled
// in (the path is derived, not stored in the database).
func listRepos(resp *api.Resp, e cmdenv) (err error) {
	repos, err := db.Repos(e.DB)
	if err != nil {
		e.Log.Error().Err(err).Msg("")
		return
	}

	for i := range repos {
		repos[i].Path = dotfiles.Dir("daemon/repos",
			repos[i].Name)
	}

	log.Trace().Msgf("%v", spew.Sdump(repos))

	resp.SetData(&repos)
	return
}

// addRepo creates a bare git repository for a new repo entry and
// records it in the database; duplicates are rejected.
func addRepo(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var repo api.Repo
	err = req.GetData(&repo)
	if err != nil {
		return
	}

	var repos []api.Repo
	repos, err = db.Repos(e.DB)
	if err != nil {
		return
	}

	for _, r := range repos {
		log.Debug().Msgf("%v, %v", r, repo.Name)
		if repo.Name == r.Name {
			err = fmt.Errorf("repo already exist")
			return
		}
	}

	// dotfiles.Dir also creates the directory the repo lives in.
	cmd := exec.Command("git", "init", "--bare")
	cmd.Dir = dotfiles.Dir("daemon/repos", repo.Name)

	var out []byte
	out, err = cmd.Output()
	e.Log.Debug().Msgf("%v -> %v\n%v", cmd, err, string(out))
	if err != nil {
		return
	}

	err = db.AddRepo(e.DB, &repo)
	return
}

// kernels replies with the kernel list from the daemon's kernels
// config file.
func kernels(resp *api.Resp, e cmdenv) (err error) {
	kcfg, err := config.ReadKernelConfig(e.KernelConfig)
	if err != nil {
		e.Log.Error().Err(err).Msg("read kernels config")
		return
	}

	e.Log.Info().Msgf("send back %d kernels", len(kcfg.Kernels))

	resp.SetData(&kcfg.Kernels)
	return
}

// jobLogs reads every log file from the job's log directory and
// replies with their names and contents.
func jobLogs(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var uuid string
	err = req.GetData(&uuid)
	if err != nil {
		return
	}

	logdir := filepath.Join(dotfiles.File("daemon/logs"), uuid)
	if _, err = os.Stat(logdir); err != nil {
		return
	}

	files, err := os.ReadDir(logdir)
	if err != nil {
		return
	}

	var logs []api.JobLog
	for _, f := range files {
		if f.IsDir() {
			continue
		}

		logfile := filepath.Join(logdir, f.Name())

		var buf []byte
		buf, err = os.ReadFile(logfile)
		if err != nil {
			return
		}

		logs = append(logs, api.JobLog{
			Name: f.Name(),
			Text: string(buf),
		})
	}

	resp.SetData(&logs)
	return
}

// jobStatus replies with the stored status of the job identified by
// the requested UUID.
func jobStatus(req *api.Req, resp *api.Resp, e cmdenv) (err error) {
	var uuid string
	err = req.GetData(&uuid)
	if err != nil {
		return
	}

	st, err := db.JobStatus(e.DB, uuid)
	if err != nil {
		return
	}

	resp.SetData(&st)
	return
}

247
daemon/daemon.go Normal file
View File

@ -0,0 +1,247 @@
package daemon
import (
"crypto/tls"
"database/sql"
"io"
"net"
"os/exec"
"runtime"
"sync"
"time"
"github.com/remeh/sizedwaitgroup"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/daemon/db"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// Daemon is the out-of-tree job daemon: it owns the job database and
// schedules job processing across worker threads.
type Daemon struct {
	// Threads caps the number of jobs processed in parallel.
	Threads int

	Resources *Resources

	db           *sql.DB
	kernelConfig string

	// shutdown asks the scheduler loop to stop.
	shutdown bool
	wg       sync.WaitGroup
}

// Init opens (or creates) the daemon database and returns a Daemon
// with one worker thread per CPU.
func Init(kernelConfig string) (d *Daemon, err error) {
	d = &Daemon{}
	d.Threads = runtime.NumCPU()
	d.Resources = NewResources()

	d.kernelConfig = kernelConfig

	d.wg.Add(1) // matches with db.Close()

	d.db, err = db.OpenDatabase(dotfiles.File("daemon/daemon.db"))
	if err != nil {
		log.Error().Err(err).Msg("cannot open daemon.db")
	}

	log.Info().Msgf("database %s", dotfiles.File("daemon/daemon.db"))
	return
}

// Kill requests shutdown and closes the database.
func (d *Daemon) Kill() {
	d.shutdown = true
	d.db.Close()
	d.wg.Done()
}

// Daemon is the scheduler loop: it polls the job table once a second
// and processes waiting jobs with at most Threads in flight.
func (d *Daemon) Daemon() {
	if d.db == nil {
		log.Fatal().Msg("db is not initialized")
	}

	swg := sizedwaitgroup.New(d.Threads)
	log.Info().Int("threads", d.Threads).Msg("start")

	first := true
	for !d.shutdown {
		d.wg.Add(1)

		jobs, err := db.Jobs(d.db, "")
		if err != nil && !d.shutdown {
			log.Error().Err(err).Msg("")
			d.wg.Done()
			time.Sleep(time.Minute)
			continue
		}

		for _, job := range jobs {
			if d.shutdown {
				break
			}

			pj := newJobProcessor(job, d.db)

			// Jobs left "running" by a previous daemon
			// instance are requeued on the first pass.
			if first && job.Status == api.StatusRunning {
				pj.SetStatus(api.StatusWaiting)
				continue
			}

			if job.Status == api.StatusNew {
				pj.SetStatus(api.StatusWaiting)
				continue
			}

			if job.Status != api.StatusWaiting {
				continue
			}

			// swg.Add blocks while Threads jobs are running.
			swg.Add()
			go func(pj jobProcessor) {
				defer swg.Done()
				pj.Process(d.Resources)
				time.Sleep(time.Second)
			}(pj)
		}

		first = false
		d.wg.Done()
		time.Sleep(time.Second)
	}

	swg.Wait()
}
// handler serves one client connection: decode the request, run the
// command, then encode the response — except in raw mode, where the
// connection has been handed to the git daemon and writing a response
// would corrupt the stream.
func handler(conn net.Conn, e cmdenv) {
	defer conn.Close()

	resp := api.NewResp()

	e.Log = log.With().
		Str("resp_uuid", resp.UUID).
		Str("remote_addr", conn.RemoteAddr().String()).
		Logger()

	e.Log.Info().Msg("")

	var req api.Req

	defer func() {
		if req.Command != api.RawMode {
			resp.Encode(conn)
		} else {
			log.Debug().Msg("raw mode, not encode response")
		}
	}()

	err := req.Decode(conn)
	if err != nil {
		e.Log.Error().Err(err).Msg("cannot decode")
		return
	}

	err = command(&req, &resp, e)
	if err != nil {
		e.Log.Error().Err(err).Msg("")
		return
	}
}
// Listen starts the daemon's network frontends: a local git daemon on
// port 9418 serving the managed repos, and a TLS listener on addr
// (with a self-signed certificate generated on first run) that
// accepts API connections and dispatches them to handler.
func (d *Daemon) Listen(addr string) {
	if d.db == nil {
		log.Fatal().Msg("db is not initialized")
	}

	// Background git daemon for clone/push of the job repos.
	go func() {
		repodir := dotfiles.Dir("daemon/repos")
		git := exec.Command("git", "daemon", "--port=9418", "--verbose",
			"--reuseaddr",
			"--export-all", "--base-path="+repodir,
			"--enable=receive-pack",
			"--enable=upload-pack",
			repodir)

		stdout, err := git.StdoutPipe()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}
		go io.Copy(logWriter{log: log.Logger}, stdout)

		stderr, err := git.StderrPipe()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}
		go io.Copy(logWriter{log: log.Logger}, stderr)

		log.Debug().Msgf("start %v", git)
		git.Start()
		defer func() {
			log.Debug().Msgf("stop %v", git)
		}()

		err = git.Wait()
		if err != nil {
			log.Fatal().Err(err).Msgf("%v", git)
			return
		}
	}()

	// First run: generate a self-signed wildcard certificate that
	// the client must copy to authenticate the daemon.
	if !fs.PathExists(dotfiles.File("daemon/cert.pem")) {
		log.Info().Msg("No cert.pem, generating...")
		cmd := exec.Command("openssl",
			"req", "-batch", "-newkey", "rsa:2048",
			"-new", "-nodes", "-x509",
			"-subj", "/CN=*",
			"-addext", "subjectAltName = DNS:*",
			"-out", dotfiles.File("daemon/cert.pem"),
			"-keyout", dotfiles.File("daemon/key.pem"))

		out, err := cmd.Output()
		if err != nil {
			log.Error().Err(err).Msg(string(out))
			return
		}
	}

	log.Info().Msg("copy to client:")
	log.Info().Msgf("cert: %s, key: %s",
		dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))

	cert, err := tls.LoadX509KeyPair(dotfiles.File("daemon/cert.pem"),
		dotfiles.File("daemon/key.pem"))
	if err != nil {
		log.Fatal().Err(err).Msg("LoadX509KeyPair")
	}
	tlscfg := &tls.Config{Certificates: []tls.Certificate{cert}}

	l, err := tls.Listen("tcp", addr, tlscfg)
	if err != nil {
		log.Fatal().Err(err).Msg("listen")
	}

	log.Info().Str("addr", ":9418").Msg("git")
	log.Info().Str("addr", addr).Msg("daemon")

	// Accept loop: one handler goroutine per connection.
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal().Err(err).Msg("accept")
		}

		log.Info().Msgf("accept %s", conn.RemoteAddr())

		e := cmdenv{
			DB:           d.db,
			WG:           &d.wg,
			Conn:         conn,
			KernelConfig: d.kernelConfig,
		}

		go handler(conn, e)
	}
}

15
daemon/daemon_test.go Normal file
View File

@ -0,0 +1,15 @@
package daemon
import (
"os"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
func init() {
log.Logger = zerolog.New(zerolog.ConsoleWriter{
Out: os.Stdout,
NoColor: true,
})
}

123
daemon/db/db.go Normal file
View File

@ -0,0 +1,123 @@
package db
import (
"database/sql"
"fmt"
"strconv"
_ "github.com/mattn/go-sqlite3"
)
// Change on ANY database update
const currentDatabaseVersion = 1
const versionField = "db_version"
// createMetadataTable creates the key/value metadata table used for
// schema versioning.
func createMetadataTable(db *sql.DB) (err error) {
	_, err = db.Exec(`
	CREATE TABLE IF NOT EXISTS metadata (
		id	INTEGER PRIMARY KEY,
		key	TEXT UNIQUE,
		value	TEXT
	)`)
	return
}

// metaChkValue reports whether a metadata key exists.
func metaChkValue(db *sql.DB, key string) (exist bool, err error) {
	sql := "SELECT EXISTS(SELECT id FROM metadata WHERE key = $1)"
	stmt, err := db.Prepare(sql)
	if err != nil {
		return
	}
	defer stmt.Close()

	err = stmt.QueryRow(key).Scan(&exist)
	return
}

// metaGetValue returns the value stored for a metadata key.
func metaGetValue(db *sql.DB, key string) (value string, err error) {
	stmt, err := db.Prepare("SELECT value FROM metadata " +
		"WHERE key = $1")
	if err != nil {
		return
	}
	defer stmt.Close()

	err = stmt.QueryRow(key).Scan(&value)
	return
}

// metaSetValue inserts or replaces a metadata key/value pair.
func metaSetValue(db *sql.DB, key, value string) (err error) {
	stmt, err := db.Prepare("INSERT OR REPLACE INTO metadata " +
		"(key, value) VALUES ($1, $2)")
	if err != nil {
		return
	}
	defer stmt.Close()

	_, err = stmt.Exec(key, value)
	return
}

// getVersion reads the stored schema version.
func getVersion(db *sql.DB) (version int, err error) {
	s, err := metaGetValue(db, versionField)
	if err != nil {
		return
	}

	version, err = strconv.Atoi(s)
	return
}

// createSchema creates all tables for a fresh database.
func createSchema(db *sql.DB) (err error) {
	err = createMetadataTable(db)
	if err != nil {
		return
	}

	err = createJobTable(db)
	if err != nil {
		return
	}

	err = createRepoTable(db)
	if err != nil {
		return
	}

	return
}

// OpenDatabase opens (creating and versioning if needed) the sqlite
// database at path; it refuses to open databases whose schema version
// differs from currentDatabaseVersion.
func OpenDatabase(path string) (db *sql.DB, err error) {
	db, err = sql.Open("sqlite3", path)
	if err != nil {
		return
	}

	// sqlite handles only one writer; serialize all access.
	db.SetMaxOpenConns(1)

	// No version key means a fresh database: create the schema and
	// stamp the current version.
	exists, _ := metaChkValue(db, versionField)
	if !exists {
		err = createSchema(db)
		if err != nil {
			return
		}

		err = metaSetValue(db, versionField,
			strconv.Itoa(currentDatabaseVersion))
		return
	}

	version, err := getVersion(db)
	if err != nil {
		return
	}
	if version != currentDatabaseVersion {
		err = fmt.Errorf("database is not supported (%d instead of %d)",
			version, currentDatabaseVersion)
		return
	}

	return
}

31
daemon/db/db_test.go Normal file
View File

@ -0,0 +1,31 @@
package db
import (
"database/sql"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// tmpdb creates a temporary on-disk SQLite database and opens it via
// OpenDatabase. Cleanup (file removal, db.Close) is the caller's
// responsibility — hence the commented-out defers.
func tmpdb(t *testing.T) (file *os.File, db *sql.DB) {
	file, err := os.CreateTemp("", "temp-sqlite.db")
	assert.Nil(t, err)
	// defer os.Remove(file.Name())

	db, err = OpenDatabase(file.Name())
	assert.Nil(t, err)
	// defer db.Close()
	return
}
// TestOpenDatabase checks that a freshly created database can be
// closed and reopened (i.e. the version check accepts its own schema).
func TestOpenDatabase(t *testing.T) {
	file, db := tmpdb(t)
	defer os.Remove(file.Name())
	db.Close()

	// Second open exercises the existing-schema path.
	db, err := OpenDatabase(file.Name())
	assert.Nil(t, err)
	db.Close()
}

193
daemon/db/job.go Normal file
View File

@ -0,0 +1,193 @@
package db
import (
"bytes"
"database/sql"
"encoding/gob"
"time"
"code.dumpstack.io/tools/out-of-tree/api"
)
// createJobTable creates the job table. config and target hold
// gob-encoded api.Job fields (see AddJob); updated/created/started/
// finished are unix timestamps stored as integers.
func createJobTable(db *sql.DB) (err error) {
	_, err = db.Exec(`
	CREATE TABLE IF NOT EXISTS job (
		id		INTEGER PRIMARY KEY,
		updated		INT,
		uuid		TEXT,
		group_uuid	TEXT,
		repo		TEXT,
		"commit"	TEXT,
		description	TEXT,
		config		TEXT,
		target		TEXT,
		created		INT,
		started		INT,
		finished	INT,
		status		TEXT DEFAULT "new"
	)`)
	return
}
// AddJob inserts job into the job table and stores the assigned row
// id in job.ID. Artifact and Target are serialized with gob; status
// is left to its column default ("new").
func AddJob(db *sql.DB, job *api.Job) (err error) {
	// Serialize before touching the database so a bad artifact
	// never leaves a half-prepared statement around.
	var config bytes.Buffer
	if err = gob.NewEncoder(&config).Encode(job.Artifact); err != nil {
		return
	}
	var target bytes.Buffer
	if err = gob.NewEncoder(&target).Encode(job.Target); err != nil {
		return
	}

	stmt, err := db.Prepare(`INSERT INTO job (updated, uuid, group_uuid, repo, "commit", ` +
		`description, config, target, created, started, finished) ` +
		`VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);`)
	if err != nil {
		return
	}
	defer stmt.Close()

	res, err := stmt.Exec(time.Now().Unix(), job.UUID, job.Group,
		job.RepoName, job.Commit, job.Description,
		config.Bytes(), target.Bytes(),
		job.Created.Unix(), job.Started.Unix(),
		job.Finished.Unix(),
	)
	if err != nil {
		return
	}

	job.ID, err = res.LastInsertId()
	return
}
// UpdateJob rewrites the job row identified by job.ID with the current
// contents of job, refreshing the updated timestamp.
func UpdateJob(db *sql.DB, job *api.Job) (err error) {
	// Serialize first; see AddJob for the symmetric encode.
	var config bytes.Buffer
	if err = gob.NewEncoder(&config).Encode(job.Artifact); err != nil {
		return
	}
	var target bytes.Buffer
	if err = gob.NewEncoder(&target).Encode(job.Target); err != nil {
		return
	}

	stmt, err := db.Prepare(`UPDATE job ` +
		`SET updated=$1, uuid=$2, group_uuid=$3, repo=$4, ` +
		`"commit"=$5, description=$6, config=$7, target=$8, ` +
		`created=$9, started=$10, finished=$11, ` +
		`status=$12 ` +
		`WHERE id=$13`)
	if err != nil {
		return
	}
	defer stmt.Close()

	_, err = stmt.Exec(time.Now().Unix(), job.UUID, job.Group,
		job.RepoName, job.Commit, job.Description,
		config.Bytes(), target.Bytes(),
		job.Created.Unix(), job.Started.Unix(),
		job.Finished.Unix(), job.Status, job.ID)
	return
}
// scanJob decodes one job row into an api.Job. The scan callback must
// yield columns in exactly this order: id, updated, uuid, group_uuid,
// repo, commit, description, config, target, created, started,
// finished, status — keep it in sync with the SELECTs in Jobs and Job.
func scanJob(scan func(dest ...any) error) (job api.Job, err error) {
	// config/target are gob blobs written by AddJob/UpdateJob.
	var config, target []byte
	// unix timestamps as stored in the table
	var updated, created, started, finished int64
	err = scan(&job.ID, &updated, &job.UUID, &job.Group,
		&job.RepoName, &job.Commit, &job.Description,
		&config, &target,
		&created, &started, &finished, &job.Status)
	if err != nil {
		return
	}

	abuf := bytes.NewBuffer(config)
	err = gob.NewDecoder(abuf).Decode(&job.Artifact)
	if err != nil {
		return
	}

	tbuf := bytes.NewBuffer(target)
	err = gob.NewDecoder(tbuf).Decode(&job.Target)
	if err != nil {
		return
	}

	job.UpdatedAt = time.Unix(updated, 0)
	job.Created = time.Unix(created, 0)
	job.Started = time.Unix(started, 0)
	job.Finished = time.Unix(finished, 0)
	return
}
// Jobs returns all jobs, optionally filtered by a caller-supplied
// WHERE clause with positional args.
//
// Note: `where` is spliced into the query verbatim, so it must never
// contain untrusted input; values belong in args.
func Jobs(db *sql.DB, where string, args ...any) (jobs []api.Job, err error) {
	q := `SELECT id, updated, uuid, group_uuid, ` +
		`repo, "commit", description, config, target, created, ` +
		`started, finished, status FROM job`
	if len(where) != 0 {
		q += ` WHERE ` + where
	}
	stmt, err := db.Prepare(q)
	if err != nil {
		return
	}
	defer stmt.Close()

	rows, err := stmt.Query(args...)
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var job api.Job
		job, err = scanJob(rows.Scan)
		if err != nil {
			return
		}
		jobs = append(jobs, job)
	}
	// rows.Next returning false may hide a query/connection error;
	// previously such errors were silently dropped.
	err = rows.Err()
	return
}
// Job fetches a single job by its uuid.
func Job(db *sql.DB, uuid string) (job api.Job, err error) {
	query := `SELECT id, updated, uuid, ` +
		`group_uuid, ` +
		`repo, "commit", description, config, target, ` +
		`created, started, finished, status ` +
		`FROM job WHERE uuid=$1`
	stmt, err := db.Prepare(query)
	if err != nil {
		return
	}
	defer stmt.Close()
	job, err = scanJob(stmt.QueryRow(uuid).Scan)
	return
}
// JobStatus returns only the status column of the job with the given
// uuid.
func JobStatus(db *sql.DB, uuid string) (st api.Status, err error) {
	stmt, err := db.Prepare(`SELECT status FROM job ` +
		`WHERE uuid=$1`)
	if err != nil {
		return
	}
	defer stmt.Close()
	// Redundant `if err != nil { return }` before a bare `return`
	// removed; the Scan error is returned directly.
	err = stmt.QueryRow(uuid).Scan(&st)
	return
}

50
daemon/db/job_test.go Normal file
View File

@ -0,0 +1,50 @@
package db
import (
"os"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/api"
)
// TestJobTable exercises the whole job lifecycle against a temporary
// database: insert, update, list, fetch by uuid, and status lookup.
func TestJobTable(t *testing.T) {
	file, db := tmpdb(t)
	defer os.Remove(file.Name())
	defer db.Close()

	job := api.Job{
		RepoName: "testname",
		Commit:   "test",
		Group:    uuid.New().String(),
	}

	err := AddJob(db, &job)
	assert.Nil(t, err)

	// Change group and status, then make sure UpdateJob persists both.
	job.Group = uuid.New().String()
	job.Status = api.StatusSuccess
	err = UpdateJob(db, &job)
	assert.Nil(t, err)

	jobs, err := Jobs(db, "")
	assert.Nil(t, err)
	assert.Equal(t, 1, len(jobs))
	assert.Equal(t, job.Group, jobs[0].Group)

	job, err = Job(db, job.UUID)
	assert.Nil(t, err)
	assert.Equal(t, api.StatusSuccess, job.Status)

	st, err := JobStatus(db, job.UUID)
	assert.Nil(t, err)
	assert.Equal(t, job.Status, st)
}

61
daemon/db/repo.go Normal file
View File

@ -0,0 +1,61 @@
package db
import (
"database/sql"
"code.dumpstack.io/tools/out-of-tree/api"
)
// createRepoTable creates the repo table; names are unique.
func createRepoTable(db *sql.DB) (err error) {
	_, err = db.Exec(`
	CREATE TABLE IF NOT EXISTS repo (
		id	INTEGER PRIMARY KEY,
		name	TEXT UNIQUE
	)`)
	return
}
// AddRepo inserts a repository row and stores the assigned id in
// repo.ID.
func AddRepo(db *sql.DB, repo *api.Repo) (err error) {
	query := `INSERT INTO repo (name) ` +
		`VALUES ($1);`
	stmt, err := db.Prepare(query)
	if err != nil {
		return
	}
	defer stmt.Close()

	res, err := stmt.Exec(repo.Name)
	if err != nil {
		return
	}

	repo.ID, err = res.LastInsertId()
	return
}
// Repos returns all repositories registered in the repo table.
func Repos(db *sql.DB) (repos []api.Repo, err error) {
	stmt, err := db.Prepare(`SELECT id, name FROM repo`)
	if err != nil {
		return
	}
	defer stmt.Close()

	rows, err := stmt.Query()
	if err != nil {
		return
	}
	defer rows.Close()

	for rows.Next() {
		var repo api.Repo
		err = rows.Scan(&repo.ID, &repo.Name)
		if err != nil {
			return
		}
		repos = append(repos, repo)
	}
	// Surface iteration errors that rows.Next may have swallowed;
	// previously they were silently dropped.
	err = rows.Err()
	return
}

46
daemon/db/repo_test.go Normal file
View File

@ -0,0 +1,46 @@
package db
import (
"database/sql"
"os"
"testing"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/api"
)
// testCreateRepoTable opens a raw temporary SQLite database (without
// OpenDatabase) and creates only the repo table. Cleanup is left to
// the caller, hence the commented-out defers.
func testCreateRepoTable(t *testing.T) (file *os.File, db *sql.DB) {
	file, err := os.CreateTemp("", "temp-sqlite.db")
	assert.Nil(t, err)
	// defer os.Remove(tempDB.Name())

	db, err = sql.Open("sqlite3", file.Name())
	assert.Nil(t, err)
	// defer db.Close()

	// mirror OpenDatabase: single connection for sqlite
	db.SetMaxOpenConns(1)

	err = createRepoTable(db)
	assert.Nil(t, err)
	return
}
// TestRepoTable checks AddRepo/Repos round-trip on a fresh repo table.
func TestRepoTable(t *testing.T) {
	file, db := testCreateRepoTable(t)
	defer db.Close()
	defer os.Remove(file.Name())

	repo := api.Repo{Name: "testname"}

	err := AddRepo(db, &repo)
	assert.Nil(t, err)

	repos, err := Repos(db)
	assert.Nil(t, err)
	assert.Equal(t, 1, len(repos))
	// AddRepo fills repo.ID, so full struct equality is expected.
	assert.Equal(t, repo, repos[0])
}

177
daemon/process.go Normal file
View File

@ -0,0 +1,177 @@
package daemon
import (
"database/sql"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
"code.dumpstack.io/tools/out-of-tree/artifact"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/daemon/db"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// jobProcessor executes a single queued job and persists its state
// transitions to the daemon database.
type jobProcessor struct {
	job api.Job        // the job being processed
	log zerolog.Logger // pre-tagged with job uuid/group
	db  *sql.DB        // daemon database for status updates
}
// newJobProcessor binds a job to the database and to a logger tagged
// with the job and group identifiers.
func newJobProcessor(job api.Job, db *sql.DB) jobProcessor {
	return jobProcessor{
		job: job,
		db:  db,
		log: log.With().
			Str("uuid", job.UUID).
			Str("group", job.Group).
			Logger(),
	}
}
// Update persists the current in-memory job state to the database.
// The error is both logged here and returned to the caller.
func (pj jobProcessor) Update() (err error) {
	err = db.UpdateJob(pj.db, &pj.job)
	if err != nil {
		pj.log.Error().Err(err).Msgf("update job %v", pj.job)
	}
	return
}
// SetStatus logs a status transition and persists it.
func (pj jobProcessor) SetStatus(status api.Status) (err error) {
	pj.log.Info().Msgf(`%v -> %v`, pj.job.Status, status)
	pj.job.Status = status
	return pj.Update()
}
// Process runs a job end to end: allocate host resources, clone the
// repository from the local git daemon, check out the requested
// commit, build/run/test the artifact, and store the resulting logs.
// The final status (success/failure) is derived from err by the
// deferred handler.
func (pj *jobProcessor) Process(res *Resources) (err error) {
	if pj.job.Status != api.StatusWaiting {
		err = errors.New("job is not available to process")
		return
	}

	// Fall back to qemu defaults so resource accounting always
	// deals with non-zero values.
	if pj.job.Artifact.Qemu.Cpus == 0 {
		pj.job.Artifact.Qemu.Cpus = qemu.DefaultCPUs
	}
	if pj.job.Artifact.Qemu.Memory == 0 {
		pj.job.Artifact.Qemu.Memory = qemu.DefaultMemory
	}

	err = res.Allocate(pj.job)
	if err != nil {
		return
	}
	defer func() {
		res.Release(pj.job)
	}()

	log.Info().Msgf("process job %v", pj.job.UUID)

	pj.SetStatus(api.StatusRunning)
	pj.job.Started = time.Now()
	defer func() {
		pj.job.Finished = time.Now()
		if err != nil {
			pj.SetStatus(api.StatusFailure)
		} else {
			pj.SetStatus(api.StatusSuccess)
		}
	}()

	var tmp string
	tmp, err = os.MkdirTemp(dotfiles.Dir("tmp"), "")
	if err != nil {
		pj.log.Error().Err(err).Msg("mktemp")
		return
	}
	defer os.RemoveAll(tmp)

	tmprepo := filepath.Join(tmp, "repo")
	pj.log.Debug().Msgf("temp repo: %v", tmprepo)

	remote := fmt.Sprintf("git://localhost:9418/%s", pj.job.RepoName)
	pj.log.Debug().Msgf("remote: %v", remote)

	var raw []byte

	cmd := exec.Command("git", "clone", remote, tmprepo)
	raw, err = cmd.CombinedOutput()
	pj.log.Trace().Msgf("%v\n%v", cmd, string(raw))
	if err != nil {
		pj.log.Error().Msgf("%v\n%v", cmd, string(raw))
		return
	}

	cmd = exec.Command("git", "checkout", pj.job.Commit)
	cmd.Dir = tmprepo
	raw, err = cmd.CombinedOutput()
	pj.log.Trace().Msgf("%v\n%v", cmd, string(raw))
	if err != nil {
		pj.log.Error().Msgf("%v\n%v", cmd, string(raw))
		return
	}

	pj.job.Artifact.SourcePath = tmprepo

	var result *artifact.Result
	var dq *qemu.System

	pj.job.Artifact.Process(pj.log, pj.job.Target, false, "", "", 0,
		func(q *qemu.System, ka artifact.Artifact, ki distro.KernelInfo,
			res *artifact.Result) {

			result = res
			dq = q
		},
	)

	// Guard against the callback never firing; previously this
	// dereferenced a nil result/dq and panicked.
	if result == nil || dq == nil {
		err = errors.New("artifact processing returned no result")
		return
	}

	logdir := dotfiles.Dir("daemon/logs", pj.job.UUID)

	// Log write failures are reported but intentionally do not
	// abort the job; err is reassigned on each write.
	err = os.WriteFile(filepath.Join(logdir, "build.log"),
		[]byte(result.Build.Output), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	err = os.WriteFile(filepath.Join(logdir, "run.log"),
		[]byte(result.Run.Output), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	err = os.WriteFile(filepath.Join(logdir, "test.log"),
		[]byte(result.Test.Output), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	err = os.WriteFile(filepath.Join(logdir, "qemu.log"),
		[]byte(dq.Stdout), 0644)
	if err != nil {
		pj.log.Error().Err(err).Msg("")
	}

	pj.log.Info().Msgf("build %v, run %v, test %v",
		result.Build.Ok, result.Run.Ok, result.Test.Ok)

	if !result.Test.Ok {
		err = errors.New("tests failed")
	}

	return
}

206
daemon/resources.go Normal file
View File

@ -0,0 +1,206 @@
package daemon
import (
"errors"
"runtime"
"sync"
"syscall"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/api"
)
// Resources tracks the host CPU and RAM budget shared by all
// concurrently running jobs.
type Resources struct {
	initialized bool // set by NewResources; guards Allocate/Release

	CPU *CPUResource
	RAM *RAMResources
}
// NewResources discovers the host CPU/RAM limits and returns an
// initialized resource tracker.
func NewResources() *Resources {
	r := &Resources{
		CPU: NewCPUResources(),
		RAM: NewRAMResources(),
	}
	r.initialized = true
	return r
}
// Allocate reserves the CPUs and memory a job's qemu instance needs.
// Either both resources are allocated or neither is: a failed RAM
// allocation rolls back the CPU allocation.
func (r *Resources) Allocate(job api.Job) (err error) {
	if !r.initialized {
		err = errors.New("resources not initialized")
		return
	}

	// Zero requests are rejected; Process fills in qemu defaults
	// before calling Allocate.
	if job.Artifact.Qemu.Cpus == 0 {
		err = errors.New("no cpus requested")
		return
	}
	if job.Artifact.Qemu.Memory == 0 {
		err = errors.New("no memory requested")
		return
	}

	// snapshots used only to log the allocation delta below
	origRam := r.RAM.GetSpent()
	origCPU := r.CPU.GetSpent()

	err = r.CPU.Allocate(job.Artifact.Qemu.Cpus)
	if err != nil {
		return
	}

	err = r.RAM.Allocate(job.Artifact.Qemu.Memory)
	if err != nil {
		// roll back the CPU reservation on partial failure
		r.CPU.Release(job.Artifact.Qemu.Cpus)
		return
	}

	log.Debug().Msgf("allocated %d cpus, %d MB ram",
		r.CPU.GetSpent()-origCPU,
		r.RAM.GetSpent()-origRam)
	return
}
// Release returns a job's CPU and RAM reservations to the pool.
// Errors from the underlying Release calls (over-release) are
// intentionally discarded here; the guards live in those methods.
func (r *Resources) Release(job api.Job) {
	if !r.initialized {
		log.Error().Msg("resources not initialized")
		return
	}

	r.CPU.Release(job.Artifact.Qemu.Cpus)
	r.RAM.Release(job.Artifact.Qemu.Memory)

	log.Debug().Msgf("released %d cpus, %d MB ram",
		job.Artifact.Qemu.Cpus,
		job.Artifact.Qemu.Memory)
}
// CPUResource tracks how many virtual CPUs are handed out to running
// jobs on this host.
type CPUResource struct {
	num        int     // total host CPUs
	overcommit float64 // multiplier applied to num for the budget

	mu    *sync.Mutex
	spent int // CPUs currently allocated
}

// NOTE(review): these constants are not referenced anywhere in this
// view of the package — confirm usage elsewhere before removing.
const (
	Allocation = iota
	Release
)
// NewCPUResources returns a CPU tracker sized to the host CPU count,
// with no overcommit.
func NewCPUResources() *CPUResource {
	res := &CPUResource{
		mu:         &sync.Mutex{},
		num:        runtime.NumCPU(),
		overcommit: 1,
	}
	log.Debug().Msgf("total cpus: %d", res.num)
	return res
}
// SetOvercommit changes the CPU overcommit multiplier and logs the
// resulting effective CPU budget.
func (cpur *CPUResource) SetOvercommit(oc float64) {
	effective := int(float64(cpur.num) * oc)
	log.Info().Int("cpus", cpur.num).
		Int("result", effective).
		Msgf("%.02f", oc)
	cpur.overcommit = oc
}
// GetSpent returns the number of CPUs currently allocated.
func (cpur *CPUResource) GetSpent() (spent int) {
	cpur.mu.Lock()
	defer cpur.mu.Unlock()
	spent = cpur.spent
	return
}
// ErrNotEnoughCpu is returned when an allocation request exceeds the
// (overcommit-adjusted) number of host CPUs.
var ErrNotEnoughCpu = errors.New("not enough cpu")

// Allocate reserves cpu cores, failing if the budget would be
// exceeded.
func (cpur *CPUResource) Allocate(cpu int) (err error) {
	cpur.mu.Lock()
	defer cpur.mu.Unlock()

	budget := int(float64(cpur.num) * cpur.overcommit)
	if cpur.spent+cpu > budget {
		return ErrNotEnoughCpu
	}
	cpur.spent += cpu
	return
}
// Release returns cpu cores to the pool; releasing more than is
// currently allocated is rejected.
func (cpur *CPUResource) Release(cpu int) (err error) {
	cpur.mu.Lock()
	defer cpur.mu.Unlock()

	if cpur.spent < cpu {
		return ErrFreeingMoreThanAllocated
	}
	cpur.spent -= cpu
	return
}
// RAMResources tracks how much host memory (in MB) is handed out to
// running jobs.
type RAMResources struct {
	mb         int     // total host RAM in MB
	overcommit float64 // multiplier applied to mb for the budget

	mu    *sync.Mutex
	spent int // MB currently allocated
}
// NewRAMResources reads the total host memory via sysinfo(2) and
// returns an initialized RAM tracker with no overcommit.
func NewRAMResources() (ramr *RAMResources) {
	ramr = &RAMResources{}
	ramr.mu = &sync.Mutex{}
	ramr.overcommit = 1

	var info syscall.Sysinfo_t
	// Previously this error was silently ignored; on failure mb
	// stays 0 and every allocation fails with "not enough ram",
	// so at least make the root cause visible.
	if err := syscall.Sysinfo(&info); err != nil {
		log.Error().Err(err).Msg("sysinfo")
	}
	ramr.mb = int(info.Totalram / 1024 / 1024)
	log.Debug().Msgf("total ram: %d MB", ramr.mb)
	return
}
// SetOvercommit changes the RAM overcommit multiplier and logs the
// resulting effective memory budget.
func (ramr *RAMResources) SetOvercommit(oc float64) {
	effective := int(float64(ramr.mb) * oc)
	log.Info().Int("ram", ramr.mb).
		Int("result", effective).
		Msgf("%.02f", oc)
	ramr.overcommit = oc
}
// GetSpent returns the amount of RAM (in MB) currently allocated.
//
// The receiver must be a pointer: with the previous value receiver
// the struct — including spent — was copied *before* the lock was
// taken, so the locked read observed an unsynchronized snapshot.
// This also matches CPUResource.GetSpent. Callers hold *RAMResources
// (Resources.RAM), so the change is interface-compatible.
func (ramr *RAMResources) GetSpent() int {
	ramr.mu.Lock()
	defer ramr.mu.Unlock()
	return ramr.spent
}
// ErrNotEnoughRam is returned when a request exceeds the
// (overcommit-adjusted) total host memory.
var ErrNotEnoughRam = errors.New("not enough ram")

// Allocate reserves mb megabytes of RAM, failing if the budget would
// be exceeded.
func (ramr *RAMResources) Allocate(mb int) (err error) {
	ramr.mu.Lock()
	defer ramr.mu.Unlock()

	budget := int(float64(ramr.mb) * ramr.overcommit)
	if mb > budget-ramr.spent {
		return ErrNotEnoughRam
	}
	ramr.spent += mb
	return
}
// ErrFreeingMoreThanAllocated signals an accounting bug: a release of
// more resources than were ever allocated.
var ErrFreeingMoreThanAllocated = errors.New("freeing more than allocated")

// Release returns mb megabytes of RAM to the pool; over-release is
// rejected.
func (ramr *RAMResources) Release(mb int) (err error) {
	ramr.mu.Lock()
	defer ramr.mu.Unlock()

	if ramr.spent < mb {
		return ErrFreeingMoreThanAllocated
	}
	ramr.spent -= mb
	return
}

204
debug.go
View File

@ -1,204 +0,0 @@
// Copyright 2018 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
import (
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"time"
"gopkg.in/logrusorgru/aurora.v1"
"code.dumpstack.io/tools/out-of-tree/config"
"code.dumpstack.io/tools/out-of-tree/qemu"
)
// firstSupported returns the first kernel from kcfg that matches the
// kernel regex and is supported by the artifact.
func firstSupported(kcfg config.KernelConfig, ka config.Artifact,
	kernel string) (ki config.KernelInfo, err error) {

	km, err := kernelMask(kernel)
	if err != nil {
		return
	}

	// restrict the artifact to the requested kernel mask only
	ka.SupportedKernels = []config.KernelMask{km}

	for _, ki = range kcfg.Kernels {
		var supported bool
		supported, err = ka.Supported(ki)
		// stop on the first supported kernel or on error
		if err != nil || supported {
			return
		}
	}

	err = errors.New("No supported kernel found")
	return
}
// handleLine reads and executes one command of the interactive debug
// REPL. It returns an error only to end the session ("quit").
func handleLine(q *qemu.System) (err error) {
	fmt.Print("out-of-tree> ")
	// default to "help" if Scanf reads nothing
	rawLine := "help"
	fmt.Scanf("%s", &rawLine)
	params := strings.Fields(rawLine)
	cmd := params[0]

	switch cmd {
	case "h", "help":
		fmt.Printf("help\t: print this help message\n")
		fmt.Printf("log\t: print qemu log\n")
		fmt.Printf("clog\t: print qemu log and cleanup buffer\n")
		fmt.Printf("cleanup\t: cleanup qemu log buffer\n")
		fmt.Printf("ssh\t: print arguments to ssh command\n")
		fmt.Printf("quit\t: quit\n")
	case "l", "log":
		fmt.Println(string(q.Stdout))
	case "cl", "clog":
		fmt.Println(string(q.Stdout))
		q.Stdout = []byte{}
	case "c", "cleanup":
		q.Stdout = []byte{}
	case "s", "ssh":
		fmt.Println(q.GetSSHCommand())
	case "q", "quit":
		return errors.New("end of session")
	default:
		fmt.Println("No such command")
	}
	return
}
// interactive runs the debug REPL until handleLine signals the end of
// the session with an error.
func interactive(q *qemu.System) (err error) {
	for err == nil {
		err = handleLine(q)
	}
	return
}
// debugHandler boots a qemu VM with gdb attached for the artifact in
// workPath, applies mitigation flags, copies the built exploit/module
// into the guest, and drops into the interactive debug REPL.
func debugHandler(kcfg config.KernelConfig, workPath, kernRegex, gdb string,
	dockerTimeout time.Duration, yekaslr, yesmep, yesmap, yekpti,
	nokaslr, nosmep, nosmap, nokpti bool) (err error) {

	ka, err := config.ReadArtifactConfig(workPath + "/.out-of-tree.toml")
	if err != nil {
		return
	}

	if ka.SourcePath == "" {
		ka.SourcePath = workPath
	}

	ki, err := firstSupported(kcfg, ka, kernRegex)
	if err != nil {
		return
	}

	kernel := qemu.Kernel{KernelPath: ki.KernelPath, InitrdPath: ki.InitrdPath}
	q, err := qemu.NewSystem(qemu.X86x64, kernel, ki.RootFS)
	if err != nil {
		return
	}

	if ka.Qemu.Cpus != 0 {
		q.Cpus = ka.Qemu.Cpus
	}
	if ka.Qemu.Memory != 0 {
		q.Memory = ka.Qemu.Memory
	}

	q.SetKASLR(false) // set KASLR to false by default because of gdb
	q.SetSMEP(!ka.Mitigations.DisableSmep)
	q.SetSMAP(!ka.Mitigations.DisableSmap)
	q.SetKPTI(!ka.Mitigations.DisableKpti)

	// explicit command-line flags override the artifact config
	if yekaslr {
		q.SetKASLR(true)
	} else if nokaslr {
		q.SetKASLR(false)
	}

	if yesmep {
		q.SetSMEP(true)
	} else if nosmep {
		q.SetSMEP(false)
	}

	if yesmap {
		q.SetSMAP(true)
	} else if nosmap {
		q.SetSMAP(false)
	}

	if yekpti {
		q.SetKPTI(true)
	} else if nokpti {
		q.SetKPTI(false)
	}

	// green = mitigation enabled, red = disabled
	redgreen := func(name string, enabled bool) aurora.Value {
		if enabled {
			return aurora.BgGreen(aurora.Black(name))
		}
		return aurora.BgRed(aurora.Gray(name))
	}

	fmt.Printf("[*] %s %s %s %s\n",
		redgreen("KASLR", q.GetKASLR()),
		redgreen("SMEP", q.GetSMEP()),
		redgreen("SMAP", q.GetSMAP()),
		redgreen("KPTI", q.GetKPTI()))

	fmt.Printf("[*] SMP: %d CPUs\n", q.Cpus)
	fmt.Printf("[*] Memory: %d MB\n", q.Memory)

	q.Debug(gdb)
	coloredGdbAddress := aurora.BgGreen(aurora.Black(gdb))
	fmt.Printf("[*] gdb is listening on %s\n", coloredGdbAddress)

	err = q.Start()
	if err != nil {
		return
	}
	defer q.Stop()

	tmp, err := ioutil.TempDir("/tmp/", "out-of-tree_")
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	outFile, output, err := build(tmp, ka, ki, dockerTimeout)
	if err != nil {
		log.Println(err, output)
		return
	}

	remoteFile := "/tmp/exploit"
	if ka.Type == config.KernelModule {
		remoteFile = "/tmp/module.ko"
	}

	err = q.CopyFile("user", outFile, remoteFile)
	if err != nil {
		return
	}

	coloredRemoteFile := aurora.BgGreen(aurora.Black(remoteFile))
	fmt.Printf("[*] build result copied to %s\n", coloredRemoteFile)

	fmt.Printf("\n%s\n", q.GetSSHCommand())
	fmt.Printf("gdb %s -ex 'target remote %s'\n\n", ki.VmlinuxPath, gdb)
	// TODO set substitute-path /build/.../linux-... /path/to/linux-source

	err = interactive(q)
	return
}

39
default.nix Normal file
View File

@ -0,0 +1,39 @@
# Non-flake entry point for building out-of-tree with gomod2nix.
# The default pkgs is pinned through flake.lock, so `nix-build` uses
# the same nixpkgs/gomod2nix revisions as the flake does.
{ pkgs ? (
    let
      inherit (builtins) fetchTree fromJSON readFile;
      inherit ((fromJSON (readFile ./flake.lock)).nodes) nixpkgs gomod2nix;
    in
    import (fetchTree nixpkgs.locked) {
      overlays = [
        (import "${fetchTree gomod2nix.locked}/overlay.nix")
      ];
    }
  )
, lib
, version
}:

pkgs.buildGoApplication rec {
  pname = "out-of-tree";
  inherit version;

  nativeBuildInputs = [ pkgs.makeWrapper ];

  src = ./.;
  pwd = ./.;

  # tests are not run as part of the build
  doCheck = false;

  # runtime dependencies (qemu, podman, openssl) are injected on PATH
  # through a wrapper around the final binary
  postFixup = ''
    wrapProgram $out/bin/out-of-tree \
      --prefix PATH : "${lib.makeBinPath [ pkgs.qemu pkgs.podman pkgs.openssl ]}"
  '';

  meta = with lib; {
    description = "kernel {module, exploit} development tool";
    homepage = "https://out-of-tree.io";
    maintainers = [ maintainers.dump_stack ];
    license = licenses.agpl3Plus;
  };
}

217
distro/centos/centos.go Normal file
View File

@ -0,0 +1,217 @@
package centos
import (
"fmt"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// init registers CentOS 6-8 in the global distro registry.
func init() {
	for _, release := range []string{"6", "7", "8"} {
		distro.Register(CentOS{release: release})
	}
}
// CentOS implements distro support for a single CentOS release
// ("6", "7" or "8").
type CentOS struct {
	release string // major release number as a string
}
// Equal reports whether d denotes the same CentOS release.
func (centos CentOS) Equal(d distro.Distro) bool {
	return d.ID == distro.CentOS && d.Release == centos.release
}
// Distro returns the generic distro descriptor for this release.
func (centos CentOS) Distro() distro.Distro {
	return distro.Distro{
		ID:      distro.CentOS,
		Release: centos.release,
	}
}
// Packages lists all kernel packages available for this CentOS
// release, building the distro container image first if needed.
func (centos CentOS) Packages() (pkgs []string, err error) {
	c, err := container.New(centos.Distro())
	if err != nil {
		return
	}

	if !c.Exist() {
		err = c.Build("centos:"+centos.release,
			centos.envs(), centos.runs())
		if err != nil {
			return
		}
	}

	// list installable kernel-* package names, excluding sources
	cmd := "yum search kernel --showduplicates 2>/dev/null " +
		"| grep '^kernel-[0-9]' " +
		"| grep -v src " +
		"| cut -d ' ' -f 1"

	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
// Kernels delegates kernel discovery to the distro container.
func (centos CentOS) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(centos.Distro())
	if err != nil {
		return
	}

	return c.Kernels()
}
// envs returns extra environment variables for the container build;
// CentOS needs none.
func (centos CentOS) envs() (envs []string) {
	return
}
// runs returns the shell commands that build the CentOS container
// image: point yum at the (mostly vault.centos.org) repositories for
// this release, install toolchain/firmware packages, and warm the
// yum cache with one kernel install/remove cycle.
func (centos CentOS) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	var repos []string

	// TODO refactor
	switch centos.release {
	case "6":
		repofmt := "[6.%d-%s]\\nbaseurl=https://vault.centos.org/6.%d/%s/$basearch/\\ngpgcheck=0"
		for i := 0; i <= 10; i++ {
			repos = append(repos, fmt.Sprintf(repofmt, i, "os", i, "os"))
			repos = append(repos, fmt.Sprintf(repofmt, i, "updates", i, "updates"))
		}

		cmdf("rm /etc/yum.repos.d/*")
	case "7":
		repofmt := "[%s-%s]\\nbaseurl=https://vault.centos.org/%s/%s/$basearch/\\ngpgcheck=0"
		for _, ver := range []string{
			"7.0.1406", "7.1.1503", "7.2.1511",
			"7.3.1611", "7.4.1708", "7.5.1804",
			"7.6.1810", "7.7.1908", "7.8.2003",
		} {
			repos = append(repos, fmt.Sprintf(repofmt, ver, "os", ver, "os"))
			repos = append(repos, fmt.Sprintf(repofmt, ver, "updates", ver, "updates"))
		}

		// FIXME http/gpgcheck=0
		repofmt = "[%s-%s]\\nbaseurl=http://mirror.centos.org/centos-7/%s/%s/$basearch/\\ngpgcheck=0"
		repos = append(repos, fmt.Sprintf(repofmt, "7.9.2009", "os", "7.9.2009", "os"))
		repos = append(repos, fmt.Sprintf(repofmt, "7.9.2009", "updates", "7.9.2009", "updates"))
	case "8":
		repofmt := "[%s-%s]\\nbaseurl=https://vault.centos.org/%s/%s/$basearch/os/\\ngpgcheck=0"
		for _, ver := range []string{
			"8.0.1905", "8.1.1911", "8.2.2004",
			"8.3.2011", "8.4.2105", "8.5.2111",
		} {
			repos = append(repos, fmt.Sprintf(repofmt, ver, "baseos", ver, "BaseOS"))
			repos = append(repos, fmt.Sprintf(repofmt, ver, "appstream", ver, "AppStream"))
		}
	default:
		log.Fatal().Msgf("no support for centos %s", centos.release)
		return
	}

	// disable the stock repositories; only the pinned ones above stay
	cmdf("sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/* || true")

	for _, repo := range repos {
		cmdf("echo -e '%s' >> /etc/yum.repos.d/oot.repo\n", repo)
	}

	// do not remove old kernels
	cmdf("sed -i 's;installonly_limit=;installonly_limit=100500;' /etc/yum.conf")
	cmdf("yum -y update")

	cmdf("yum -y groupinstall 'Development Tools'")

	// TODO do not use lexicographical comparison, change to parse int
	if centos.release <= "6" {
		cmdf("yum -y install kernel-firmware")
	} else {
		cmdf("yum -y install linux-firmware")
	}

	if centos.release < "8" {
		cmdf("yum -y install deltarpm")
	} else {
		cmdf("yum -y install grub2-tools-minimal elfutils-libelf-devel")
	}

	var flags string
	if centos.release >= "8" {
		flags = "--noautoremove"
	}

	// Install and remove a single kernel and headers.
	// This ensures that all dependencies are cached.
	cmd := "export HEADERS=$(yum search kernel-devel --showduplicates " +
		"| grep '^kernel-devel' | cut -d ' ' -f 1 | head -n 1)"
	cmd += " KERNEL=$(echo $HEADERS | sed 's/-devel//')"
	cmd += " MODULES=$(echo $HEADERS | sed 's/-devel/-modules/')"
	cmd += " CORE=$(echo $HEADERS | sed 's/-devel/-core/')"
	cmd += " && yum -y install $KERNEL $HEADERS"
	cmd += " && yum -y remove %s $KERNEL $HEADERS $MODULES $CORE"
	cmdf(cmd, flags)

	return
}
// Install installs the given kernel package (and, optionally, the
// matching kernel-devel headers) inside the container, regenerates
// the initramfs, and copies boot files, modules and sources into the
// /target-prefixed container volumes.
func (centos CentOS) Install(pkgname string, headers bool) (err error) {
	var headerspkg string
	if headers {
		headerspkg = strings.Replace(pkgname, "kernel", "kernel-devel", -1)
	}

	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	cmdf("yum -y install %s %s", pkgname, headerspkg)

	version := strings.Replace(pkgname, "kernel-", "", -1)
	// older dracut (<= el7) has no --force-drivers option
	if centos.release <= "7" {
		cmdf("dracut -v --add-drivers 'e1000 ext4' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	} else {
		cmdf("dracut -v --add-drivers 'ata_piix libata' "+
			"--force-drivers 'e1000 ext4 sd_mod' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	}

	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(centos.Distro())
	if err != nil {
		return
	}

	// redirect the container volumes under /target so the copied
	// artifacts land outside the image
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	// Redundant trailing `if err != nil { return }; return` collapsed:
	// the run error is returned directly.
	_, err = c.Run("", commands)
	return
}
// RootFS returns the root filesystem image name for this release.
func (centos CentOS) RootFS() string {
	name := fmt.Sprintf("out_of_tree_centos_%s.img", centos.release)
	return name
}

View File

@ -0,0 +1,19 @@
package centos
import (
"testing"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// TestCentOS checks distro equality and that package discovery for
// CentOS 7 yields a non-empty list (requires container support).
func TestCentOS(t *testing.T) {
	assert := assert.New(t)

	u := CentOS{release: "7"}

	assert.True(u.Equal(distro.Distro{Release: "7", ID: distro.CentOS}))

	assert.NotEmpty(u.Packages())
}

53
distro/debian/cache.go Normal file
View File

@ -0,0 +1,53 @@
package debian
import (
"errors"
"sync"
"github.com/rapidloop/skv"
)
// Cache is a persistent skv-backed store of Debian kernel metadata.
type Cache struct {
	store *skv.KVStore
}

// cache is not thread-safe, so make sure there are only one user;
// the mutex is held from NewCache until Close.
var mu sync.Mutex
// NewCache opens (or creates) the cache at path. It takes the package
// mutex, which is released by Close — so at most one Cache is open at
// a time.
func NewCache(path string) (c *Cache, err error) {
	mu.Lock()
	c = &Cache{}
	c.store, err = skv.Open(path)
	if err != nil {
		// Previously the mutex stayed locked on failure (the caller
		// never calls Close on a failed cache), deadlocking every
		// subsequent NewCache.
		mu.Unlock()
		c = nil
	}
	return
}
// Put stores the kernels under the package version of the first
// element; an empty slice is rejected.
func (c Cache) Put(p []DebianKernel) error {
	if len(p) == 0 {
		return errors.New("empty slice")
	}
	key := p[0].Version.Package
	return c.store.Put(key, p)
}
// Get returns the cached kernels for a package version, or
// skv.ErrNotFound when nothing is stored.
//
// NOTE(review): any other store error is also replaced by ErrNotFound
// whenever the decoded slice is empty — confirm this masking is
// intended.
func (c Cache) Get(version string) (p []DebianKernel, err error) {
	err = c.store.Get(version, &p)
	if len(p) == 0 {
		err = skv.ErrNotFound
	}
	return
}
// PutVersions stores the list of known package versions under the
// fixed "versions" key.
func (c Cache) PutVersions(versions []string) error {
	return c.store.Put("versions", versions)
}

// GetVersions returns the list stored by PutVersions.
func (c Cache) GetVersions() (versions []string, err error) {
	err = c.store.Get("versions", &versions)
	return
}
// Close closes the underlying store and releases the package mutex
// taken by NewCache, allowing the next Cache to be opened.
func (c Cache) Close() (err error) {
	err = c.store.Close()
	mu.Unlock()
	return
}

106
distro/debian/cache_test.go Normal file
View File

@ -0,0 +1,106 @@
package debian
import (
"os"
"path/filepath"
"testing"
"github.com/rapidloop/skv"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
)
// TestCache checks the Put/Get round-trip, persistence across a
// close/reopen cycle, and the not-found behavior for unknown keys.
func TestCache(t *testing.T) {
	dir, err := os.MkdirTemp("", "out-of-tree_cache_test_")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "debian.cache")

	c, err := NewCache(path)
	if err != nil {
		t.Fatal(err)
	}

	image := snapshot.Package{}
	image.Deb.Hash = "12345"

	version := "4.17.14-1"

	dk := DebianKernel{
		Version: DebianKernelVersion{Package: version},
		Image:   image,
	}

	err = c.Put([]DebianKernel{dk})
	if err != nil {
		t.Fatal(err)
	}

	dk2s, err := c.Get(version)
	if err != nil {
		t.Fatal(err)
	}
	dk2 := dk2s[0]

	if dk.Image.Deb.Hash != dk2.Image.Deb.Hash {
		t.Fatalf("mismatch")
	}

	// reopen to verify on-disk persistence
	c.Close()

	c, err = NewCache(path)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	dk3s, err := c.Get(version)
	if err != nil {
		t.Fatal(err)
	}
	dk3 := dk3s[0]

	if dk.Image.Deb.Hash != dk3.Image.Deb.Hash {
		t.Fatalf("mismatch")
	}

	// unknown keys must surface skv.ErrNotFound
	_, err = c.Get("key not exist")
	if err == nil || err != skv.ErrNotFound {
		t.Fatal(err)
	}
}
// TestVersionsCache checks the PutVersions/GetVersions round-trip.
func TestVersionsCache(t *testing.T) {
	dir, err := os.MkdirTemp("", "out-of-tree_cache_test_")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "debian.cache")

	c, err := NewCache(path)
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	versions := []string{"a", "b", "c"}

	err = c.PutVersions(versions)
	if err != nil {
		t.Fatal(err)
	}

	result, err := c.GetVersions()
	if err != nil {
		t.Fatal(err)
	}

	if len(versions) != len(result) {
		t.Fatal("mismatch")
	}
}

535
distro/debian/debian.go Normal file
View File

@ -0,0 +1,535 @@
package debian
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// init registers every supported Debian release in the global distro
// registry.
func init() {
	for _, r := range []Release{
		Wheezy, Jessie, Stretch,
		Buster, Bullseye, Bookworm,
	} {
		distro.Register(Debian{release: r})
	}
}
// Debian implements distro support for a single Debian release.
type Debian struct {
	release Release
}
// Equal reports whether dd denotes the same Debian release; the
// release string may be numeric ("12") or a codename ("bookworm").
func (d Debian) Equal(dd distro.Distro) bool {
	return dd.ID == distro.Debian &&
		ReleaseFromString(dd.Release) == d.release
}
// Distro returns the generic distro descriptor for this release.
// Keyed fields replace the previous unkeyed composite literal (go vet
// `composites`; also consistent with CentOS.Distro).
func (d Debian) Distro() distro.Distro {
	return distro.Distro{ID: distro.Debian, Release: d.release.String()}
}
// Packages lists the kernel image package names available for this
// release, building the container image first if needed and filtering
// out kernels that predate the release's baseline version.
func (d Debian) Packages() (packages []string, err error) {
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}

	if !c.Exist() {
		err = c.Build(d.image(), d.envs(), d.runs())
		if err != nil {
			return
		}
	}

	kernels, err := GetKernels()
	if err != nil {
		log.Error().Err(err).Msg("get kernels")
		return
	}

	for _, dk := range kernels {
		if d.release != dk.Release {
			continue
		}

		version := kver(dk.Version.Package)

		// filter out pre-release kernels
		switch dk.Release {
		case Wheezy:
			if version.LessThan(kver("3.2-rc0")) {
				continue
			}
		case Jessie:
			if version.LessThan(kver("3.16-rc0")) {
				continue
			}
		case Stretch:
			if version.LessThan(kver("4.9-rc0")) {
				continue
			}
		case Buster:
			if version.LessThan(kver("4.19-rc0")) {
				continue
			}
		case Bullseye:
			if version.LessThan(kver("5.10-rc0")) {
				continue
			}
		case Bookworm:
			if version.LessThan(kver("6.1-rc0")) {
				continue
			}
		}

		p := dk.Image.Deb.Name[:len(dk.Image.Deb.Name)-4] // w/o .deb
		packages = append(packages, p)
	}

	return
}
// Release enumerates Debian releases in chronological order, so the
// integer values compare meaningfully (see e.g. repositories and
// runs, which use <= / >= on releases).
type Release int

const (
	None Release = iota
	Buzz
	Hamm
	Woody
	Etch
	Lenny
	Squeeze
	Wheezy
	Jessie
	Stretch
	Buster
	Bullseye
	Bookworm
)
// ReleaseStrings maps a Release value (by index) to its codename;
// index 0 is the empty name for None. Keep in sync with the Release
// constants above.
var ReleaseStrings = [...]string{
	"",
	"buzz",
	"hamm",
	"woody",
	"etch",
	"lenny",
	"squeeze",
	"wheezy",
	"jessie",
	"stretch",
	"buster",
	"bullseye",
	"bookworm",
}
// Name returns the release codename (e.g. "bookworm").
func (cn Release) Name() string {
	return ReleaseStrings[cn]
}

// String returns the numeric release number (e.g. "12" for bookworm).
func (cn Release) String() string {
	return fmt.Sprintf("%d", cn)
}
// ReleaseFromString parses a release given either as a number ("7")
// or as a codename ("wheezy", case-insensitive); unknown input maps
// to None.
func ReleaseFromString(s string) (r Release) {
	needle := strings.ToLower(s)
	// Walk the known releases (index == Release value) and accept
	// either the codename or the numeric form.
	for i := 1; i < len(ReleaseStrings); i++ {
		rel := Release(i)
		if needle == ReleaseStrings[i] || needle == rel.String() {
			return rel
		}
	}
	return None
}
// envs returns the environment for the container build; apt must run
// non-interactively.
func (d Debian) envs() []string {
	return []string{"DEBIAN_FRONTEND=noninteractive"}
}
// image returns the base container image for this release; archived
// releases are pinned to the last published tag.
func (d Debian) image() string {
	pinned := map[Release]string{
		Wheezy:  "wheezy-20190228",
		Jessie:  "jessie-20210326",
		Stretch: "stretch-20220622",
	}
	tag, ok := pinned[d.release]
	if !ok {
		tag = d.release.Name()
	}
	return "debian:" + tag
}
// repositories returns snapshot.debian.org apt source lines for
// archived releases; for current releases it returns nil and the
// stock sources are used instead (see runs).
func repositories(release Release) (repos []string) {
	var snapshot string

	switch release {
	// Latest snapshots that include release
	case Wheezy:
		// doesn't include snapshot repos in /etc/apt/source.list
		snapshot = "20190321T212815Z"
	case Jessie:
		snapshot = "20230322T152120Z"
	case Stretch:
		snapshot = "20230423T032533Z"
	default:
		return
	}

	repo := func(archive, s string) {
		format := "deb [check-valid-until=no trusted=yes] " +
			"http://snapshot.debian.org/archive/%s/%s " +
			"%s%s main"
		r := fmt.Sprintf(format, archive, snapshot, release.Name(), s)
		repos = append(repos, r)
	}

	repo("debian", "")
	repo("debian", "-updates")
	// backports only for wheezy and older (numeric Release compare)
	if release <= 7 {
		repo("debian", "-backports")
	}
	repo("debian-security", "/updates")

	return
}
// runs returns the shell commands that build the Debian container
// image: configure apt sources (snapshot repos for archived
// releases), install the build toolchain, and prepare /lib/modules.
func (d Debian) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	repos := repositories(d.release)

	if len(repos) != 0 {
		// archived release: replace sources with pinned snapshots
		cmdf("rm /etc/apt/sources.list")
		for _, repo := range repos {
			cmdf("echo '%s' >> /etc/apt/sources.list", repo)
		}
	} else {
		// fall back to the snapshot entries already present in the
		// stock sources.list when a plain update fails
		cmdf("apt-get update || sed -i " +
			"-e '/snapshot/!d' " +
			"-e 's/# deb/deb [check-valid-until=no trusted=yes]/' " +
			"/etc/apt/sources.list")
	}

	// retry; snapshot.debian.org is rate-limited/flaky
	cmdf("apt-get update || apt-get update || apt-get update")

	pkglist := []string{
		"wget", "build-essential", "libelf-dev", "git",
		"kmod", "linux-base", "libssl-dev",
		"firmware-linux-free",
		"libxml2", "libglib2.0.0", "irqbalance", "libcap-ng0",
		"libnuma1", "sgml-base", "shared-mime-info", "xdg-user-dirs",
		"xml-core", "python3",
	}

	gccs := "'^(gcc-[0-9].[0-9]|gcc-[0-9]|gcc-[1-9][0-9])$'"
	pkglist = append(pkglist, gccs)

	if d.release >= 8 {
		pkglist = append(pkglist, "initramfs-tools")
	} else {
		// by default Debian backports repositories have a lower
		// priority than stable, so we should specify it manually
		cmdf("apt-get -y install -t %s-backports "+
			"initramfs-tools", d.release.Name())
	}

	if d.release >= 9 {
		pkglist = append(pkglist, "apparmor")
	}

	if d.release < 9 {
		pkglist = append(pkglist, "module-init-tools")
	}

	var packages string
	for _, pkg := range pkglist {
		packages += fmt.Sprintf("%s ", pkg)
	}

	// retry with growing timeouts for flaky mirrors
	cmdf("timeout 5m apt-get install -y %s "+
		"|| timeout 10m apt-get install -y %s "+
		"|| apt-get install -y %s", packages, packages, packages)

	if d.release == Wheezy {
		// We need newer libc for deb8*~bpo70+1
		format := "deb [check-valid-until=no trusted=yes] " +
			"http://snapshot.debian.org/archive/debian/%s " +
			"jessie main"

		// Keep it here not in repos to have apt-priority close
		repo := fmt.Sprintf(format, "20190321T212815Z")

		cmdf("echo '%s' >> /etc/apt/sources.list", repo)
		cmdf("echo 'Package: *' >> /etc/apt/preferences.d/jessie")
		cmdf("echo 'Pin: release a=jessie' >> /etc/apt/preferences.d/jessie")
		cmdf("echo 'Pin-Priority: 10' >> /etc/apt/preferences.d/jessie")

		cmdf("apt-get -y update")
		// glibc guarantee backwards compatibility, so should be no problem
		cmdf("apt-get -y install -t jessie libc6-dev")
	}

	cmdf("mkdir -p /lib/modules")

	return
}
// Kernels enumerates installed kernels by scanning the container's
// volumes directory for linux-image packages and locating their
// vmlinuz, initrd and modules artifacts. Incomplete installs are
// logged and skipped, not treated as errors.
func (d Debian) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}
	if !c.Exist() {
		// no container yet means nothing installed; not an error
		return
	}
	cpath := dotfiles.Dir("volumes", c.Name())
	rootfs := dotfiles.File("images", c.Name()+".img")
	files, err := os.ReadDir(cpath)
	if err != nil {
		return
	}
	for _, file := range files {
		if !strings.Contains(file.Name(), "linux-image") {
			continue
		}
		pkgname := file.Name()
		kpkgdir := filepath.Join(cpath, pkgname)
		bootdir := filepath.Join(kpkgdir, "boot")
		vmlinuz, err := fs.FindBySubstring(bootdir, "vmlinuz")
		if err != nil {
			log.Warn().Msgf("cannot find vmlinuz for %s", pkgname)
			continue
		}
		initrd, err := fs.FindBySubstring(bootdir, "initrd")
		if err != nil {
			log.Warn().Msgf("cannot find initrd for %s", pkgname)
			continue
		}
		modulesdir := filepath.Join(kpkgdir, "lib/modules")
		modules, err := fs.FindBySubstring(modulesdir, "")
		if err != nil {
			log.Warn().Msgf("cannot find modules for %s", pkgname)
			continue
		}
		log.Debug().Msgf("%s %s %s", vmlinuz, initrd, modules)
		// linux-image-<release>... -> <release>...
		release := strings.Replace(pkgname, "linux-image-", "", -1)
		ki := distro.KernelInfo{
			Distro: d.Distro(),
			KernelVersion: path.Base(modules),
			KernelRelease: release,
			ContainerName: c.Name(),
			KernelPath: vmlinuz,
			InitrdPath: initrd,
			ModulesPath: modules,
			RootFS: rootfs,
			Package: pkgname,
		}
		// NOTE(review): these versions get the smap=off CPU flag —
		// presumably they misbehave under qemu SMAP; confirm
		smapBlocklist := []string{
			"3.10.5-1~bpo70+1",
			"3.10.11-1~bpo70+1",
			"3.9.6-1~bpo70+1",
		}
		for _, ver := range smapBlocklist {
			if strings.Contains(release, ver) {
				ki.CPU.Flags = append(ki.CPU.Flags, "smap=off")
			}
		}
		kernels = append(kernels, ki)
	}
	return
}
// volumes returns the standard bind mounts (modules, sources, boot)
// for the given kernel package directory.
func (d Debian) volumes(pkgname string) (volumes []container.Volume) {
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}

	base := filepath.Join("volumes", c.Name(), pkgname)

	for _, dir := range []string{"/lib/modules", "/usr/src", "/boot"} {
		volumes = append(volumes, container.Volume{
			Src:  dotfiles.Dir(base, dir),
			Dest: dir,
		})
	}
	return
}
// Install downloads the kernel image (and, if headers is set, the
// headers plus build dependencies) as .deb files inside the container,
// installs them from a local apt repository, and copies the resulting
// /boot, /lib/modules and /usr/src into host-side volumes.
// On any failure the package's volumes directory is cleaned up.
func (d Debian) Install(pkgname string, headers bool) (err error) {
	defer func() {
		if err != nil {
			d.cleanup(pkgname)
		}
	}()
	dk, err := getCachedKernel(pkgname + ".deb")
	if err != nil {
		return
	}
	var pkgs []snapshot.Package
	if headers {
		pkgs = dk.Packages()
	} else {
		pkgs = []snapshot.Package{dk.Image}
	}
	var commands []string
	// cmdf appends one formatted shell command
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}
	for _, pkg := range pkgs {
		// prefer a locally cached deb when available
		found, newurl := cache.PackageURL(
			distro.Debian,
			pkg.Deb.URL,
		)
		if found {
			log.Debug().Msgf("cached deb found %s", newurl)
			pkg.Deb.URL = newurl
		}
		// TODO use faketime on old releases?
		// plain http: old releases cannot validate modern TLS
		pkg.Deb.URL = strings.Replace(pkg.Deb.URL, "https", "http", -1)
		cmdf("wget --no-verbose " +
			"--timeout=10 --waitretry=1 --tries=10 " +
			"--no-check-certificate " + pkg.Deb.URL)
	}
	// prepare local repository
	cmdf("mkdir debs && mv *.deb debs/")
	cmdf("dpkg-scanpackages debs /dev/null | gzip > debs/Packages.gz")
	cmdf(`echo "deb [trusted=yes] file:$(pwd) debs/" >> /etc/apt/sources.list.d/local.list`)
	cmdf("apt-get update -o Dir::Etc::sourcelist='sources.list.d/local.list' -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0'")
	// make sure apt-get will not download the repo version
	cmdf("echo 'Package: *' >> /etc/apt/preferences.d/pin")
	cmdf(`echo 'Pin: origin "*.debian.org"' >> /etc/apt/preferences.d/pin`)
	cmdf("echo 'Pin-Priority: 100' >> /etc/apt/preferences.d/pin")
	// cut package names and install
	cmdf("ls debs | grep deb | cut -d '_' -f 1 | " +
		"xargs apt-get -y --force-yes install")
	// for debug
	cmdf("ls debs | grep deb | cut -d '_' -f 1 | xargs apt-cache policy")
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}
	// mount the host volumes under /target and copy the installed
	// artifacts out of the container filesystem
	c.Volumes = d.volumes(pkgname)
	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -rL /usr/src /target/usr/")
	_, err = c.Run("", commands)
	if err != nil {
		return
	}
	return
}
// cleanup removes the volumes directory of a (partially) installed
// kernel package; failures are logged but not fatal.
func (d Debian) cleanup(pkgname string) {
	c, err := container.New(d.Distro())
	if err != nil {
		return
	}

	pkgdir := dotfiles.Dir(filepath.Join("volumes", c.Name(), pkgname))
	log.Debug().Msgf("cleanup %s", pkgdir)

	if err := os.RemoveAll(pkgdir); err != nil {
		log.Warn().Err(err).Msg("cleanup")
	}
}
// RootFS returns the rootfs image file name for this release.
func (d Debian) RootFS() string {
	return "out_of_tree_debian_" + d.release.String() + ".img"
}

View File

@ -0,0 +1,24 @@
package debian
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// TestDebian checks distro matching and, outside CI, that the package
// list is non-empty (which requires a container backend).
func TestDebian(t *testing.T) {
	assert := assert.New(t)

	d := Debian{release: Wheezy}
	assert.True(d.Equal(distro.Distro{Release: "wheezy", ID: distro.Debian}))

	if os.Getenv("CI") != "" {
		t.Skip("skip testing in CI")
	}

	assert.NotEmpty(d.Packages())
}

467
distro/debian/kernel.go Normal file
View File

@ -0,0 +1,467 @@
package debian
import (
"errors"
"math"
"strings"
"time"
"github.com/Masterminds/semver"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot/metasnap"
"code.dumpstack.io/tools/out-of-tree/fs"
)
// DebianKernelVersion describes a Debian kernel package version.
//
// Example deb: linux-headers-4.17.0-2-amd64_4.17.14-1_amd64.deb
type DebianKernelVersion struct {
	// Package version, e.g. "4.17.14-1"
	// See tags in https://salsa.debian.org/kernel-team/linux
	Package string
	// ABI version, e.g. "4.17.0-2"
	ABI string
}

// ParseKernelVersion splits a deb file name such as
// linux-image-4.11.0-trunk-amd64_4.11-1~exp2_amd64.deb into the ABI
// ("4.11.0-trunk") and the package version ("4.11-1~exp2").
func ParseKernelVersion(pkg string) (dkv DebianKernelVersion, err error) {
	badFormat := errors.New("incorrect input format")

	// linux-image-4.11.0-trunk-amd64_4.11-1~exp2_amd64.deb
	// -> [4.11.0-trunk-amd64 4.11-1~exp2 amd64.deb]
	fields := strings.Split(
		strings.Replace(pkg, "linux-image-", "", -1), "_")
	if len(fields) != 3 {
		return dkv, badFormat
	}

	// 4.11.0-trunk-amd64 -> 4.11.0-trunk
	// TODO other archs?
	if dkv.ABI = strings.Split(fields[0], "-amd64")[0]; dkv.ABI == "" {
		return dkv, badFormat
	}

	if dkv.Package = fields[1]; dkv.Package == "" {
		return dkv, badFormat
	}

	return dkv, nil
}
// DebianKernel bundles the binary packages of one Debian kernel build:
// the image, its headers, and extra build dependencies.
type DebianKernel struct {
	Version DebianKernelVersion
	Image snapshot.Package
	Headers []snapshot.Package
	Dependencies []snapshot.Package
	// FIXME There is a better way
	// Internal holds cache bookkeeping, not package data.
	Internal struct {
		// Invalid marks entries that cannot be used; they are
		// re-fetched after RefetchDays.
		Invalid bool
		// LastFetch is when this entry was last fetched.
		LastFetch time.Time
	}
	Release Release
}
// HasDependency reports whether any dependency package name contains
// pkgname as a substring.
func (dk DebianKernel) HasDependency(pkgname string) bool {
	for _, dep := range dk.Dependencies {
		if strings.Contains(dep.Name, pkgname) {
			return true
		}
	}

	return false
}
// Packages returns the image, headers and dependency packages
// flattened into a single slice (image first).
func (dk DebianKernel) Packages() (pkgs []snapshot.Package) {
	pkgs = make([]snapshot.Package, 0,
		1+len(dk.Headers)+len(dk.Dependencies))

	pkgs = append(pkgs, dk.Image)
	pkgs = append(pkgs, dk.Headers...)
	pkgs = append(pkgs, dk.Dependencies...)
	return
}
// kver normalizes a Debian version string into semver form.
// Use only for inline comparison: it panics on unparseable input.
func kver(ver string) *semver.Version {
	normalized := strings.NewReplacer("~", "-", "+", "-").Replace(ver)
	return semver.MustParse(normalized)
}
var (
	// ErrNoBinaryPackages: the snapshot API returned no binary
	// packages for the requested source version.
	ErrNoBinaryPackages = errors.New("no binary packages found")
	// ErrNoHeadersPackage: no linux-headers package matched.
	ErrNoHeadersPackage = errors.New("no headers package found")
	// ErrNoImagePackage: no linux-image package matched.
	ErrNoImagePackage = errors.New("no image package found")
)
// getDebianKernel fetches the image/headers/dependency binary packages
// for one linux source package version from snapshot.debian.org and
// resolves the Debian release via metasnap.
func getDebianKernel(version string) (dk DebianKernel, err error) {
	flog := log.With().
		Str("version", version).
		Logger()
	dk.Version.Package = version
	// amd64-only package name selector
	regex := `^(linux-(image|headers)-[a-z+~0-9\.\-]*-(common|amd64|amd64-unsigned)|linux-kbuild-.*|linux-compiler-.*-x86)$`
	// flavours excluded from the result
	filter := []string{
		"rt-amd64",
		"cloud-amd64",
		"all-amd64",
		"dbg",
	}
	packages, err := snapshot.Packages("linux", version, regex,
		[]string{"amd64", "all"}, filter)
	if err != nil {
		return
	}
	if len(packages) == 0 {
		err = ErrNoBinaryPackages
		return
	}
	// classify by substring: image, headers, everything else is a
	// build dependency
	var imageFound, headersFound bool
	for _, p := range packages {
		if strings.Contains(p.Name, "image") {
			imageFound = true
			dk.Image = p
		} else if strings.Contains(p.Name, "headers") {
			headersFound = true
			dk.Headers = append(dk.Headers, p)
		} else {
			dk.Dependencies = append(dk.Dependencies, p)
		}
	}
	if !imageFound {
		err = ErrNoImagePackage
		return
	}
	if !headersFound {
		err = ErrNoHeadersPackage
		return
	}
	// linux-image-4.17.0-2-amd64 -> 4.17.0-2
	s := strings.Replace(dk.Image.Name, "linux-image-", "", -1)
	dk.Version.ABI = strings.Replace(s, "-amd64", "", -1)
	dk.Release = getRelease(dk.Image)
	if dk.Release == None {
		flog.Warn().Msg("release not found")
	} else {
		flog.Debug().Msgf("release is %s", dk.Release.Name())
	}
	return
}
// getRelease maps a package's metasnap repository records to a known
// Debian release, matching the plain suite name as well as its
// -backports/-updates/-proposed-updates variants; None if no match.
func getRelease(p snapshot.Package) Release {
	repos, err := metasnap.GetRepos(p.Repo.Archive, p.Name, p.Arch, p.Version)
	if err != nil {
		log.Debug().Err(err).Msg("metasnap")
		return None
	}

	suffixes := []string{"", "-backports", "-updates", "-proposed-updates"}

	for _, repo := range repos {
		for _, rel := range ReleaseStrings[1:] {
			for _, suffix := range suffixes {
				if repo.Suite == rel+suffix {
					return ReleaseFromString(rel)
				}
			}
		}
	}

	return None
}
// getCachedKernel looks up a cached DebianKernel by deb package file
// name, matching either the image package or any headers package.
func getCachedKernel(deb string) (dk DebianKernel, err error) {
	c, err := NewCache(CachePath)
	if err != nil {
		log.Error().Err(err).Msg("cache")
		return
	}
	defer c.Close()

	versions, err := c.GetVersions()
	if err != nil {
		log.Error().Err(err).Msg("get source package versions from cache")
		return
	}

	for _, version := range versions {
		var tmpdks []DebianKernel
		tmpdks, err = c.Get(version)
		if err != nil {
			continue
		}
		// guard against an empty cache entry: indexing [0]
		// unconditionally would panic
		if len(tmpdks) == 0 {
			continue
		}
		tmpdk := tmpdks[0]

		if deb == tmpdk.Image.Deb.Name {
			dk = tmpdk
			return
		}

		for _, h := range tmpdk.Headers {
			if deb == h.Deb.Name {
				dk = tmpdk
				return
			}
		}
	}

	return
}
// kbuildVersion picks the linux-tools version exactly matching
// kpkgver, or falls back to any version sharing the same major.minor;
// returns "" when neither exists.
func kbuildVersion(versions []string, kpkgver string) string {
	for _, candidate := range versions {
		if candidate == kpkgver {
			return candidate
		}
	}

	want := kver(kpkgver)

	// Not able to find the exact version, try similar
	for _, candidate := range versions {
		have := kver(candidate)
		// major and minor must agree for the kbuild to be usable
		if want.Major() == have.Major() &&
			want.Minor() == have.Minor() {

			return candidate
		}
	}

	return ""
}
// findKbuild locates the linux-kbuild binary package from the
// linux-tools source package version matching kpkgver.
func findKbuild(versions []string, kpkgver string) (
	pkg snapshot.Package, err error) {

	version := kbuildVersion(versions, kpkgver)
	if version == "" {
		err = errors.New("cannot find kbuild version")
		return
	}

	packages, err := snapshot.Packages("linux-tools", version,
		`^linux-kbuild`, []string{"amd64"}, []string{"dbg"})
	if err != nil {
		return
	}

	if len(packages) == 0 {
		// must return here: falling through to packages[0]
		// (as the previous code did) panics on an empty slice
		err = errors.New("cannot find kbuild package")
		return
	}

	pkg = packages[0]
	return
}
// updateKbuild replaces the kbuild dependency of pre-4.5 kernels with
// one built from the linux-tools source package; marks dk invalid when
// no matching kbuild can be found. Kernels 4.5+ are left untouched.
func updateKbuild(toolsVersions []string, dk *DebianKernel) {
	if !kver(dk.Version.Package).LessThan(kver("4.5-rc0")) {
		// 4.5+ builds kbuild from the linux source itself
		return
	}
	// drop any existing kbuild dependency before re-resolving it
	var deps []snapshot.Package
	for _, pkg := range dk.Dependencies {
		if strings.Contains(pkg.Name, "kbuild") {
			continue
		}
		deps = append(deps, pkg)
	}
	dk.Dependencies = deps
	kbuildpkg, err := findKbuild(toolsVersions, dk.Version.Package)
	if err != nil {
		dk.Internal.Invalid = true
		return
	}
	dk.Dependencies = append(dk.Dependencies, kbuildpkg)
}
// getKernelsByVersion returns the kernels for one source package
// version, preferring the cache; mode optionally triggers release or
// kbuild refreshes of cached entries. fromcache reports whether a
// network fetch was avoided (used for rate-limit accounting by the
// caller).
func getKernelsByVersion(slog zerolog.Logger, c *Cache, toolsVersions []string,
	version string, mode GetKernelsMode) (kernels []DebianKernel,
	fromcache bool) {
	var dk DebianKernel
	dks, err := c.Get(version)
	if err == nil {
		// NOTE(review): assumes c.Get never returns an empty
		// slice with a nil error — confirm in the Cache impl
		dk = dks[0]
		if !dk.Internal.Invalid {
			// TODO refactor
			slog.Trace().Msgf("found in cache")
			if dk.Release == None && mode&UpdateRelease != 0 {
				slog.Debug().Msg("update release")
				dk.Release = getRelease(dk.Image)
				if dk.Release != None {
					slog.Debug().Msg("update cache")
					err = c.Put([]DebianKernel{dk})
					if err != nil {
						slog.Error().Err(err).Msg("")
						return
					}
				}
			}
			if mode&UpdateKbuild != 0 {
				slog.Debug().Msg("update kbuild")
				updateKbuild(toolsVersions, &dk)
				slog.Debug().Msg("update cache")
				err = c.Put([]DebianKernel{dk})
				if err != nil {
					slog.Error().Err(err).Msg("")
					return
				}
			}
			kernels = append(kernels, dk)
			fromcache = true
			return
		}
	}
	if dk.Internal.Invalid {
		// back off re-fetching known-bad versions for RefetchDays
		refetch := dk.Internal.LastFetch.AddDate(0, 0, RefetchDays)
		if refetch.After(time.Now()) {
			slog.Trace().Msgf("refetch at %v", refetch)
			return
		}
	}
	dk, err = getDebianKernel(version)
	if err != nil {
		if err == ErrNoBinaryPackages {
			slog.Warn().Err(err).Msg("")
		} else {
			slog.Error().Err(err).Msg("get debian kernel")
		}
		dk.Internal.Invalid = true
	}
	if !dk.HasDependency("kbuild") {
		// Debian kernels prior to the 4.5 package
		// version did not have a kbuild built from
		// the linux source itself, but used the
		// linux-tools source package.
		if !kver(dk.Version.Package).LessThan(kver("4.5-rc0")) {
			dk.Internal.Invalid = true
		} else {
			updateKbuild(toolsVersions, &dk)
		}
	}
	dk.Internal.LastFetch = time.Now()
	if !dk.Internal.Invalid {
		kernels = append(kernels, dk)
	}
	// cache even invalid entries so the refetch back-off applies
	err = c.Put([]DebianKernel{dk})
	if err != nil {
		slog.Error().Err(err).Msg("put to cache")
		return
	}
	slog.Debug().Msgf("%s cached", version)
	return
}
var (
	// CachePath is the kernels cache location; empty selects the
	// default dotfiles path (set lazily by GetKernelsWithLimit).
	CachePath string
	// RefetchDays is how long to wait before re-fetching versions
	// previously marked invalid.
	RefetchDays int = 14
)
// GetKernelsMode is a bit mask of optional cache-refresh actions for
// GetKernelsWithLimit; flags are tested with mode&Flag != 0.
type GetKernelsMode int

const (
	// NoMode performs no extra cache updates.
	NoMode GetKernelsMode = 0
	// UpdateRelease re-resolves the Debian release of cached kernels.
	// Declared as explicit bit flags (1, 2, ...) since the values are
	// combined and tested with bitwise AND; plain iota would collide
	// once a third flag (3 == 1|2) were added.
	UpdateRelease GetKernelsMode = 1 << (iota - 1)
	// UpdateKbuild re-resolves the kbuild dependency of cached kernels.
	UpdateKbuild
)
// GetKernelsWithLimit is workaround for testing and building the
// first cache, which is heavily rate limited by snapshot.debian.org
// limit only counts versions that required a network fetch; cached
// versions are free. mode selects optional cache-refresh actions.
func GetKernelsWithLimit(limit int, mode GetKernelsMode) (kernels []DebianKernel,
	err error) {
	if CachePath == "" {
		CachePath = dotfiles.File("debian.cache")
		log.Debug().Msgf("Use default kernels cache path: %s", CachePath)
		if !fs.PathExists(CachePath) {
			// bootstrap from the prebuilt remote cache; a
			// failure here just means a slow cold start
			log.Debug().Msgf("No cache, download")
			err = cache.DownloadDebianCache(CachePath)
			if err != nil {
				log.Debug().Err(err).Msg(
					"No remote cache, will take some time")
			}
		}
	} else {
		log.Debug().Msgf("Debian kernels cache path: %s", CachePath)
	}
	c, err := NewCache(CachePath)
	if err != nil {
		log.Error().Err(err).Msg("cache")
		return
	}
	defer c.Close()
	toolsVersions, err := snapshot.SourcePackageVersions("linux-tools")
	if err != nil {
		log.Error().Err(err).Msg("get linux-tools source pkg versions")
		return
	}
	versions, err := snapshot.SourcePackageVersions("linux")
	if err != nil {
		log.Error().Err(err).Msg("get linux source package versions")
		return
	}
	err = c.PutVersions(versions)
	if err != nil {
		log.Error().Err(err).Msg("put source package versions to cache")
		return
	}
	for i, version := range versions {
		slog := log.With().Str("version", version).Logger()
		slog.Trace().Msgf("%03d/%03d", i, len(versions))
		vkernels, fromcache := getKernelsByVersion(slog, c, toolsVersions, version, mode)
		kernels = append(kernels, vkernels...)
		// only network fetches count against the limit
		if !fromcache {
			limit--
		}
		if limit <= 0 {
			return
		}
	}
	return
}
// GetKernels returns all Debian kernels without any fetch limit.
func GetKernels() (kernels []DebianKernel, err error) {
	kernels, err = GetKernelsWithLimit(math.MaxInt32, NoMode)
	return
}

View File

@ -0,0 +1,67 @@
package debian
import (
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot"
)
// TestGetDebianKernel requires network access to snapshot.debian.org
// and metasnap.debian.net.
func TestGetDebianKernel(t *testing.T) {
	assert := assert.New(t)
	dk, err := getDebianKernel("4.6.4-1")
	assert.Nil(err)
	assert.Equal(getRelease(dk.Image), Stretch)
	t.Logf("%s", spew.Sdump(dk))
}
// TestParseKernelVersion checks that each fetched kernel image deb
// name parses into a unique package version (network required).
func TestParseKernelVersion(t *testing.T) {
	assert := assert.New(t)
	kernels, err := GetKernelsWithLimit(16, NoMode)
	assert.Nil(err)
	assert.NotEmpty(kernels)
	// package versions must be unique across kernels
	versions := make(map[string]bool)
	for _, dk := range kernels {
		dkv, err := ParseKernelVersion(dk.Image.Deb.Name)
		assert.Nil(err)
		_, found := versions[dkv.Package]
		assert.True(!found)
		versions[dkv.Package] = true
	}
}
// TestKbuildVersion checks that a linux-tools kbuild version can be
// matched for every pre-4.5 kernel (network required).
func TestKbuildVersion(t *testing.T) {
	assert := assert.New(t)
	kernels, err := GetKernelsWithLimit(16, NoMode)
	assert.Nil(err)
	assert.NotEmpty(kernels)
	toolsVersions, err := snapshot.SourcePackageVersions("linux-tools")
	assert.Nil(err)
	for _, dk := range kernels {
		// only pre-4.5 kernels take kbuild from linux-tools
		if !kver(dk.Version.Package).LessThan(kver("4.5-rc0")) {
			continue
		}
		version := kbuildVersion(
			toolsVersions,
			dk.Version.Package,
		)
		assert.Nil(err)
		assert.NotEmpty(version)
		t.Log(dk.Version.Package, "->", version)
	}
}

View File

@ -0,0 +1,158 @@
package metasnap
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/rs/zerolog/log"
"golang.org/x/time/rate"
)
// Note: Metasnap does not have all the packages, and its API is
// rather buggy.
const apiURL = "http://metasnap.debian.net/cgi-bin/api?"
var (
	// limiterTimeout is the current interval between requests;
	// doubled by lowerLimit after connection resets.
	limiterTimeout time.Duration = time.Second / 20
	// limiterMaxTimeout caps the backoff interval.
	limiterMaxTimeout time.Duration = time.Second * 2
	limiterBurst int = 1
	// limiterUpdateDelay is how long to wait for a new limit to apply.
	limiterUpdateDelay time.Duration = time.Second
	// Limiter rate-limits all requests to the metasnap API.
	Limiter = rate.NewLimiter(rate.Every(limiterTimeout), limiterBurst)
)
// lowerLimit slows the request rate after a connection reset by
// doubling the limiter timeout (capped at limiterMaxTimeout), then
// waits for the new limit to take effect.
func lowerLimit() {
	doubled := limiterTimeout * 2
	if doubled > limiterMaxTimeout {
		doubled = limiterMaxTimeout
	}
	limiterTimeout = doubled

	log.Info().Msgf("limiter timeout set to %v", limiterTimeout)
	Limiter.SetLimitAt(
		time.Now().Add(limiterUpdateDelay),
		rate.Every(limiterTimeout),
	)

	log.Info().Msgf("wait %v", limiterUpdateDelay)
	time.Sleep(limiterUpdateDelay)
}
// Retries in case of 5xx errors
var Retries = 10
// ErrNotFound is returned when the API responds with HTTP 404.
var ErrNotFound = errors.New("404 not found")
// query performs a rate-limited GET against the metasnap API, retrying
// on 5xx responses and backing off on connection resets; returns the
// response body as a string.
func query(q string) (result string, err error) {
	flog := log.With().Str("url", q).Logger()

	var resp *http.Response
	for i := Retries; i > 0; i-- {
		flog.Trace().Msg("wait")
		Limiter.Wait(context.Background())

		flog.Trace().Msg("start")
		resp, err = http.Get(q)
		if err != nil {
			if strings.Contains(err.Error(), "reset by peer") {
				flog.Debug().Err(err).Msg("")
				lowerLimit()
				continue
			}
			flog.Error().Err(err).Msg("")
			return
		}
		// deferred closes accumulate until the function returns,
		// which is acceptable for a small Retries count
		defer resp.Body.Close()

		flog.Debug().Msgf("%s", resp.Status)

		if resp.StatusCode == 404 {
			err = ErrNotFound
			return
		}
		if resp.StatusCode < 500 {
			break
		}
		flog.Debug().Msgf("retry (%d left)", i)
	}

	// every attempt may have failed with a connection reset,
	// leaving resp nil; previously this crashed on resp.StatusCode
	if resp == nil {
		if err == nil {
			err = errors.New("no response")
		}
		return
	}

	if resp.StatusCode >= 400 {
		// return immediately: previously this error was silently
		// overwritten by the io.ReadAll result below
		err = fmt.Errorf("%d (%s)", resp.StatusCode, q)
		return
	}

	buf, err := io.ReadAll(resp.Body)
	if err != nil {
		return
	}

	result = string(buf)
	return
}
func queryAPIf(f string, s ...interface{}) (result string, err error) {
return query(apiURL + fmt.Sprintf(f, s...))
}
// Snapshot is the validity range of a repository record, expressed as
// snapshot.debian.org timestamps (first and last seen).
type Snapshot struct {
	First string
	Last string
}
// Repo locates a package within a Debian archive: suite, component,
// and the snapshot range it was seen in.
type Repo struct {
	Archive string
	Suite string
	Component string
	Snapshot Snapshot
}
// GetRepos queries metasnap for every (suite, component, snapshot
// range) record of a package and returns those matching version ver.
// Returns ErrNotFound when the package or version is unknown.
func GetRepos(archive, pkg, arch, ver string) (repos []Repo, err error) {
	result, err := queryAPIf("archive=%s&pkg=%s&arch=%s",
		archive, pkg, arch)
	if err != nil {
		return
	}
	if result == "" {
		err = ErrNotFound
		return
	}
	for _, line := range strings.Split(result, "\n") {
		if line == "" {
			// records end at the first empty line
			break
		}
		// line layout: <version> <suite> <component> <first> <last>
		fields := strings.Split(line, " ")
		if len(fields) != 5 {
			err = fmt.Errorf("metasnap api returned %s", result)
			return
		}
		repo := Repo{
			Archive: archive,
			Suite: fields[1],
			Component: fields[2],
			Snapshot: Snapshot{
				First: fields[3],
				Last: fields[4],
			},
		}
		if fields[0] == ver {
			repos = append(repos, repo)
		}
	}
	if len(repos) == 0 {
		err = ErrNotFound
		return
	}
	return
}

View File

@ -0,0 +1,28 @@
package metasnap
import (
"testing"
"github.com/davecgh/go-spew/spew"
)
// TestGetRepos exercises the metasnap API with an existing and a
// non-existing package (network required).
func TestGetRepos(t *testing.T) {
	// existing
	infos, err := GetRepos("debian", "linux-image-3.8-trunk-amd64",
		"amd64", "3.8.2-1~experimental.1")
	if err != nil {
		t.Fatal(err)
	}
	t.Log(spew.Sdump(infos))
	// non-existing
	infos, err = GetRepos("debian", "meh", "amd64", "meh")
	if err == nil {
		t.Fatalf("should not be ok, result: %s", spew.Sdump(infos))
	}
	if err != ErrNotFound {
		t.Fatal("wrong error type")
	}
}

View File

@ -0,0 +1,186 @@
package mr
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
"github.com/rs/zerolog/log"
"golang.org/x/time/rate"
)
// apiURL is the machine-readable API endpoint of snapshot.debian.org.
const apiURL = "https://snapshot.debian.org/mr"
var (
	// limiterTimeout is the current interval between requests;
	// doubled by lowerLimit after transient network errors.
	limiterTimeout time.Duration = time.Second / 20
	// limiterMaxTimeout caps the backoff interval.
	limiterMaxTimeout time.Duration = time.Second * 2
	limiterBurst int = 1
	// limiterUpdateDelay is how long to wait for a new limit to apply.
	limiterUpdateDelay time.Duration = time.Second
	// Limiter rate-limits all requests to the snapshot "mr" API.
	Limiter = rate.NewLimiter(rate.Every(limiterTimeout), limiterBurst)
)
// lowerLimit backs off after transient network errors: the limiter
// timeout is doubled up to limiterMaxTimeout, then we sleep while the
// new limit takes effect.
func lowerLimit() {
	if next := limiterTimeout * 2; next > limiterMaxTimeout {
		limiterTimeout = limiterMaxTimeout
	} else {
		limiterTimeout = next
	}

	log.Info().Msgf("limiter timeout set to %v", limiterTimeout)

	Limiter.SetLimitAt(
		time.Now().Add(limiterUpdateDelay),
		rate.Every(limiterTimeout),
	)

	log.Info().Msgf("wait %v", limiterUpdateDelay)
	time.Sleep(limiterUpdateDelay)
}
// Retries in case of 5xx errors
var Retries = 10
// https://salsa.debian.org/snapshot-team/snapshot/blob/master/API
// /mr/package/<package>/
// Package is the API response listing all versions of a source package.
type Package struct {
	Comment string `json:"_comment"`
	Package string `json:"package"`
	Result []struct {
		Version string `json:"version"`
	} `json:"result"`
}
// /mr/package/<package>/<version>/binpackages
// Binpackages lists the binary packages built from a source version.
type Binpackages struct {
	Comment string `json:"_comment"`
	Package string `json:"package"`
	Result []struct {
		Name string `json:"name"`
		Version string `json:"version"`
	} `json:"result"`
	Version string `json:"version"`
}
// /mr/binary/<binary>/
// Binary lists versions of a binary package and their source package.
type Binary struct {
	Comment string `json:"_comment"`
	Binary string `json:"binary"`
	Result []struct {
		BinaryVersion string `json:"binary_version"`
		Name string `json:"name"`
		Source string `json:"source"`
		Version string `json:"version"`
	} `json:"result"`
}
// /mr/binary/<binpkg>/<binversion>/binfiles
// Binfiles lists the per-architecture file hashes of a binary version.
type Binfiles struct {
	Comment string `json:"_comment"`
	Binary string `json:"binary"`
	BinaryVersion string `json:"binary_version"`
	Result []struct {
		Architecture string `json:"architecture"`
		Hash string `json:"hash"`
	} `json:"result"`
}
// Fileinfo describes one occurrence of a file in the archive.
type Fileinfo struct {
	ArchiveName string `json:"archive_name"`
	FirstSeen string `json:"first_seen"`
	Name string `json:"name"`
	Path string `json:"path"`
	Size int `json:"size"`
}
// /mr/file/<hash>/info
// Info is the API response with all occurrences of a file hash.
type Info struct {
	Comment string `json:"_comment"`
	Hash string `json:"hash"`
	Result []Fileinfo `json:"result"`
}
// ErrNotFound is returned when the API responds with HTTP 404.
var ErrNotFound = errors.New("404 not found")
// getJson performs a rate-limited GET and decodes the JSON response
// into target, retrying on 5xx responses and on transient connection
// errors.
func getJson(query string, target interface{}) (err error) {
	flog := log.With().Str("url", query).Logger()

	var resp *http.Response
	for i := Retries; i > 0; i-- {
		flog.Trace().Msg("wait")
		Limiter.Wait(context.Background())

		flog.Trace().Msg("start")
		resp, err = http.Get(query)
		if err != nil {
			if strings.Contains(err.Error(), "reset by peer") ||
				strings.Contains(err.Error(), "connection refused") {

				flog.Debug().Err(err).Msg("")
				lowerLimit()
				continue
			}
			flog.Error().Err(err).Msg("")
			return
		}
		// deferred closes accumulate until the function returns,
		// which is acceptable for a small Retries count
		defer resp.Body.Close()

		flog.Debug().Msgf("%s", resp.Status)

		if resp.StatusCode == 404 {
			err = ErrNotFound
			return
		}
		if resp.StatusCode < 500 {
			break
		}
		flog.Debug().Msgf("retry (%d left)", i)
	}

	// every attempt may have failed with a transient error, leaving
	// resp nil; previously this crashed on resp.StatusCode
	if resp == nil {
		if err == nil {
			err = errors.New("no response")
		}
		return
	}

	if resp.StatusCode >= 400 {
		// return immediately: previously this error was assigned
		// and then discarded by the Decode return below
		err = fmt.Errorf("%d (%s)", resp.StatusCode, query)
		return
	}

	return json.NewDecoder(resp.Body).Decode(target)
}
// GetPackage lists all known versions of a source package.
func GetPackage(name string) (pkg Package, err error) {
	err = getJson(fmt.Sprintf("%s/package/%s/", apiURL, name), &pkg)
	return
}
// GetBinpackages lists the binary packages of one source version.
func GetBinpackages(name, version string) (binpkgs Binpackages, err error) {
	q := fmt.Sprintf("%s/package/%s/%s/binpackages", apiURL, name, version)
	err = getJson(q, &binpkgs)
	return
}
// GetBinary lists the versions of a binary package.
func GetBinary(pkg string) (binary Binary, err error) {
	err = getJson(fmt.Sprintf("%s/binary/%s/", apiURL, pkg), &binary)
	return
}
// GetBinfiles lists the per-architecture files of a binary version.
func GetBinfiles(binpkg, binversion string) (binfiles Binfiles, err error) {
	q := fmt.Sprintf("%s/binary/%s/%s/binfiles", apiURL, binpkg, binversion)
	err = getJson(q, &binfiles)
	return
}
// GetInfo returns file info for a hash; an empty result set is
// reported as an error.
func GetInfo(hash string) (info Info, err error) {
	err = getJson(fmt.Sprintf("%s/file/%s/info", apiURL, hash), &info)
	if err != nil {
		return
	}

	if len(info.Result) == 0 {
		err = errors.New("empty response")
	}
	return
}

View File

@ -0,0 +1,50 @@
package mr
import (
"testing"
)
// TestMR walks the whole mr API chain: source package -> binary
// packages -> binfiles -> file info (network required).
// NOTE(review): indexes Result[0] without length checks — an empty
// API response panics the test instead of failing it; confirm whether
// that is acceptable here.
func TestMR(t *testing.T) {
	name := "linux"
	t.Log(name)
	pkg, err := GetPackage(name)
	if err != nil {
		t.Fatal(err)
	}
	version := pkg.Result[0].Version
	t.Log(version)
	binpkgs, err := GetBinpackages(name, version)
	if err != nil {
		t.Fatal(err)
	}
	binpkgName := binpkgs.Result[0].Name
	t.Log(binpkgName)
	binary, err := GetBinary(binpkgName)
	if err != nil {
		t.Fatal(err)
	}
	binaryName := binary.Result[0].Name
	binaryVersion := binary.Result[0].BinaryVersion
	t.Log(binaryName, binaryVersion)
	binfiles, err := GetBinfiles(binaryName, binaryVersion)
	if err != nil {
		t.Fatal(err)
	}
	hash := binfiles.Result[0].Hash
	t.Log(hash)
	info, err := GetInfo(hash)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(info)
}

View File

@ -0,0 +1,166 @@
package snapshot
import (
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/distro/debian/snapshot/mr"
)
// URL is the base address of the snapshot.debian.org service.
const URL = "https://snapshot.debian.org"

// SourcePackageVersions lists every known version of a source package.
func SourcePackageVersions(name string) (versions []string, err error) {
	pkg, err := mr.GetPackage(name)
	if err != nil {
		return
	}

	for _, entry := range pkg.Result {
		versions = append(versions, entry.Version)
	}
	return
}
// Package describes one binary package resolved through the
// snapshot.debian.org metadata, including its downloadable .deb and
// the repository it was found in.
type Package struct {
	Name string
	Source string
	Version string
	Arch string
	// Deb is the concrete archive file for this package.
	Deb struct {
		Name string
		Hash string
		URL string
	}
	// Repo is where the file was first seen in the archive.
	Repo struct {
		Snapshot string
		Archive string
		Component string
	}
}
// NewPackage resolves a binary package (name/source/version) through
// the snapshot.debian.org metadata API, filling in the deb file name,
// download URL and repository coordinates for one of the allowed archs.
func NewPackage(name, srcname, version string, archs []string) (
	p Package, err error) {

	p.Name = name
	p.Source = srcname
	p.Version = version

	p.Arch, p.Deb.Hash, err = p.getHash(archs)
	if err != nil {
		return
	}

	// GetInfo guarantees a non-empty Result on nil error
	info, err := mr.GetInfo(p.Deb.Hash)
	if err != nil {
		return
	}

	p.Deb.Name = info.Result[0].Name
	p.Repo.Archive = info.Result[0].ArchiveName
	p.Repo.Snapshot = info.Result[0].FirstSeen

	p.Deb.URL, err = url.JoinPath(URL, "archive", p.Repo.Archive,
		p.Repo.Snapshot, info.Result[0].Path, p.Deb.Name)
	if err != nil {
		return
	}

	split := strings.Split(info.Result[0].Path, "/")
	// the length check must come first: the previous order indexed
	// split[1] before verifying the length, panicking on short paths
	if len(split) < 3 || split[1] != "pool" {
		err = fmt.Errorf("incorrect path: %s", info.Result[0].Path)
		return
	}
	p.Repo.Component = split[2]
	return
}
// getHash returns the first (architecture, hash) pair from the
// package's binfiles whose architecture appears in archs.
func (p Package) getHash(archs []string) (arch, hash string, err error) {
	binfiles, err := mr.GetBinfiles(p.Name, p.Version)
	if err != nil {
		return
	}

	for _, res := range binfiles.Result {
		for _, allowed := range archs {
			if res.Architecture != allowed {
				continue
			}
			return res.Architecture, res.Hash, nil
		}
	}

	err = errors.New("hash not found")
	return
}
// contains reports whether pkgs already holds a package with the same
// name as pkg.
func contains(pkgs []Package, pkg Package) bool {
	for i := range pkgs {
		if pkgs[i].Name == pkg.Name {
			return true
		}
	}

	return false
}
// filtered reports whether s contains any of the filter substrings.
func filtered(s string, filter []string) bool {
	match := false
	for i := 0; !match && i < len(filter); i++ {
		match = strings.Contains(s, filter[i])
	}
	return match
}
// Packages returns the binary packages of a source package version
// whose names match regex, are not excluded by filter, and exist for
// one of archs. A missing source version is not an error (empty list).
func Packages(srcname, version, regex string, archs, filter []string) (
	pkgs []Package, err error) {
	binpkgs, err := mr.GetBinpackages(srcname, version)
	if err == mr.ErrNotFound {
		// no binary packages at all is not a hard error here
		err = nil
		return
	}
	if err != nil {
		return
	}
	r := regexp.MustCompile(regex)
	for _, res := range binpkgs.Result {
		if res.Version != version {
			continue
		}
		if !r.MatchString(res.Name) || filtered(res.Name, filter) {
			continue
		}
		log.Trace().Msgf("matched %v", res.Name)
		var pkg Package
		pkg, err = NewPackage(res.Name, srcname, version, archs)
		if err != nil {
			return
		}
		// the API sometimes returns duplicate entries
		if contains(pkgs, pkg) {
			log.Trace().Msgf("%v already in slice O_o", pkg.Name)
			continue
		}
		log.Trace().Msgf("append %v", pkg.Name)
		pkgs = append(pkgs, pkg)
	}
	return
}

View File

@ -0,0 +1,37 @@
package snapshot
import (
"errors"
"testing"
)
// TestSourcePackageVersions requires network access to
// snapshot.debian.org.
func TestSourcePackageVersions(t *testing.T) {
	versions, err := SourcePackageVersions("linux")
	if err != nil {
		t.Fatal(err)
	}
	if len(versions) == 0 {
		t.Fatal(errors.New("empty response"))
	}
	t.Logf("found %d package versions", len(versions))
}
// TestPackages resolves the binary packages of one known kernel
// version (network required; heavily rate limited upstream).
func TestPackages(t *testing.T) {
	rx := `^(linux-(image|headers)-[a-z+~0-9\.\-]*-(common|amd64|amd64-unsigned)|linux-kbuild-.*)$`
	packages, err := Packages("linux", "5.10.179-1", rx,
		[]string{"amd64", "all"}, []string{})
	if err != nil {
		t.Fatal(err)
	}
	if len(packages) == 0 {
		t.Fatal(errors.New("empty response"))
	}
	for _, pkg := range packages {
		t.Logf("%#v", pkg)
	}
}

106
distro/distro.go Normal file
View File

@ -0,0 +1,106 @@
package distro
import (
"errors"
"sync"
)
// mu guards the global distros registry.
var mu sync.Mutex
// distros holds every registered distribution implementation.
var distros []distribution
// distribution is the interface each supported distro implements.
type distribution interface {
	// Distro returns the identifying descriptor.
	Distro() Distro
	// Equal reports whether the descriptor matches this distro.
	Equal(Distro) bool
	// Packages lists installable kernel packages.
	Packages() (packages []string, err error)
	// Install installs a kernel package, optionally with headers.
	Install(pkg string, headers bool) (err error)
	// Kernels lists installed kernels.
	Kernels() (kernels []KernelInfo, err error)
	// RootFS returns the rootfs image file name.
	RootFS() string
}
// Register adds a distribution implementation to the global registry.
func Register(d distribution) {
	mu.Lock()
	distros = append(distros, d)
	mu.Unlock()
}
// List returns the descriptors of all registered distributions.
func List() (dds []Distro) {
	for _, impl := range distros {
		dds = append(dds, impl.Distro())
	}
	return
}
// Distro identifies a distribution: an ID plus a release name.
type Distro struct {
	ID      ID
	Release string
}

// String formats the distro as "<id> <release>".
func (d Distro) String() string {
	name := d.ID.String()
	return name + " " + d.Release
}
// Packages aggregates package lists from registered distributions
// matching the (possibly partial) filter d: a zero ID matches any
// distro, an empty Release matches any release.
func (d Distro) Packages() (packages []string, err error) {
	for _, impl := range distros {
		if d.ID != None && d.ID != impl.Distro().ID {
			continue
		}
		if d.Release != "" && !impl.Equal(d) {
			continue
		}

		var pkgs []string
		if pkgs, err = impl.Packages(); err != nil {
			return
		}
		packages = append(packages, pkgs...)
	}
	return
}
// Install delegates to the matching distribution implementation;
// errors when no registered distro matches d.
func (d Distro) Install(pkg string, headers bool) (err error) {
	for _, impl := range distros {
		if impl.Equal(d) {
			return impl.Install(pkg, headers)
		}
	}

	return errors.New("not found")
}
// Kernels returns installed kernels from the matching distribution;
// an empty result when nothing matches.
func (d Distro) Kernels() (kernels []KernelInfo, err error) {
	for _, impl := range distros {
		if !impl.Equal(d) {
			continue
		}
		return impl.Kernels()
	}
	return
}
// Equal delegates the comparison to the implementation matching d;
// false when no implementation matches.
func (d Distro) Equal(to Distro) bool {
	for _, impl := range distros {
		if !impl.Equal(d) {
			continue
		}
		return impl.Equal(to)
	}
	return false
}
// RootFS returns the rootfs image name from the matching
// implementation, or "" when none matches.
func (d Distro) RootFS() string {
	for _, impl := range distros {
		if !impl.Equal(d) {
			continue
		}
		return impl.RootFS()
	}
	return ""
}
// Command is a shell command bound to the distro it should run on.
type Command struct {
	Distro Distro
	Command string
}

72
distro/id.go Normal file
View File

@ -0,0 +1,72 @@
package distro
import (
"fmt"
"strings"
)
// ID of the distro
type ID int
const (
	None ID = iota
	// Ubuntu https://ubuntu.com/
	Ubuntu
	// CentOS https://www.centos.org/
	CentOS
	// Debian https://www.debian.org/
	Debian
	// OracleLinux https://www.oracle.com/linux/
	OracleLinux
	// OpenSUSE https://opensuse.org/
	OpenSUSE
)
// IDs lists every valid value, including None.
var IDs = []ID{
	None, Ubuntu, CentOS, Debian, OracleLinux, OpenSUSE,
}
// nameStrings maps an ID to its display name (indexed by ID value;
// must stay in sync with the const block above).
var nameStrings = [...]string{
	"",
	"Ubuntu",
	"CentOS",
	"Debian",
	"OracleLinux",
	"openSUSE",
}
// NewID parses a distro name (case-insensitive) into an ID; empty
// input yields None.
func NewID(name string) (id ID, err error) {
	err = id.UnmarshalTOML([]byte(name))
	return
}
// String returns the display name; indexing nameStrings panics on an
// out-of-range ID value.
func (id ID) String() string {
	return nameStrings[id]
}
// UnmarshalTOML is for support github.com/naoina/toml
// It parses a (possibly quoted) distro name case-insensitively;
// empty input maps to None.
func (id *ID) UnmarshalTOML(data []byte) (err error) {
	name := strings.Trim(string(data), `"`)

	switch {
	case strings.EqualFold(name, "Ubuntu"):
		*id = Ubuntu
	case strings.EqualFold(name, "CentOS"):
		*id = CentOS
	case strings.EqualFold(name, "Debian"):
		*id = Debian
	case strings.EqualFold(name, "OracleLinux"):
		*id = OracleLinux
	case strings.EqualFold(name, "openSUSE"):
		*id = OpenSUSE
	case name == "":
		*id = None
	default:
		err = fmt.Errorf("distro %s is not supported", name)
	}
	return
}
// MarshalTOML is for support github.com/naoina/toml
// It emits the quoted distro name.
func (id ID) MarshalTOML() (data []byte, err error) {
	return []byte(`"` + id.String() + `"`), nil
}

41
distro/kernel.go Normal file
View File

@ -0,0 +1,41 @@
package distro
import "code.dumpstack.io/tools/out-of-tree/qemu"
// ByRootFS is sorting by .RootFS lexicographically
// (implements sort.Interface).
type ByRootFS []KernelInfo
func (a ByRootFS) Len() int { return len(a) }
func (a ByRootFS) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByRootFS) Less(i, j int) bool { return a[i].RootFS < a[j].RootFS }
// KernelInfo defines kernels.toml entries
type KernelInfo struct {
	Distro Distro
	// Must be *exactly* same as in `uname -r`
	KernelVersion string
	KernelRelease string
	// Build-time information
	KernelSource string // module/exploit will be build on host
	ContainerName string
	// Runtime information
	KernelPath string
	InitrdPath string
	ModulesPath string
	// CPU holds qemu CPU model/flags used to boot this kernel.
	CPU qemu.CPU
	RootFS string
	// Debug symbols
	VmlinuxPath string
	// Package name, not mandatory (yet)
	Package string
	// Blocklisted excludes this kernel from runs.
	Blocklisted bool
}

301
distro/opensuse/opensuse.go Normal file
View File

@ -0,0 +1,301 @@
package opensuse
import (
"fmt"
"strings"
"code.dumpstack.io/tools/out-of-tree/cache"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// init registers every supported openSUSE release with the global
// distro registry.
func init() {
	releases := []string{
		"12.1", "12.2", "12.3",
		"13.1", "13.2",
		"42.1", "42.2", "42.3",
		"15.0", "15.1", "15.2", "15.3", "15.4", "15.5",
	}

	for _, release := range releases {
		distro.Register(OpenSUSE{release: release})
	}
}
// OpenSUSE implements distro support for a single openSUSE release.
type OpenSUSE struct {
	// release is the openSUSE version string, e.g. "42.3" or "15.5".
	release string
}
// Equal reports whether d describes the same distribution and release
// as this OpenSUSE instance.
func (suse OpenSUSE) Equal(d distro.Distro) bool {
	return d.ID == distro.OpenSUSE && d.Release == suse.release
}
// Distro returns the generic distro.Distro descriptor for this release.
func (suse OpenSUSE) Distro() distro.Distro {
	return distro.Distro{ID: distro.OpenSUSE, Release: suse.release}
}
// Packages returns the kernel-default package versions available for
// this openSUSE release. On first use it builds (or imports) the
// container image for the release.
func (suse OpenSUSE) Packages() (pkgs []string, err error) {
	c, err := container.New(suse.Distro())
	if err != nil {
		return
	}

	var name string
	if strings.HasPrefix(suse.release, "12") {
		// 12.x base images are imported from prebuilt archives
		// in the project cache; 12.1 and 12.2 share one archive.
		var cnt string
		switch suse.release {
		case "12.1", "12.2":
			name = "opensuse:12.1"
			cnt = "openSUSE-12.1"
		case "12.3":
			name = "opensuse:12.3"
			cnt = "openSUSE-12.3"
		}
		cnturl := cache.ContainerURL(cnt)
		err = container.Import(cnturl, name)
		if err != nil {
			return
		}
	} else if strings.HasPrefix(suse.release, "13") {
		// All 13.x releases reuse the single 13.2 archive.
		name = "opensuse:13"
		cnturl := cache.ContainerURL("openSUSE-13.2")
		err = container.Import(cnturl, name)
		if err != nil {
			return
		}
	} else if strings.HasPrefix(suse.release, "42") {
		name = "opensuse/leap:42"
	} else if strings.HasPrefix(suse.release, "15") {
		name = "opensuse/leap:" + suse.release
	}

	if !c.Exist() {
		err = c.Build(name, suse.envs(), suse.runs())
		if err != nil {
			return
		}
	}

	// List all available kernel-default versions (x86_64 only) and
	// keep only the version column of zypper's table output.
	cmd := "zypper search -s --match-exact kernel-default | grep x86_64 " +
		"| cut -d '|' -f 4 | sed 's/ //g'"

	output, err := c.Run("", []string{cmd})
	if err != nil {
		return
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
// Kernels returns the kernels discovered in this release's container,
// with the "-default" flavour marker stripped from the release string.
func (suse OpenSUSE) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(suse.Distro())
	if err != nil {
		return
	}

	found, err := c.Kernels()
	if err != nil {
		return
	}

	for _, k := range found {
		k.KernelRelease = strings.ReplaceAll(
			k.KernelRelease, "-default", "")
		kernels = append(kernels, k)
	}
	return
}
// envs returns extra environment variables for the container build;
// openSUSE needs none.
func (suse OpenSUSE) envs() (envs []string) {
	return nil
}
// runs returns the shell commands used to build the container image:
// repository setup, a full update, and pre-caching of the kernel
// development toolchain.
func (suse OpenSUSE) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	main := "http://download.opensuse.org/"
	discontinued := "http://ftp.gwdg.de/pub/opensuse/discontinued/"

	// Pick distribution/update repositories depending on whether the
	// release is still on the main mirror or only on the
	// discontinued archive.
	var repourls []string

	if strings.HasPrefix(suse.release, "12") ||
		strings.HasPrefix(suse.release, "13") {

		dist := discontinued + "distribution/%s/repo/oss/"
		update := discontinued + "update/%s/"
		repourls = append(repourls,
			fmt.Sprintf(dist, suse.release),
			fmt.Sprintf(update, suse.release),
		)
	} else if strings.HasPrefix(suse.release, "42") {
		dist := discontinued + "distribution/leap/%s/repo/oss/suse/"
		update := discontinued + "update/leap/%s/oss/"
		repourls = append(repourls,
			fmt.Sprintf(dist, suse.release),
			fmt.Sprintf(update, suse.release),
		)
	} else if strings.HasPrefix(suse.release, "15") {
		dist := main + "distribution/leap/%s/repo/oss/"
		update := main + "update/leap/%s/oss/"
		repourls = append(repourls,
			fmt.Sprintf(dist, suse.release),
			fmt.Sprintf(update, suse.release),
		)

		// Newer Leap releases additionally pull SLE updates.
		switch suse.release {
		case "15.3", "15.4", "15.5":
			sle := main + "update/leap/%s/sle/"
			repourls = append(repourls,
				fmt.Sprintf(sle, suse.release),
			)
		}
	}

	// Replace the stock repository list with the ones chosen above.
	cmdf("rm /etc/zypp/repos.d/*")

	// 12.1/12.2 need dracut from the 12.3 repository; register it
	// at lower priority so it only supplies missing packages.
	switch suse.release {
	case "12.1", "12.2":
		repourl := discontinued + "distribution/12.3/repo/oss/"
		cmdf(`echo -e `+
			`"[dracut]\n`+
			`name=dracut\n`+
			`enabled=1\n`+
			`autorefresh=0\n`+
			`gpgcheck=0\n`+
			// higher number is lower priority
			// default is 99
			`priority=100\n`+
			`baseurl=%s" > /etc/zypp/repos.d/dracut.repo`,
			repourl,
		)
	}

	// Write one numbered .repo file per selected repository URL.
	for i, repourl := range repourls {
		cmdf(`echo -e `+
			`"[%d]\n`+
			`name=%d\n`+
			`enabled=1\n`+
			`autorefresh=0\n`+
			`gpgcheck=0\n`+
			`baseurl=%s" > /etc/zypp/repos.d/%d.repo`,
			i, i, repourl, i,
		)
	}

	cmdf("zypper -n refresh")

	params := "--no-recommends --force-resolution"
	// --replacefiles is only passed on post-12 releases
	// (presumably unsupported by the zypper shipped with 12.x).
	if !strings.HasPrefix(suse.release, "12") {
		params += " --replacefiles"
	}

	cmdf("zypper -n update %s", params)

	cmdf("zypper --no-refresh -n install %s -t pattern devel_kernel", params)

	// Cache dependencies
	cmdf("zypper -n install %s kernel-default kernel-default-devel "+
		"&& zypper -n remove -U kernel-default kernel-default-devel",
		params)

	switch suse.release {
	case "12.1", "12.2":
		cmdf("zypper -n install %s -r dracut dracut", params)
		cmdf("rm /etc/zypp/repos.d/dracut.repo")
	case "12.3":
		cmdf("zypper -n install %s dracut", params)
	}

	if !strings.HasPrefix(suse.release, "12") {
		cmdf("zypper --no-refresh -n install %s kmod which", params)
	}

	if strings.HasPrefix(suse.release, "13") {
		cmdf("zypper --no-refresh -n install %s kernel-firmware", params)
	}

	// Remove pre-installed kernels; a specific version is installed
	// per-request by Install().
	cmdf("rm -rf /boot/*")
	cmdf("rm -rf /lib/modules/*")

	return
}
// Install installs the requested kernel-default version (and
// optionally its -devel headers) inside the container, builds an
// initrd with a custom "workaround" dracut module, and copies the
// results into the /target-prefixed volumes.
func (suse OpenSUSE) Install(version string, headers bool) (err error) {
	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	installcmd := "zypper --no-refresh -n install "
	// --replacefiles is only used on post-12 releases, matching runs().
	if !strings.HasPrefix(suse.release, "12") {
		installcmd += " --replacefiles"
	}
	installcmd += " --no-recommends --force-resolution --capability"

	cmdf("%s kernel-default=%s", installcmd, version)
	if headers {
		cmdf("%s kernel-default-devel=%s", installcmd, version)
	}

	// Custom dracut module ("42workaround") that modprobes af_packet
	// and e1000 from a pre-mount hook, so those drivers are loaded
	// even when dracut would not pull them in by itself.
	cmdf("mkdir /usr/lib/dracut/modules.d/42workaround")

	wsetuppath := "/usr/lib/dracut/modules.d/42workaround/module-setup.sh"

	cmdf("echo 'check() { return 0; }' >> %s", wsetuppath)
	cmdf("echo 'depends() { return 0; }' >> %s", wsetuppath)
	cmdf(`echo 'install() { `+
		`inst_hook pre-mount 91 "$moddir/workaround.sh"; `+
		`}' >> %s`, wsetuppath)
	cmdf("echo 'installkernel() { "+
		"instmods af_packet e1000; "+
		"}' >> %s", wsetuppath)

	wpath := "/usr/lib/dracut/modules.d/42workaround/workaround.sh"

	cmdf("echo '#!/bin/sh' >> %s", wpath)
	cmdf("echo 'modprobe af_packet' >> %s", wpath)
	cmdf("echo 'modprobe e1000' >> %s", wpath)

	modules := "ata_piix e1000 rfkill af_packet"
	// NOTE(review): 15.2 is excluded from the extra storage modules;
	// the reason is not documented here — confirm before changing.
	if suse.release != "15.2" {
		modules += " libata ext4 sd_mod"
	}

	format := "dracut "
	format += "-a workaround "
	if strings.HasPrefix(suse.release, "12") {
		// 12.x uses --no-hostonly --add-drivers instead of
		// --force-drivers (presumably not available there).
		format += "--no-hostonly --add-drivers '%s' "
	} else {
		format += "--force-drivers '%s' "
	}
	format += "-f /boot/initrd-$(ls /lib/modules) $(ls /lib/modules)"
	cmdf(format, modules)

	// Export artifacts to the host via the /target-prefixed volumes.
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(suse.Distro())
	if err != nil {
		return
	}

	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
// RootFS returns the rootfs image filename for this release; all
// releases of the same major version share one image (e.g. every
// 15.x maps to out_of_tree_opensuse_15.img).
func (suse OpenSUSE) RootFS() string {
	major, _, _ := strings.Cut(suse.release, ".")
	return fmt.Sprintf("out_of_tree_opensuse_%s.img", major)
}

View File

@ -0,0 +1,222 @@
package oraclelinux
import (
"fmt"
"regexp"
"strings"
"github.com/rs/zerolog/log"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// init registers every supported Oracle Linux release with the global
// distro registry.
func init() {
	releases := []string{"6", "7", "8", "9"}

	for _, release := range releases {
		distro.Register(OracleLinux{release: release})
	}
}
// OracleLinux implements distro support for a single Oracle Linux
// major release.
type OracleLinux struct {
	// release is the major version string, e.g. "8".
	release string
}
// Equal reports whether d describes the same Oracle Linux release.
func (ol OracleLinux) Equal(d distro.Distro) bool {
	return d.ID == distro.OracleLinux && d.Release == ol.release
}
// Distro returns the generic distro.Distro descriptor for this release.
func (ol OracleLinux) Distro() distro.Distro {
	return distro.Distro{ID: distro.OracleLinux, Release: ol.release}
}
// Packages returns all installable kernel/kernel-uek package names for
// this Oracle Linux release, building the container image on first
// use. For ol8 the ol9 image is prepared as well (needed to build the
// newest UEK kernels, see Kernels()).
func (ol OracleLinux) Packages() (pkgs []string, err error) {
	c, err := container.New(ol.Distro())
	if err != nil {
		return
	}

	if !c.Exist() {
		err = c.Build("oraclelinux:"+ol.release,
			ol.envs(), ol.runs())
		if err != nil {
			return
		}
	}

	if ol.release == "8" {
		// Image for ol9 is required for some kernels
		// See notes in OracleLinux.Kernels()
		_, err = OracleLinux{release: "9"}.Packages()
		if err != nil {
			return
		}
	}

	// List kernel and kernel-uek packages (all versions), excluding
	// source packages, keeping only the package-name column.
	cmd := "yum search kernel --showduplicates 2>/dev/null " +
		"| grep '^kernel-[0-9]\\|^kernel-uek-[0-9]' " +
		"| grep -v src " +
		"| cut -d ' ' -f 1"

	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
// Kernels lists the kernels present in this release's container and
// applies per-kernel workarounds: the newest UEK kernels are built in
// the next release's container, known SMAP-incompatible kernels get
// smap=off, and known-broken builds are blocklisted.
func (ol OracleLinux) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(ol.Distro())
	if err != nil {
		return
	}

	kernels, err = c.Kernels()
	if err != nil {
		return
	}

	// Some kernels do not work with the smap enabled
	//
	// BUG: unable to handle kernel paging request at 00007fffc64b2fda
	// IP: [<ffffffff8127a9ed>] strnlen+0xd/0x40"
	// ...
	// Call Trace:
	// [<ffffffff81123bf8>] dtrace_psinfo_alloc+0x138/0x390
	// [<ffffffff8118b143>] do_execve_common.isra.24+0x3c3/0x460
	// [<ffffffff81554d70>] ? rest_init+0x80/0x80
	// [<ffffffff8118b1f8>] do_execve+0x18/0x20
	// [<ffffffff81554dc2>] kernel_init+0x52/0x180
	// [<ffffffff8157cd2c>] ret_from_fork+0x7c/0xb0
	//
	smapBlocklist := []string{
		"3.8.13-16",
		"3.8.13-26",
		"3.8.13-35",
		"3.8.13-44",
		"3.8.13-55",
		"3.8.13-68",
		"3.8.13-98",
	}

	// BUG: soft lockup - CPU#0 stuck for 61s!
	blocklistr := regexp.MustCompile(
		`2[.]6[.]32-300[.]3(2[.][2-3]|[3-9][.][0-9])`)

	for i, k := range kernels {
		// The latest uek kernels require gcc-11, which is
		// only present in el8 with scl load, so not so
		// convinient. It is possible to just build from
		// the next release container.
		if strings.Contains(k.KernelVersion, "5.15.0") {
			cnt := strings.Replace(k.ContainerName, "8", "9", -1)
			kernels[i].ContainerName = cnt
		}

		for _, ver := range smapBlocklist {
			if strings.Contains(k.KernelVersion, ver) {
				kernels[i].CPU.Flags = append(
					kernels[i].CPU.Flags, "smap=off",
				)
			}
		}

		if blocklistr.MatchString(k.KernelVersion) {
			kernels[i].Blocklisted = true
		}
	}

	return
}
// envs returns extra environment variables for the container build;
// Oracle Linux needs none.
func (ol OracleLinux) envs() (envs []string) {
	return nil
}
// runs returns the container build commands: enable all repositories,
// raise installonly_limit so many kernels can coexist, update, and
// install the build toolchain.
func (ol OracleLinux) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	// NOTE(review): string comparison is lexicographic; it is fine
	// for the single-digit releases registered here ("6".."9") but
	// would misbehave for a hypothetical release "10".
	if ol.release < "6" {
		log.Fatal().Msgf("no support for pre-EL6")
	}

	cmdf("sed -i 's/enabled=0/enabled=1/' /etc/yum.repos.d/*")
	// Allow an effectively unlimited number of parallel kernel
	// package installations.
	cmdf("sed -i 's;installonly_limit=;installonly_limit=100500;' /etc/yum.conf /etc/dnf/dnf.conf || true")
	cmdf("yum -y update")

	cmdf("yum -y groupinstall 'Development Tools'")

	packages := "linux-firmware grubby"
	if ol.release <= "7" {
		packages += " libdtrace-ctf"
	}

	cmdf("yum -y install %s", packages)
	return
}
// Install installs the given kernel package (kernel-* or kernel-uek-*)
// and optionally its -devel counterpart inside the container,
// regenerates the initramfs with the drivers needed at boot, and
// copies the results into the /target-prefixed volumes.
func (ol OracleLinux) Install(pkgname string, headers bool) (err error) {
	var headerspkg string
	if headers {
		// Derive the headers package name from the image package.
		if strings.Contains(pkgname, "uek") {
			headerspkg = strings.Replace(pkgname,
				"kernel-uek", "kernel-uek-devel", -1)
		} else {
			headerspkg = strings.Replace(pkgname,
				"kernel", "kernel-devel", -1)
		}
	}

	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	cmdf("yum -y install %s %s", pkgname, headerspkg)

	// Strip the package-name prefix to recover the version string
	// used in the initramfs path.
	var version string
	if strings.Contains(pkgname, "uek") {
		version = strings.Replace(pkgname, "kernel-uek-", "", -1)
	} else {
		version = strings.Replace(pkgname, "kernel-", "", -1)
	}

	if ol.release <= "7" {
		// EL7 and older: only --add-drivers is used.
		cmdf("dracut -v --add-drivers 'e1000 ext4' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	} else {
		cmdf("dracut -v --add-drivers 'ata_piix libata' "+
			"--force-drivers 'e1000 ext4 sd_mod' -f "+
			"/boot/initramfs-%s.img %s", version, version)
	}

	// Export artifacts to the host via the /target-prefixed volumes.
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(ol.Distro())
	if err != nil {
		return
	}

	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
// RootFS returns the rootfs image filename for this Oracle Linux
// release.
func (ol OracleLinux) RootFS() string {
	name := fmt.Sprintf("out_of_tree_oraclelinux_%s.img", ol.release)
	return name
}

View File

@ -0,0 +1,19 @@
package oraclelinux
import (
"testing"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// TestOracleLinux checks release matching via Equal and that package
// enumeration succeeds (requires container tooling and, on first run,
// network access to build the image).
func TestOracleLinux(t *testing.T) {
	assert := assert.New(t)

	u := OracleLinux{release: "9"}

	assert.True(u.Equal(distro.Distro{Release: "9", ID: distro.OracleLinux}))

	assert.NotEmpty(u.Packages())
}

165
distro/ubuntu/ubuntu.go Normal file
View File

@ -0,0 +1,165 @@
package ubuntu
import (
"fmt"
"strings"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
"code.dumpstack.io/tools/out-of-tree/container"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// init registers every supported Ubuntu release with the global
// distro registry.
func init() {
	releases := []string{
		"12.04",
		"14.04",
		"16.04",
		"18.04",
		"20.04",
		"22.04",
	}

	for _, release := range releases {
		distro.Register(Ubuntu{release: release})
	}
}
// Ubuntu implements distro support for a single Ubuntu release.
type Ubuntu struct {
	// release is the Ubuntu version string, e.g. "22.04".
	release string
}
// Equal reports whether d describes the same Ubuntu release.
func (u Ubuntu) Equal(d distro.Distro) bool {
	return d.ID == distro.Ubuntu && d.Release == u.release
}
// Distro returns the generic distro.Distro descriptor for this release.
func (u Ubuntu) Distro() distro.Distro {
	return distro.Distro{ID: distro.Ubuntu, Release: u.release}
}
// Packages returns all versioned generic linux-image package names
// available for this Ubuntu release, building the container image on
// first use.
func (u Ubuntu) Packages() (pkgs []string, err error) {
	c, err := container.New(u.Distro())
	if err != nil {
		return
	}

	if !c.Exist() {
		err = c.Build("ubuntu:"+u.release, u.envs(), u.runs())
		if err != nil {
			return
		}
	}

	// Match versioned generic kernel images only,
	// e.g. linux-image-5.15.0-86-generic.
	cmd := "apt-cache search " +
		"--names-only '^linux-image-[0-9\\.\\-]*-generic$' " +
		"| awk '{ print $1 }'"

	output, err := c.Run(dotfiles.Dir("tmp"), []string{cmd})
	if err != nil {
		return
	}

	pkgs = append(pkgs, strings.Fields(output)...)
	return
}
// Kernels returns the kernels currently present in this release's
// container; no Ubuntu-specific post-processing is applied.
func (u Ubuntu) Kernels() (kernels []distro.KernelInfo, err error) {
	c, err := container.New(u.Distro())
	if err != nil {
		return
	}

	return c.Kernels()
}
// envs returns the build-time environment; the noninteractive
// frontend keeps apt from prompting during image builds.
func (u Ubuntu) envs() (envs []string) {
	return []string{"DEBIAN_FRONTEND=noninteractive"}
}
// runs returns the container build commands for this Ubuntu release:
// mirror fixup for EOL releases, toolchain installation, and a single
// kernel install/remove cycle that warms the apt dependency cache.
func (u Ubuntu) runs() (commands []string) {
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	// EOL releases moved to the old-releases mirror.
	// NOTE(review): the comparison is lexicographic on the version
	// string; it holds for the releases registered in init().
	if u.release < "14.04" {
		cmdf("sed -i 's/archive.ubuntu.com/old-releases.ubuntu.com/' " +
			"/etc/apt/sources.list")
	}

	cmdf("apt-get update")
	cmdf("apt-get install -y build-essential libelf-dev")
	cmdf("apt-get install -y wget git")

	if u.release == "12.04" {
		// Stub out grub-probe and pre-create menu.lst,
		// presumably to satisfy kernel package scripts — confirm
		// before removing.
		cmdf("apt-get install -y grub")
		cmdf("cp /bin/true /usr/sbin/grub-probe")
		cmdf("mkdir -p /boot/grub")
		cmdf("touch /boot/grub/menu.lst")
	}

	// Pre-14.04 releases stop here (no dependency caching below).
	if u.release < "14.04" {
		return
	}

	// 22.04 only needs gcc-12; it also skips the caching step below.
	if u.release == "22.04" {
		cmdf("apt-get install -y gcc-12")
		return
	}

	cmdf("apt-get install -y libseccomp-dev")

	// Install and remove a single kernel and headers.
	// This ensures that all dependencies are cached.
	cmd := "export HEADERS=$(apt-cache search " +
		"--names-only '^linux-headers-[0-9\\.\\-]*-generic' " +
		"| awk '{ print $1 }' | head -n 1)"

	cmd += " KERNEL=$(echo $HEADERS | sed 's/headers/image/')"
	cmd += " MODULES=$(echo $HEADERS | sed 's/headers/modules/')"
	cmd += " && apt-get install -y $HEADERS $KERNEL $MODULES"
	cmd += " && apt-get remove -y $HEADERS $KERNEL $MODULES"

	cmdf(cmd)

	return
}
// Install installs a linux-image package (and the matching headers
// package when requested) inside the container and copies /boot,
// modules, and sources into the /target-prefixed volumes.
func (u Ubuntu) Install(pkgname string, headers bool) (err error) {
	var headerspkg string
	if headers {
		// linux-image-X-generic -> linux-headers-X-generic
		headerspkg = strings.Replace(pkgname, "image", "headers", -1)
	}

	var commands []string
	cmdf := func(f string, s ...interface{}) {
		commands = append(commands, fmt.Sprintf(f, s...))
	}

	cmdf("apt-get install -y %s %s", pkgname, headerspkg)

	// Export artifacts to the host via the /target-prefixed volumes.
	cmdf("cp -r /boot /target/")
	cmdf("cp -r /lib/modules /target/lib/")
	cmdf("cp -r /usr/src /target/usr/")

	c, err := container.New(u.Distro())
	if err != nil {
		return
	}

	for i := range c.Volumes {
		c.Volumes[i].Dest = "/target" + c.Volumes[i].Dest
	}

	_, err = c.Run("", commands)
	if err != nil {
		return
	}

	return
}
// RootFS returns the rootfs image filename for this Ubuntu release,
// with dots doubled-underscored ("22.04" -> out_of_tree_ubuntu_22__04.img).
func (u Ubuntu) RootFS() string {
	release := strings.ReplaceAll(u.release, ".", "__")
	return fmt.Sprintf("out_of_tree_ubuntu_%s.img", release)
}

View File

@ -0,0 +1,19 @@
package ubuntu
import (
"testing"
"github.com/stretchr/testify/assert"
"code.dumpstack.io/tools/out-of-tree/distro"
)
// TestUbuntu checks release matching via Equal and that package
// enumeration succeeds (requires container tooling and, on first run,
// network access to build the image).
func TestUbuntu(t *testing.T) {
	assert := assert.New(t)

	u := Ubuntu{release: "22.04"}

	assert.True(u.Equal(distro.Distro{Release: "22.04", ID: distro.Ubuntu}))

	assert.NotEmpty(u.Packages())
}

1
docs/conf.py Normal file
View File

@ -0,0 +1 @@
project = "out-of-tree"

View File

@ -1,4 +1,4 @@
Installation
Installation (from source)
============
OS/Distro-specific
@ -10,7 +10,7 @@ Ubuntu
Install dependencies::
$ sudo snap install go --classic
$ sudo snap install docker
$ # Install docker: https://docs.docker.com/engine/install/ubuntu/
$ sudo apt install qemu-system-x86 build-essential gdb
macOS
@ -36,18 +36,33 @@ There's a minimal configuration that you need to apply::
];
}
Gentoo
------
Install dependencies::
$ sudo emerge app-emulation/qemu app-emulation/docker dev-lang/go
Fedora
------
Install dependencies::
$ sudo dnf install go qemu moby-engine
Common
======
Setup Go environment::
Setup environment::
$ echo 'export GOPATH=$HOME' >> ~/.bashrc
$ echo 'export PATH=$PATH:$HOME/bin' >> ~/.bashrc
$ source ~/.bashrc
Build *out-of-tree*::
$ go get -u code.dumpstack.io/tools/out-of-tree
$ git clone https://code.dumpstack.io/tools/out-of-tree
$ cd out-of-tree
$ CGO_ENABLED=1 go build -o ~/bin/out-of-tree
.. note::
On a GNU/Linux you need to add your user to docker group if you want
@ -57,7 +72,7 @@ Build *out-of-tree*::
Test that everything works::
$ cd $GOPATH/src/code.dumpstack.io/tools/out-of-tree/examples/kernel-exploit
$ cd out-of-tree/examples/kernel-exploit
$ out-of-tree kernel autogen --max=1
$ out-of-tree pew --max=1

1
examples/kernel-exploit/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
logs

View File

@ -1,38 +1,28 @@
# out-of-tree configuration file
# docs at https://out-of-tree.io
name = "out-of-tree exploit example"
name = "exploit_example"
type = "exploit"
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "16.04"
release_mask = "4[.]4[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58|59|60|61|62|63|64|65|66|67|68|69|70|71|72|73|74|75|76|77|78|79|80|81|82|83|84|85|86|87|88|89|90|91|92|93|94|95|96|97|98|99|100|101|102|103|104|105|106|107|108|109|110|111|112|113|114|115|116)-.*"
[[targets]]
distro = { id = "Ubuntu", release = "16.04" }
kernel = { regex = "4[.]4[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58|59|60|61|62|63|64|65|66|67|68|69|70|71|72|73|74|75|76|77|78|79|80|81|82|83|84|85|86|87|88|89|90|91|92|93|94|95|96|97|98|99|100|101|102|103|104|105|106|107|108|109|110|111|112|113|114|115|116)-.*" }
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "16.04"
release_mask = "4[.]8[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58)-.*"
[[targets]]
distro = { id = "Ubuntu", release = "16.04" }
kernel = { regex = "4[.]8[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58)-.*" }
[[supported_kernels]]
[[targets]]
# Can be Ubuntu/CentOS/Debian/etc.
distro_type = "Ubuntu"
distro_release = "16.04"
distro = { id = "Ubuntu", release = "16.04" }
# regex for `uname -r`
# See also: regex-golang.appspot.com
# stupid way to generate: $ echo '4.4.0-('$(seq 44 | xargs echo | sed 's/ /|/g')')-.*'
release_mask = "4[.]10[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42)-.*"
kernel = { regex = "4[.]10[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42)-.*" }
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "16.04"
release_mask = "4[.]11[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14)-.*"
[[targets]]
distro = { id = "Ubuntu", release = "16.04" }
kernel = { regex = "4[.]11[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14)-.*" }
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "16.04"
# equivalent for "4[.]13[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21)-.*"
[supported_kernels.kernel]
version = [ 4 ]
major = [ 13 ]
minor = [ 0 ]
patch = [ 1, 21 ]
[[targets]]
distro = { id = "Ubuntu", release = "16.04" }
kernel = { regex = "4[.]13[.]0-(1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21)-.*" }

View File

@ -12,3 +12,4 @@ GPATH
GRTAGS
GTAGS
.cache.mk
logs

View File

@ -1,30 +1,26 @@
# out-of-tree configuration file
# docs at https://out-of-tree.io
name = "out-of-tree module example"
name = "module_example"
type = "module"
[[supported_kernels]]
[[targets]]
# Can be Ubuntu/CentOS/Debian/etc.
distro_type = "Ubuntu"
distro_release = "16.04"
distro = { id = "Ubuntu", release = "16.04" }
# regex for `uname -r`
# See also: regex-golang.appspot.com
release_mask = "4[.]4[.]0-70-.*"
kernel = { regex = "4[.]4[.]0-70-.*" }
# [[supported_kernels]] may be defined unlimited number of times
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "18.04"
# [[targets]] may be defined unlimited number of times
[[targets]]
distro = { id = "Ubuntu", release = "18.04" }
# Also you can use only one kernel
release_mask = "4[.]15[.]0-(24|29)-generic"
kernel = { regex = "4[.]15[.]0-(24|29)-generic" }
[[supported_kernels]]
distro_type = "Ubuntu"
distro_release = "18.04"
[[targets]]
distro = { id = "Ubuntu", release = "18.04" }
# Also you can use only one kernel
release_mask = "4[.]15[.]0-23-generic"
kernel = { regex = "4[.]15[.]0-23-generic" }
[[supported_kernels]]
distro_type = "CentOS"
distro_release = "7"
release_mask = "3[.]10[.]0-862.el7.x86_64"
[[targets]]
distro = { id = "CentOS", release = "7" }
kernel = { regex = "3[.]10[.]0-862.el7.x86_64" }

1
examples/preload/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
logs

View File

@ -0,0 +1,11 @@
name = "preload_example"
type = "module"
[[targets]]
distro = { id = "Ubuntu", release = "18.04" }
kernel = { regex = ".*" }
[[preload]]
repo = "https://github.com/openwall/lkrg"
#path = "/local/path/to/lkrg"
timeout_after_load = "1s"

11
examples/preload/Makefile Normal file
View File

@ -0,0 +1,11 @@
# Kbuild wrapper: builds the out-of-tree module against the headers of
# the currently running kernel.
KERNEL := /lib/modules/$(shell uname -r)/build
TARGET := module

obj-m += $(TARGET).o
$(TARGET)-objs = module.o

# Delegate build/clean to the kernel build system (kbuild).
all:
	make -C $(KERNEL) M=$(PWD) modules

clean:
	make -C $(KERNEL) M=$(PWD) clean

View File

@ -0,0 +1,5 @@
# out-of-tree kernel module preload example
See .out-of-tree.toml
Note that inserting the module should fail if LKRG is enabled in the preload list.

17
examples/preload/module.c Normal file
View File

@ -0,0 +1,17 @@
#include <linux/module.h>
#include <linux/slab.h>
/*
 * init_module spawns /bin/sh via call_usermodehelper on load; per the
 * accompanying README this is intended to trigger LKRG, so module
 * insertion is expected to fail when LKRG is preloaded.
 */
int init_module(void)
{
	char *argv[] = { "/bin/sh", "--help", NULL };
	char *envp[] = { NULL };

	/* trigger lkrg */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}
/* cleanup_module: nothing to tear down. */
void cleanup_module(void)
{
}
MODULE_LICENSE("GPL");

3
examples/preload/test.sh Normal file
View File

@ -0,0 +1,3 @@
#!/bin/sh
# Check whether a BLOCK event was logged (e.g. by LKRG) during the
# module load attempt; grep's exit status is the test result.
dmesg | grep BLOCK

1
examples/script/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
logs

View File

@ -0,0 +1,10 @@
# out-of-tree configuration file
# docs at https://out-of-tree.io
name = "script_example"
type = "script"
script = "script.sh"
[[targets]]
distro = { id = "Ubuntu", release = "22.04" }
kernel = { regex = ".*" }

View File

@ -0,0 +1,3 @@
# out-of-tree script example
See .out-of-tree.toml

View File

@ -0,0 +1,5 @@
#!/bin/sh
# Print kernel identification and list config-related /proc entries.
uname -a
ls /proc | grep config

130
flake.lock Normal file
View File

@ -0,0 +1,130 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1705309234,
"narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1694529238,
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gomod2nix": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1705314449,
"narHash": "sha256-yfQQ67dLejP0FLK76LKHbkzcQqNIrux6MFe32MMFGNQ=",
"owner": "nix-community",
"repo": "gomod2nix",
"rev": "30e3c3a9ec4ac8453282ca7f67fca9e1da12c3e6",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "gomod2nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1658285632,
"narHash": "sha256-zRS5S/hoeDGUbO+L95wXG9vJNwsSYcl93XiD0HQBXLk=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5342fc6fb59d0595d26883c3cadff16ce58e44f3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "master",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1708296515,
"narHash": "sha256-FyF489fYNAUy7b6dkYV6rGPyzp+4tThhr80KNAaF/yY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b98a4e1746acceb92c509bc496ef3d0e5ad8d4aa",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"gomod2nix": "gomod2nix",
"nixpkgs": "nixpkgs_2"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

24
flake.nix Normal file
View File

@ -0,0 +1,24 @@
{
  description = "kernel {module, exploit} development tool";

  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
  inputs.flake-utils.url = "github:numtide/flake-utils";
  inputs.gomod2nix.url = "github:nix-community/gomod2nix";

  outputs = { self, nixpkgs, flake-utils, gomod2nix }:
    (flake-utils.lib.eachDefaultSystem
      (system:
        let
          # nixpkgs with the gomod2nix overlay applied so the Go
          # builder is available to callPackage below.
          pkgs = import nixpkgs {
            inherit system;
            overlays = [ gomod2nix.overlays.default ];
          };
          # Version is derived from the flake's last modification date.
          version = self.lastModifiedDate;
        in
        {
          packages.default = pkgs.callPackage ./. { inherit version; };
          devShells.default = import ./shell.nix { inherit pkgs; };
        })
    );
}

72
fs/fs.go Normal file
View File

@ -0,0 +1,72 @@
package fs
import (
"errors"
"os"
"path/filepath"
"strings"
"code.dumpstack.io/tools/out-of-tree/config/dotfiles"
)
// CaseInsensitive reports whether the filesystem containing dir treats
// file names case-insensitively. It probes by creating "file" and
// "FILE" inside dir and checking whether both names resolve to the
// same file; both probe files are removed before returning.
func CaseInsensitive(dir string) (yes bool, err error) {
	lower := filepath.Join(dir, "file")
	upper := filepath.Join(dir, "FILE")

	fl, err := os.Create(lower)
	if err != nil {
		return
	}
	defer fl.Close()
	defer os.Remove(lower)

	fu, err := os.Create(upper)
	if err != nil {
		return
	}
	defer fu.Close()
	defer os.Remove(upper)

	sl, err := fl.Stat()
	if err != nil {
		return
	}

	su, err := fu.Stat()
	if err != nil {
		return
	}

	yes = os.SameFile(sl, su)
	return
}
// PathExists reports whether path can be stat'ed. Note that any stat
// failure (including permission errors) is treated as absence.
func PathExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}
// TempDir creates a fresh, uniquely named temporary directory inside
// the "tmp" subdirectory of the out-of-tree config directory and
// returns its path.
func TempDir() (string, error) {
	return os.MkdirTemp(dotfiles.Dir("tmp"), "")
}
// FindBySubstring returns the full path of the first entry in dir
// (in directory listing order) whose name contains substring. It
// returns an error when dir cannot be read or no entry matches.
func FindBySubstring(dir, substring string) (k string, err error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return
	}

	for _, entry := range entries {
		name := entry.Name()
		if !strings.Contains(name, substring) {
			continue
		}
		k = filepath.Join(dir, name)
		return
	}

	err = errors.New("not found")
	return
}

33
gen.go
View File

@ -1,33 +0,0 @@
// Copyright 2018 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
import (
"fmt"
"github.com/naoina/toml"
"code.dumpstack.io/tools/out-of-tree/config"
)
// genConfig prints a skeleton out-of-tree artifact configuration
// (TOML) for the given artifact type to stdout.
func genConfig(at config.ArtifactType) (err error) {
	a := config.Artifact{
		Name: "Put name here",
		Type: at,
	}
	// Seed the template with a single permissive Ubuntu 18.04 kernel
	// mask that matches any release.
	a.SupportedKernels = append(a.SupportedKernels, config.KernelMask{
		DistroType:    config.Ubuntu,
		DistroRelease: "18.04",
		ReleaseMask:   ".*",
	})

	buf, err := toml.Marshal(&a)
	if err != nil {
		return
	}

	fmt.Print(string(buf))
	return
}

67
go.mod
View File

@ -1,21 +1,58 @@
module code.dumpstack.io/tools/out-of-tree
replace code.dumpstack.io/tools/out-of-tree/qemu => ./qemu
replace code.dumpstack.io/tools/out-of-tree/config => ./config
go 1.21
require (
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc // indirect
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf // indirect
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/mattn/go-sqlite3 v1.11.0
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/Masterminds/semver v1.5.0
github.com/alecthomas/kong v0.7.1
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/davecgh/go-spew v1.1.1
github.com/go-git/go-git/v5 v5.6.1
github.com/google/uuid v1.6.0
github.com/mattn/go-sqlite3 v1.14.16
github.com/mitchellh/go-homedir v1.1.0
github.com/naoina/toml v0.1.1
github.com/olekukonko/tablewriter v0.0.1
github.com/otiai10/copy v1.0.1
github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce
github.com/zcalusic/sysinfo v0.0.0-20190429151633-fbadb57345c2
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/logrusorgru/aurora.v1 v1.0.0-20181002194514-a7b3b318ed4e
github.com/natefinch/lumberjack v2.0.0+incompatible
github.com/olekukonko/tablewriter v0.0.5
github.com/otiai10/copy v1.11.0
github.com/povsister/scp v0.0.0-20210427074412-33febfd9f13e
github.com/rapidloop/skv v0.0.0-20180909015525-9def2caac4cc
github.com/remeh/sizedwaitgroup v1.0.0
github.com/rs/zerolog v1.29.1
github.com/stretchr/testify v1.7.0
github.com/zcalusic/sysinfo v0.9.5
golang.org/x/crypto v0.9.0
golang.org/x/time v0.3.0
gopkg.in/logrusorgru/aurora.v2 v2.0.3
)
require (
github.com/BurntSushi/toml v1.2.1 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
github.com/acomagu/bufpipe v1.0.4 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/cloudflare/circl v1.1.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
github.com/go-git/go-billy/v5 v5.4.1 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/sergi/go-diff v1.1.0 // indirect
github.com/skeema/knownhosts v1.1.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sys v0.8.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.3.0 // indirect
gopkg.in/yaml.v3 v3.0.0 // indirect
)

230
go.sum
View File

@ -1,33 +1,211 @@
bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA=
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g=
github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2ojoH/0=
github.com/alecthomas/assert/v2 v2.1.0/go.mod h1:b/+1DI2Q6NckYi+3mXyH3wFb8qG37K/DuK80n7WefXA=
github.com/alecthomas/kong v0.7.1 h1:azoTh0IOfwlAX3qN9sHWTxACE2oV8Bg2gAwBsMwDQY4=
github.com/alecthomas/kong v0.7.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U=
github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE=
github.com/alecthomas/repr v0.1.0/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4=
github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4=
github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY=
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4=
github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ=
github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
github.com/go-git/go-git/v5 v5.6.1 h1:q4ZRqQl4pR/ZJHc1L5CFjGA1a10u76aV1iC+nh+bHsk=
github.com/go-git/go-git/v5 v5.6.1/go.mod h1:mvyoL6Unz0PiTQrGQfSfiLFhBH1c1e84ylC2MDs4ee8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM=
github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.1 h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8=
github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/otiai10/copy v1.0.1 h1:gtBjD8aq4nychvRZ2CyJvFWAw0aja+VHazDdruZKGZA=
github.com/otiai10/copy v1.0.1/go.mod h1:8bMCJrAqOtN/d9oyh5HR7HhLQMvcGMpGdwRDYsfOCHc=
github.com/otiai10/mint v1.2.3/go.mod h1:YnfyPNhBvnY8bW4SGQHCs/aAFhkgySlMZbrF5U0bOVw=
github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce h1:aP+C+YbHZfOQlutA4p4soHi7rVUqHQdWEVMSkHfDTqY=
github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNCJ1V+9+NVNYlo=
github.com/zcalusic/sysinfo v0.0.0-20190429151633-fbadb57345c2 h1:uMiaKNX5zFLOa6nNtun+d/lpV5bOBh7BvE4q9jfZacQ=
github.com/zcalusic/sysinfo v0.0.0-20190429151633-fbadb57345c2/go.mod h1:zAn3FAIbgZPYnutDND49Ivf8sb/mXYk8UjZdqMswgHg=
github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=
github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/otiai10/copy v1.11.0 h1:OKBD80J/mLBrwnzXqGtFCzprFSGioo30JcmR4APsNwc=
github.com/otiai10/copy v1.11.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/povsister/scp v0.0.0-20210427074412-33febfd9f13e h1:VtsDti2SgX7M7jy0QAyGgb162PeHLrOaNxmcYOtaGsY=
github.com/povsister/scp v0.0.0-20210427074412-33febfd9f13e/go.mod h1:i1Au86ZXK0ZalQNyBp2njCcyhSCR/QP/AMfILip+zNI=
github.com/rapidloop/skv v0.0.0-20180909015525-9def2caac4cc h1:eXQoy66wUI9meNnIdKYJ+EV/Tq3LvXeUe95AB2dPk8g=
github.com/rapidloop/skv v0.0.0-20180909015525-9def2caac4cc/go.mod h1:V5hvlcTzUJ3MOo0fEolWR25CZBBsb7q3wWVAmBYwr54=
github.com/remeh/sizedwaitgroup v1.0.0 h1:VNGGFwNo/R5+MJBf6yrsr110p0m4/OX4S3DCy7Kyl5E=
github.com/remeh/sizedwaitgroup v1.0.0/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNCJ1V+9+NVNYlo=
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc=
github.com/rs/zerolog v1.29.1/go.mod h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0=
github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zcalusic/sysinfo v0.9.5 h1:ivoHyj9aIAYkwzo1+8QgJ5s4oeE6Etx9FmZtqa4wJjQ=
github.com/zcalusic/sysinfo v0.9.5/go.mod h1:Z/gPVufBrFc8X5sef3m6kkw3r3nlNFp+I6bvASfvBZQ=
golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/logrusorgru/aurora.v1 v1.0.0-20181002194514-a7b3b318ed4e h1:uKdf1KQDFZDYqNzSDhxB5hFxj5Fq4e3/C/ejtRJxlY0=
gopkg.in/logrusorgru/aurora.v1 v1.0.0-20181002194514-a7b3b318ed4e/go.mod h1:DGR33jeYG1jxERD2W4hGjuW94Pxf3mkUf/Ddhf5BskA=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/logrusorgru/aurora.v2 v2.0.3 h1:5Hr76hqgwx9PednedYf5Q1dBfiPMZ2IgExR7u3tNXIE=
gopkg.in/logrusorgru/aurora.v2 v2.0.3/go.mod h1:Wm+IEn1fgFp8E2paL93oFVrHZW4toMKARNE85fDY5w8=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

135
gomod2nix.toml Normal file
View File

@ -0,0 +1,135 @@
schema = 3
[mod]
[mod."github.com/BurntSushi/toml"]
version = "v1.2.1"
hash = "sha256-Z1dlsUTjF8SJZCknYKt7ufJz8NPGg9P9+W17DQn+LO0="
[mod."github.com/Masterminds/semver"]
version = "v1.5.0"
hash = "sha256-3fEInOXFdzCiGdDZ1s9otEes7VXiL8Q1RVB3zXRPJsQ="
[mod."github.com/Microsoft/go-winio"]
version = "v0.5.2"
hash = "sha256-g+kEivzu+sIaO5fDGR4RCpm3LmJSzmsAO16wAzBnP6c="
[mod."github.com/ProtonMail/go-crypto"]
version = "v0.0.0-20230217124315-7d5c6f04bbb8"
hash = "sha256-QWS55wWNCrgx6BbIrroWpc1s08FeSqf2ehNTXkhbDJQ="
[mod."github.com/acomagu/bufpipe"]
version = "v1.0.4"
hash = "sha256-gO76ADEf7bzVUhmZbRU/LNA+L9qCdb/aaAAavvj26mA="
[mod."github.com/alecthomas/kong"]
version = "v0.7.1"
hash = "sha256-Wyg4T/TX2Le7SsbA3YFX1LVRsc8+7e8JPf1elBs/jdo="
[mod."github.com/boltdb/bolt"]
version = "v1.3.1"
hash = "sha256-eSxMiPaicRFOVsgwU8XOWrgvprJfuPfA8CQ6GakB8nw="
[mod."github.com/cavaliergopher/grab/v3"]
version = "v3.0.1"
hash = "sha256-7yixBq4kPAp+NqHvEC4xCKFwI5bqSbZfzdVVLwvMvl4="
[mod."github.com/cloudflare/circl"]
version = "v1.1.0"
hash = "sha256-3FxALC6ZXwhv+MwZsh3iHusx0E4Mh/SoxyfXWIwD3MU="
[mod."github.com/davecgh/go-spew"]
version = "v1.1.1"
hash = "sha256-nhzSUrE1fCkN0+RL04N4h8jWmRFPPPWbCuDc7Ss0akI="
[mod."github.com/emirpasic/gods"]
version = "v1.18.1"
hash = "sha256-hGDKddjLj+5dn2woHtXKUdd49/3xdsqnhx7VEdCu1m4="
[mod."github.com/go-git/gcfg"]
version = "v1.5.0"
hash = "sha256-A62eSLI/0y4sfwCsZhe/uoSC9Z3TryyW+PyWIcknYdE="
[mod."github.com/go-git/go-billy/v5"]
version = "v5.4.1"
hash = "sha256-BGkU8ryX1czUc6s30qt4vjt2NTtkWdwDjQxZ3LxGC2k="
[mod."github.com/go-git/go-git/v5"]
version = "v5.6.1"
hash = "sha256-8HbSt4yX7B2ozSFj8Beoo05GcHb8/rBt/6ILkoTEtd8="
[mod."github.com/imdario/mergo"]
version = "v0.3.13"
hash = "sha256-03LKAZXgR5YUMeK5IRh2ds2GyV6uSbdq5QCPbAzlTus="
[mod."github.com/jbenet/go-context"]
version = "v0.0.0-20150711004518-d14ea06fba99"
hash = "sha256-VANNCWNNpARH/ILQV9sCQsBWgyL2iFT+4AHZREpxIWE="
[mod."github.com/kevinburke/ssh_config"]
version = "v1.2.0"
hash = "sha256-Ta7ZOmyX8gG5tzWbY2oES70EJPfI90U7CIJS9EAce0s="
[mod."github.com/kylelemons/godebug"]
version = "v1.1.0"
hash = "sha256-DJ0re9mGqZb6PROQI8NPC0JVyDHdZ/y4uehNH7MbczY="
[mod."github.com/mattn/go-colorable"]
version = "v0.1.12"
hash = "sha256-Y1vCt0ShrCz4wSmwsppCfeLPLKrWusc2zM2lUFwDMyI="
[mod."github.com/mattn/go-isatty"]
version = "v0.0.14"
hash = "sha256-e8zn5eCVh/B1HOP1PGXeXH0bGkIV0vKYP9KLwZni5as="
[mod."github.com/mattn/go-runewidth"]
version = "v0.0.9"
hash = "sha256-dK/kIPe1tcxEubwI4CWfov/HWRBgD/fqlPC3d5i30CY="
[mod."github.com/mattn/go-sqlite3"]
version = "v1.14.16"
hash = "sha256-Ky0kas72AY0lpuRiC/fQk9rw9aJ6dvL9y1Ikw5PFzlA="
[mod."github.com/mitchellh/go-homedir"]
version = "v1.1.0"
hash = "sha256-oduBKXHAQG8X6aqLEpqZHs5DOKe84u6WkBwi4W6cv3k="
[mod."github.com/naoina/go-stringutil"]
version = "v0.1.0"
hash = "sha256-htVZGTbH2kFO56UrWfZUwc6DDhgU/TCXrzEPy8MNAwE="
[mod."github.com/naoina/toml"]
version = "v0.1.1"
hash = "sha256-Tq9NDUJSye1staRAuT32AqI3qLfxDQH1nAkZPpntB04="
[mod."github.com/natefinch/lumberjack"]
version = "v2.0.0+incompatible"
hash = "sha256-CLir3wRkgNy7tXQWODk7u3RP/W7qIsO2LADdM6/vWtQ="
[mod."github.com/olekukonko/tablewriter"]
version = "v0.0.5"
hash = "sha256-/5i70IkH/qSW5KjGzv8aQNKh9tHoz98tqtL0K2DMFn4="
[mod."github.com/otiai10/copy"]
version = "v1.11.0"
hash = "sha256-2xQtmy9eor9BIhNuvs52noIQDJ1alG3ZXumXXSL6l9Q="
[mod."github.com/pjbgf/sha1cd"]
version = "v0.3.0"
hash = "sha256-kX9BdLh2dxtGNaDvc24NORO+C0AZ7JzbrXrtecCdB7w="
[mod."github.com/povsister/scp"]
version = "v0.0.0-20210427074412-33febfd9f13e"
hash = "sha256-SgFphgTQZQtiINSQDjY9K7fCPhynWR3SsBe+X9nQqKo="
[mod."github.com/rapidloop/skv"]
version = "v0.0.0-20180909015525-9def2caac4cc"
hash = "sha256-q5AMoiWcLDNwriaownVVyP58DpRa5J0KZjkyJKSf42I="
[mod."github.com/remeh/sizedwaitgroup"]
version = "v1.0.0"
hash = "sha256-CtjNoNeep0TnfkuRN/rc48diAo0jUog1fOz3I/z6jfc="
[mod."github.com/rs/zerolog"]
version = "v1.29.1"
hash = "sha256-UX+uiffB13Wdt1DGatxm0WiDWnI91w5Vxfhz4D7oLAw="
[mod."github.com/sergi/go-diff"]
version = "v1.1.0"
hash = "sha256-8NJMabldpf40uwQN20T6QXx5KORDibCBJL02KD661xY="
[mod."github.com/skeema/knownhosts"]
version = "v1.1.0"
hash = "sha256-WpHhJaZea5zh7PBZPuL4a9XJyADeS7TK0DGnQF4NjmI="
[mod."github.com/xanzy/ssh-agent"]
version = "v0.3.3"
hash = "sha256-l3pGB6IdzcPA/HLk93sSN6NM2pKPy+bVOoacR5RC2+c="
[mod."github.com/zcalusic/sysinfo"]
version = "v0.9.5"
hash = "sha256-An8f5875m2OgrdzzwwY4DTVKq4+zFgjypX4PL/QX1Bo="
[mod."golang.org/x/crypto"]
version = "v0.9.0"
hash = "sha256-RpGvWrx96GBXpu1zsWxdv9/+WcRmjBxOC7fvSgOJGL0="
[mod."golang.org/x/net"]
version = "v0.10.0"
hash = "sha256-HkGiUYBZOBdOtt7mYo3N3swFjjAXzW++pG2JeWGJR9Q="
[mod."golang.org/x/sys"]
version = "v0.8.0"
hash = "sha256-wLPPnoFkHM1HPUaFIfRyQZOJjrqXVZimB0nMySly7Xg="
[mod."golang.org/x/time"]
version = "v0.3.0"
hash = "sha256-/hmc9skIswMYbivxNS7R8A6vCTUF9k2/7tr/ACkcEaM="
[mod."gopkg.in/logrusorgru/aurora.v2"]
version = "v2.0.3"
hash = "sha256-7o5Fh4jscdYKgXfnNMbcD68Kjw8Z4LcPgHcr4ZyQYrI="
[mod."gopkg.in/natefinch/lumberjack.v2"]
version = "v2.2.1"
hash = "sha256-GaXWRDxhGy4Z4mgE+bJ8OE9SVvYUa9TnNiydnp2s1Ms="
[mod."gopkg.in/warnings.v0"]
version = "v0.1.2"
hash = "sha256-ATVL9yEmgYbkJ1DkltDGRn/auGAjqGOfjQyBYyUo8s8="

View File

@ -1,7 +0,0 @@
// Copyright 2019 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
// imagesBaseURL is the root URL from which pre-built image archives
// (<name>.tar.gz) are fetched by downloadImage.
const imagesBaseURL = "https://out-of-tree.fra1.digitaloceanspaces.com/1.0.0/"

View File

@ -1,82 +0,0 @@
// Copyright 2019 Mikhail Klementev. All rights reserved.
// Use of this source code is governed by a AGPLv3 license
// (or later) that can be found in the LICENSE file.
package main
import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
)
// inspired by Edd Turtle code
func downloadFile(filepath string, url string) (err error) {
out, err := os.Create(filepath)
if err != nil {
return
}
defer out.Close()
resp, err := http.Get(url)
if err != nil {
return
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK:
break
case http.StatusForbidden, http.StatusNotFound:
err = fmt.Errorf("Cannot download %s. It looks like you need "+
"to generate it manually and place it "+
"to ~/.out-of-tree/images/. "+
"Check documentation for additional information.", url)
return
default:
err = fmt.Errorf("Something weird happens while "+
"download file: %d", resp.StatusCode)
return
}
_, err = io.Copy(out, resp.Body)
return
}
func unpackTar(archive, destination string) (err error) {
// NOTE: If you're change anything in tar command please check also
// BSD tar (or if you're using macOS, do not forget to check GNU Tar)
// Also make sure that sparse files are extracting correctly
cmd := exec.Command("tar", "-Sxf", archive)
cmd.Dir = destination + "/"
rawOutput, err := cmd.CombinedOutput()
if err != nil {
err = fmt.Errorf("%v: %s", err, rawOutput)
return
}
return
}
// downloadImage downloads <file>.tar.gz from imagesBaseURL into a
// temporary directory and unpacks it into path. The temporary
// directory (and the archive in it) is removed before returning.
func downloadImage(path, file string) (err error) {
	// NOTE(review): ioutil.TempDir is deprecated in favor of
	// os.MkdirTemp; kept to avoid leaving the file's io/ioutil
	// import unused.
	tmp, err := ioutil.TempDir("/tmp/", "out-of-tree_")
	if err != nil {
		return
	}
	defer os.RemoveAll(tmp)

	// build the local archive path with filepath.Join instead of
	// manual string concatenation; the URL stays plain concatenation
	// since URLs are always slash-separated
	archive := filepath.Join(tmp, file+".tar.gz")
	url := imagesBaseURL + file + ".tar.gz"

	if err = downloadFile(archive, url); err != nil {
		return
	}

	err = unpackTar(archive, path)
	return
}

Some files were not shown because too many files have changed in this diff Show More