commit bebecbd6f6688b320d225e490e130aa7a13a212a
Author: Nils André
Date:   Sun Feb 18 08:14:55 2024 -0800

    Initial commit

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..09357c3
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,6 @@
+src/tools/skopeo filter=lfs diff=lfs merge=lfs -text
+src/tools/squashfuse filter=lfs diff=lfs merge=lfs -text
+src/tools/umoci.amd64 filter=lfs diff=lfs merge=lfs -text
+src/tools/crun filter=lfs diff=lfs merge=lfs -text
+src/tools/fuse-overlayfs filter=lfs diff=lfs merge=lfs -text
+src/tools/mksquashfs filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e73c965
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+zig-cache/
+zig-out/
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..4a7c219
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "lib/zig-clap"]
+	path = src/zig-clap
+	url = https://github.com/Hejsil/zig-clap.git
diff --git a/BUILD.md b/BUILD.md
new file mode 100644
index 0000000..f189211
--- /dev/null
+++ b/BUILD.md
@@ -0,0 +1,15 @@
+```
+sudo apt install libzstd-dev
+./configure --without-xz --without-zlib LDFLAGS="-static"
+make LDFLAGS="-all-static" -j
+```
+
+```
+zig build -Doptimize=ReleaseSafe -Dtarget=x86_64-linux-musl
+```
+
+Example images to try on:
+
+* https://github.com/oven-sh/bun?tab=readme-ov-file#install
+* https://github.com/containers/skopeo/blob/main/install.md#container-images
+* https://github.com/shepherdjerred/macos-cross-compiler
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..71c982d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,18 @@
+# dockerc - compile docker images to standalone portable binaries
+
+## Features
+
+- [X] Compile docker images into portable binaries
+- [ ] MacOS support (using qemu)
+- [X] x86_64 support
+- [ ] arm64 support
+- [X] Supports arguments
+- [ ] Support `-p`
+- [ ] Support `-v`
+- [ ] Support other arguments...
+
+
+### Why zig?
+
+* Small binary size
+* Full static linking
diff --git a/build.zig b/build.zig
new file mode 100644
index 0000000..c634b72
--- /dev/null
+++ b/build.zig
@@ -0,0 +1,72 @@
+const std = @import("std");
+
+// Although this function looks imperative, note that its job is to
+// declaratively construct a build graph that will be executed by an external
+// runner.
+pub fn build(b: *std.Build) void {
+    b.reference_trace = 64;
+
+    // Standard target options allows the person running `zig build` to choose
+    // what target to build for. Here we do not override the defaults, which
+    // means any target is allowed, and the default is native. Other options
+    // for restricting supported target set are available.
+    const target = b.standardTargetOptions(.{});
+    // target.result.abi = .musl;
+
+    // Standard optimization options allow the person running `zig build` to select
+    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
+    // set a preferred release mode, allowing the user to decide how to optimize.
+    const optimize = b.standardOptimizeOption(.{});
+
+    // const clap = b.addModule("clap", .{ .root_source_file = .{ .path = "lib/zig-clap/clap.zig" } });
+
+    const runtime = b.addExecutable(.{
+        .name = "runtime",
+        .root_source_file = .{ .path = "src/main.zig" },
+        .target = target,
+        .optimize = optimize,
+        .link_libc = true,
+        // necessary to link in bigger file
+        // .code_model = .medium,
+    });
+
+    const dockerc = b.addExecutable(.{
+        .name = "dockerc",
+        .root_source_file = .{ .path = "src/dockerc.zig" },
+        .target = target,
+        .optimize = optimize,
+        .link_libc = true,
+    });
+
+    dockerc.root_module.addAnonymousImport("runtime", .{ .root_source_file = runtime.getEmittedBin() });
+
+    b.installArtifact(dockerc);
+
+    // This declares intent for the executable to be installed into the
+    // standard location when the user invokes the "install" step (the default
+    // step when running `zig build`).
+    // b.installArtifact(exe);
+
+    // This *creates* a Run step in the build graph, to be executed when another
+    // step is evaluated that depends on it. The next line below will establish
+    // such a dependency.
+    // const run_cmd = b.addRunArtifact(exe);
+
+    // By making the run step depend on the install step, it will be run from the
+    // installation directory rather than directly from within the cache directory.
+    // This is not necessary, however, if the application depends on other installed
+    // files, this ensures they will be present and in the expected location.
+    // run_cmd.step.dependOn(b.getInstallStep());
+
+    // This allows the user to pass arguments to the application in the build
+    // command itself, like this: `zig build run -- arg1 arg2 etc`
+    // if (b.args) |args| {
+    //     run_cmd.addArgs(args);
+    // }
+
+    // This creates a build step. It will be visible in the `zig build --help` menu,
+    // and can be selected like this: `zig build run`
+    // This will evaluate the `run` step rather than the default, which is "install".
+    // const run_step = b.step("run", "Run the app");
+    // run_step.dependOn(&run_cmd.step);
+}
diff --git a/build.zig.zon b/build.zig.zon
new file mode 100644
index 0000000..7ca31bf
--- /dev/null
+++ b/build.zig.zon
@@ -0,0 +1,62 @@
+.{
+    .name = "dockerc",
+    // This is a [Semantic Version](https://semver.org/).
+    // In a future version of Zig it will be used for package deduplication.
+    .version = "0.0.0",
+
+    // This field is optional.
+    // This is currently advisory only; Zig does not yet do anything
+    // with this value.
+    //.minimum_zig_version = "0.11.0",
+
+    // This field is optional.
+    // Each dependency must either provide a `url` and `hash`, or a `path`.
+    // `zig build --fetch` can be used to fetch all dependencies of a package, recursively.
+    // Once all dependencies are fetched, `zig build` no longer requires
+    // internet connectivity.
+    .dependencies = .{
+        // See `zig fetch --save <url>` for a command-line interface for adding dependencies.
+        //.example = .{
+        //    // When updating this field to a new URL, be sure to delete the corresponding
+        //    // `hash`, otherwise you are communicating that you expect to find the old hash at
+        //    // the new URL.
+        //    .url = "https://example.com/foo.tar.gz",
+        //
+        //    // This is computed from the file contents of the directory of files that is
+        //    // obtained after fetching `url` and applying the inclusion rules given by
+        //    // `paths`.
+        //    //
+        //    // This field is the source of truth; packages do not come from a `url`; they
+        //    // come from a `hash`. `url` is just one of many possible mirrors for how to
+        //    // obtain a package matching this `hash`.
+        //    //
+        //    // Uses the [multihash](https://multiformats.io/multihash/) format.
+        //    .hash = "...",
+        //
+        //    // When this is provided, the package is found in a directory relative to the
+        //    // build root. In this case the package's hash is irrelevant and therefore not
+        //    // computed. This field and `url` are mutually exclusive.
+        //    .path = "foo",
+        //},
+    },
+
+    // Specifies the set of files and directories that are included in this package.
+    // Only files and directories listed here are included in the `hash` that
+    // is computed for this package.
+    // Paths are relative to the build root. Use the empty string (`""`) to refer to
+    // the build root itself.
+    // A directory listed here means that all files within, recursively, are included.
+    .paths = .{
+        // This makes *all* files, recursively, included in this package. It is generally
+        // better to explicitly list the files and directories instead, to ensure that
+        // fetching from tarballs, file system paths, and version control all result
+        // in the same contents hash.
+        "",
+        // For example...
+        //"build.zig",
+        //"build.zig.zon",
+        //"src",
+        //"LICENSE",
+        //"README.md",
+    },
+}
diff --git a/src/common.zig b/src/common.zig
new file mode 100644
index 0000000..4ad98d5
--- /dev/null
+++ b/src/common.zig
@@ -0,0 +1,16 @@
+const std = @import("std");
+
+pub extern fn mkdtemp(in: [*:0]const u8) ?[*:0]const u8;
+
+// TODO: ideally we can use memfd_create
+// The problem is that zig doesn't have fexecve support by default so it would
+// be a pain to find the location of the file.
+pub fn extract_file(tmpDir: []const u8, name: []const u8, data: []const u8, allocator: std.mem.Allocator) ![]const u8 {
+    const path = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ tmpDir, name });
+
+    const file = try std.fs.createFileAbsolute(path, .{ .mode = 0o700 });
+    defer file.close();
+    try file.writeAll(data);
+
+    return path;
+}
diff --git a/src/dockerc.zig b/src/dockerc.zig
new file mode 100644
index 0000000..e962f61
--- /dev/null
+++ b/src/dockerc.zig
@@ -0,0 +1,130 @@
+const std = @import("std");
+// const clap = @import("../lib/zip-clap/clap.zig");
+const clap = @import("zig-clap/clap.zig");
+const common = @import("common.zig");
+
+const mkdtemp = common.mkdtemp;
+const extract_file = common.extract_file;
+
+const debug = std.debug;
+
+const io = std.io;
+
+const skopeo_content = @embedFile("tools/skopeo");
+const mksquashfs_content = @embedFile("tools/mksquashfs");
+const umoci_content = @embedFile("tools/umoci.amd64");
+const runtime_content = @embedFile("runtime");
+
+const runtime_content_len_u64 = data: {
+    var buf: [8]u8 = undefined;
+    std.mem.writeInt(u64, &buf, runtime_content.len, .big);
+    break :data buf;
+};
+
+pub fn main() !void {
+    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
+    defer _ = gpa.deinit();
+
+    const temp_dir_path = std.mem.span(mkdtemp("/tmp/dockerc-XXXXXX") orelse @panic("failed to create temp dir"));
+
+    const allocator = gpa.allocator();
+    const skopeo_path = try extract_file(temp_dir_path, "skopeo", skopeo_content, allocator);
+    defer allocator.free(skopeo_path);
+
+    const umoci_path = try extract_file(temp_dir_path, "umoci", umoci_content, allocator);
+    defer allocator.free(umoci_path);
+
+    const mksquashfs_path = try extract_file(temp_dir_path, "mksquashfs", mksquashfs_content, allocator);
+    defer allocator.free(mksquashfs_path);
+
+    // FIX(review): the `<str>` value placeholders had been stripped (HTML-eaten);
+    // without them zig-clap parses --image/--output as valueless flags and
+    // `res.args.image`/`res.args.output` would not be string options.
+    const params = comptime clap.parseParamsComptime(
+        \\-h, --help            Display this help and exit.
+        \\-i, --image <str>     Image to pull.
+        \\-o, --output <str>    Output file.
+        \\--rootfull            Do not use rootless container.
+        \\
+    );
+
+    var diag = clap.Diagnostic{};
+    var res = clap.parse(clap.Help, &params, clap.parsers.default, .{
+        .diagnostic = &diag,
+        .allocator = allocator,
+    }) catch |err| {
+        // Report useful error and exit
+        diag.report(io.getStdErr().writer(), err) catch {};
+        return err;
+    };
+    defer res.deinit();
+
+    if (res.args.help != 0) {
+        debug.print("help message\n", .{});
+        return;
+    }
+
+    var missing_args = false;
+    if (res.args.image == null) {
+        debug.print("no --image specified\n", .{});
+        missing_args = true;
+    }
+
+    if (res.args.output == null) {
+        debug.print("no --output specified\n", .{});
+        missing_args = true;
+    }
+
+    if (missing_args) {
+        return;
+    }
+
+    // safe to assert because checked above
+    const image = res.args.image.?;
+    const output_path = res.args.output.?;
+
+    const destination_arg = try std.fmt.allocPrint(allocator, "oci:{s}/image:latest", .{temp_dir_path});
+    defer allocator.free(destination_arg);
+
+    var skopeoProcess = std.ChildProcess.init(&[_][]const u8{ skopeo_path, "copy", image, destination_arg }, gpa.allocator());
+    _ = try skopeoProcess.spawnAndWait();
+
+    const umoci_image_layout_path = try std.fmt.allocPrint(allocator, "{s}/image:latest", .{temp_dir_path});
+    defer allocator.free(umoci_image_layout_path);
+
+    const bundle_destination = try std.fmt.allocPrint(allocator, "{s}/bundle", .{temp_dir_path});
+    defer allocator.free(bundle_destination);
+
+    const umoci_args = [_][]const u8{
+        umoci_path,
+        "unpack",
+        "--image",
+        umoci_image_layout_path,
+        bundle_destination,
+        "--rootless",
+    };
+    // When --rootfull is given, drop the trailing "--rootless" flag.
+    var umociProcess = std.ChildProcess.init(if (res.args.rootfull == 0) &umoci_args else umoci_args[0 .. umoci_args.len - 1], gpa.allocator());
+    _ = try umociProcess.spawnAndWait();
+
+    const offset_arg = try std.fmt.allocPrint(allocator, "{}", .{runtime_content.len});
+    defer allocator.free(offset_arg);
+
+    var mksquashfsProcess = std.ChildProcess.init(&[_][]const u8{
+        mksquashfs_path,
+        bundle_destination,
+        output_path,
+        "-comp",
+        "zstd",
+        "-offset",
+        offset_arg,
+        "-noappend",
+    }, gpa.allocator());
+    _ = try mksquashfsProcess.spawnAndWait();
+
+    const file = try std.fs.cwd().openFile(output_path, .{
+        .mode = .write_only,
+    });
+    defer file.close();
+
+    try file.writeAll(runtime_content);
+    try file.seekFromEnd(0);
+    try file.writeAll(&runtime_content_len_u64);
+    try file.chmod(0o755);
+}
diff --git a/src/main.zig b/src/main.zig
new file mode 100644
index 0000000..a80dad1
--- /dev/null
+++ b/src/main.zig
@@ -0,0 +1,142 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const common = @import("common.zig");
+
+const mkdtemp = common.mkdtemp;
+const extract_file = common.extract_file;
+
+const squashfuse_content = @embedFile("tools/squashfuse");
+const crun_content = @embedFile("tools/crun");
+const overlayfs_content = @embedFile("tools/fuse-overlayfs");
+
+// Reads the u64 (big-endian) squashfs offset stored in the last 8 bytes of
+// the executable itself.
+fn getOffset(path: []const u8) !u64 {
+    var file = try std.fs.cwd().openFile(path, .{});
+    // FIX(review): the handle was previously leaked; close it on all exits.
+    defer file.close();
+    try file.seekFromEnd(-8);
+
+    var buffer: [8]u8 = undefined;
+    assert(try file.readAll(&buffer) == 8);
+
+    return std.mem.readInt(u64, buffer[0..8], std.builtin.Endian.big);
+}
+
+const eql = std.mem.eql;
+
+fn processArgs(file: std.fs.File, allocator: std.mem.Allocator) !void {
+    // const file = try std.fs.openFileAbsolute(path, .{ .mode = .read_write });
+
+    var jsonReader = std.json.reader(allocator, file.reader());
+
+    // TODO: having to specify max_value_len seems like a bug
+    var root_value = try
+        std.json.Value.jsonParse(allocator, &jsonReader, .{ .max_value_len = 99999999 });
+
+    const argVal = args: {
+        switch (root_value) {
+            .object => |*object| {
+                const processVal = object.getPtr("process") orelse @panic("no process key");
+                switch (processVal.*) {
+                    .object => |*process| {
+                        const argsVal = process.getPtr("args") orelse @panic("no args key");
+                        switch (argsVal.*) {
+                            .array => |*argsArr| {
+                                break :args argsArr;
+                            },
+                            else => return error.InvalidJSON,
+                        }
+                    },
+                    else => return error.InvalidJSON,
+                }
+            },
+            else => return error.InvalidJSON,
+        }
+    };
+
+    var args = std.process.args();
+    _ = args.next() orelse @panic("there should be an executable");
+
+    while (args.next()) |arg| {
+        if (eql(u8, arg, "-p")) {
+            _ = args.next();
+            @panic("not implemented");
+        } else if (eql(u8, arg, "-v")) {
+            _ = args.next();
+            @panic("not implemented");
+        } else if (eql(u8, arg, "--")) {
+            while (args.next()) |arg_inner| {
+                try argVal.append(std.json.Value{ .string = arg_inner });
+            }
+        } else {
+            try argVal.append(std.json.Value{ .string = arg });
+        }
+    }
+
+    try file.setEndPos(0);
+    try file.seekTo(0);
+    var jsonWriter = std.json.writeStream(file.writer(), .{ .whitespace = .indent_tab });
+
+    try std.json.Value.jsonStringify(root_value, &jsonWriter);
+}
+
+pub fn main() !void {
+    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
+    const allocator = gpa.allocator();
+    // defer _ = gpa.deinit();
+    var args = std.process.args();
+    const executable_path = args.next() orelse unreachable;
+
+    const temp_dir_path = std.mem.span(mkdtemp("/tmp/dockerc-XXXXXX") orelse @panic("failed to create temp dir"));
+
+    const squashfuse_path = try extract_file(temp_dir_path, "squashfuse", squashfuse_content, allocator);
+    defer allocator.free(squashfuse_path);
+
+    const crun_path = try extract_file(temp_dir_path, "crun", crun_content, allocator);
+    defer allocator.free(crun_path);
+
+    const overlayfs_path = try extract_file(temp_dir_path, "fuse-overlayfs", overlayfs_content,
+        allocator);
+    defer allocator.free(overlayfs_path);
+
+    const filesystem_bundle_dir_null = try std.fmt.allocPrintZ(allocator, "{s}/{s}", .{ temp_dir_path, "bundle.squashfs" });
+    defer allocator.free(filesystem_bundle_dir_null);
+
+    try std.fs.makeDirAbsolute(filesystem_bundle_dir_null);
+
+    const mount_dir_path = try std.fmt.allocPrint(allocator, "{s}/mount", .{temp_dir_path});
+    defer allocator.free(mount_dir_path);
+
+    const offsetArg = try std.fmt.allocPrint(allocator, "offset={}", .{try getOffset(executable_path)});
+    defer allocator.free(offsetArg);
+
+    const args_buf = [_][]const u8{ squashfuse_path, "-o", offsetArg, executable_path, filesystem_bundle_dir_null };
+
+    var mountProcess = std.ChildProcess.init(&args_buf, gpa.allocator());
+    _ = try mountProcess.spawnAndWait();
+
+    // FIX(review): workdir previously pointed at "{s}/upper", the same
+    // directory as upperdir. fuse-overlayfs requires workdir to be a distinct
+    // (empty) directory on the same filesystem as upperdir; the dedicated
+    // "work" directory created below was never used. Point workdir at it.
+    const overlayfs_options = try std.fmt.allocPrint(allocator, "lowerdir={s},upperdir={s}/upper,workdir={s}/work", .{
+        filesystem_bundle_dir_null,
+        temp_dir_path,
+        temp_dir_path,
+    });
+    defer allocator.free(overlayfs_options);
+
+    const tmpDir = try std.fs.openDirAbsolute(temp_dir_path, .{});
+    try tmpDir.makeDir("upper");
+    try tmpDir.makeDir("work");
+    try tmpDir.makeDir("mount");
+
+    var overlayfsProcess = std.ChildProcess.init(&[_][]const u8{ overlayfs_path, "-o", overlayfs_options, mount_dir_path }, allocator);
+    _ = try overlayfsProcess.spawnAndWait();
+
+    const file = try tmpDir.openFile("mount/config.json", .{ .mode = .read_write });
+    defer file.close();
+    try processArgs(file, allocator);
+
+    var crunProcess = std.ChildProcess.init(&[_][]const u8{ crun_path, "run", "-b", mount_dir_path, "crun_docker_c_id" }, gpa.allocator());
+    _ = try crunProcess.spawnAndWait();
+
+    var umountOverlayProcess = std.ChildProcess.init(&[_][]const u8{ "umount", mount_dir_path }, gpa.allocator());
+    _ = try umountOverlayProcess.spawnAndWait();
+
+    var umountProcess = std.ChildProcess.init(&[_][]const u8{ "umount", filesystem_bundle_dir_null }, gpa.allocator());
+    _ = try
+        umountProcess.spawnAndWait();
+
+    // TODO: clean up /tmp
+}
diff --git a/src/tools/crun b/src/tools/crun
new file mode 100755
index 0000000..a936cc3
--- /dev/null
+++ b/src/tools/crun
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d3d6f02214a8782e2db84b5ef149509c496d182e619be15fb7d0f07fae476f9
+size 2811040
diff --git a/src/tools/fuse-overlayfs b/src/tools/fuse-overlayfs
new file mode 100644
index 0000000..385ff91
--- /dev/null
+++ b/src/tools/fuse-overlayfs
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0011ad825dc0274b6e330fb9a8d3d578ea7bbf738bab08934b90be070b8d0a4a
+size 1785448
diff --git a/src/tools/mksquashfs b/src/tools/mksquashfs
new file mode 100755
index 0000000..bc08fe8
--- /dev/null
+++ b/src/tools/mksquashfs
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:868e311890152b76d7745f74eaedbbc97458be7adfea9734713c2e5c05aa37fc
+size 2556520
diff --git a/src/tools/skopeo b/src/tools/skopeo
new file mode 100755
index 0000000..8126fbd
--- /dev/null
+++ b/src/tools/skopeo
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:940ddef87b0b2ff8a9d3bf8657549160a8e42b89f7b3a59e2d04c0b5ed2e05a8
+size 35204913
diff --git a/src/tools/squashfuse b/src/tools/squashfuse
new file mode 100755
index 0000000..f65dcba
--- /dev/null
+++ b/src/tools/squashfuse
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2acf6d8609125c68008df48e5febafb98730be116f257a218ccfc2ff8b03832d
+size 1836456
diff --git a/src/tools/umoci.amd64 b/src/tools/umoci.amd64
new file mode 100755
index 0000000..44bd684
--- /dev/null
+++ b/src/tools/umoci.amd64
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6abecdbe7ac96a8e48fdb73fb53f08d21d4dc5e040f7590d2ca5547b7f2b2e85
+size 7499776