commit bebecbd6f6
Author: Nils André
Committer: Nils
Date: 2024-02-18 08:14:55 -08:00

    Initial commit

16 changed files with 484 additions and 0 deletions

.gitattributes: new vendored file, 6 lines

@@ -0,0 +1,6 @@
src/tools/skopeo filter=lfs diff=lfs merge=lfs -text
src/tools/squashfuse filter=lfs diff=lfs merge=lfs -text
src/tools/umoci.amd64 filter=lfs diff=lfs merge=lfs -text
src/tools/crun filter=lfs diff=lfs merge=lfs -text
src/tools/fuse-overlayfs filter=lfs diff=lfs merge=lfs -text
src/tools/mksquashfs filter=lfs diff=lfs merge=lfs -text

.gitignore: new vendored file, 2 lines

@@ -0,0 +1,2 @@
zig-cache/
zig-out/

.gitmodules: new vendored file, 3 lines

@@ -0,0 +1,3 @@
[submodule "lib/zig-clap"]
path = src/zig-clap
url = https://github.com/Hejsil/zig-clap.git

BUILD.md: new file, 15 lines

@@ -0,0 +1,15 @@
```
sudo apt install libzstd-dev
./configure --without-xz --without-zlib LDFLAGS="-static"
make LDFLAGS="-all-static" -j
```
```
zig build -Doptimize=ReleaseSafe -Dtarget=x86_64-linux-musl
```
Example images to try it on (a usage sketch follows the list):
* https://github.com/oven-sh/bun?tab=readme-ov-file#install
* https://github.com/containers/skopeo/blob/main/install.md#container-images
* https://github.com/shepherdjerred/macos-cross-compiler
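
Once `dockerc` itself is built, a full round trip looks roughly like the sketch below. This is a hedged example: the image reference and the `./bun` output name are placeholders, the `docker://` prefix is the transport skopeo expects for registry images, and the binary path assumes the default `zig-out` install prefix.

```
# hypothetical end-to-end run (placeholder image and output names)
./zig-out/bin/dockerc --image docker://docker.io/oven/bun --output ./bun
# the result is a standalone x86_64 Linux binary that can be copied elsewhere and run directly
./bun
```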

README.md: new file, 18 lines

@@ -0,0 +1,18 @@
# dockerc - compile docker images to standalone portable binaries
## Features
- [X] Compile docker images into portable binaries
- [ ] macOS support (using qemu)
- [X] x86_64 support
- [ ] arm64 support
- [X] Supports arguments (see the sketch after this list)
- [ ] Support `-p`
- [ ] Support `-v`
- [ ] Support other arguments...
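
Arguments given to a compiled binary are appended to the container's `process.args` by the runtime (`processArgs` in `src/main.zig`); `-p` and `-v` are recognized but not implemented yet. A minimal sketch, assuming `./bun` is a placeholder for a binary produced by dockerc:

```
# anything that is not -p/-v is forwarded to the container entrypoint;
# `--` forwards everything after it verbatim
./bun --version
./bun -- run script.js
```
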
### Why zig?
* Small binary size
* Full static linking

build.zig: new file, 72 lines

@@ -0,0 +1,72 @@
const std = @import("std");

// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
pub fn build(b: *std.Build) void {
    b.reference_trace = 64;

    // Standard target options allows the person running `zig build` to choose
    // what target to build for. Here we do not override the defaults, which
    // means any target is allowed, and the default is native. Other options
    // for restricting supported target set are available.
    const target = b.standardTargetOptions(.{});
    // target.result.abi = .musl;

    // Standard optimization options allow the person running `zig build` to select
    // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
    // set a preferred release mode, allowing the user to decide how to optimize.
    const optimize = b.standardOptimizeOption(.{});

    // const clap = b.addModule("clap", .{ .root_source_file = .{ .path = "lib/zig-clap/clap.zig" } });

    const runtime = b.addExecutable(.{
        .name = "runtime",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = target,
        .optimize = optimize,
        .link_libc = true,
        // necessary to link in bigger file
        // .code_model = .medium,
    });

    const dockerc = b.addExecutable(.{
        .name = "dockerc",
        .root_source_file = .{ .path = "src/dockerc.zig" },
        .target = target,
        .optimize = optimize,
        .link_libc = true,
    });

    dockerc.root_module.addAnonymousImport("runtime", .{ .root_source_file = runtime.getEmittedBin() });
    b.installArtifact(dockerc);

    // This declares intent for the executable to be installed into the
    // standard location when the user invokes the "install" step (the default
    // step when running `zig build`).
    // b.installArtifact(exe);

    // This *creates* a Run step in the build graph, to be executed when another
    // step is evaluated that depends on it. The next line below will establish
    // such a dependency.
    // const run_cmd = b.addRunArtifact(exe);

    // By making the run step depend on the install step, it will be run from the
    // installation directory rather than directly from within the cache directory.
    // This is not necessary, however, if the application depends on other installed
    // files, this ensures they will be present and in the expected location.
    // run_cmd.step.dependOn(b.getInstallStep());

    // This allows the user to pass arguments to the application in the build
    // command itself, like this: `zig build run -- arg1 arg2 etc`
    // if (b.args) |args| {
    //     run_cmd.addArgs(args);
    // }

    // This creates a build step. It will be visible in the `zig build --help` menu,
    // and can be selected like this: `zig build run`
    // This will evaluate the `run` step rather than the default, which is "install".
    // const run_step = b.step("run", "Run the app");
    // run_step.dependOn(&run_cmd.step);
}

build.zig.zon: new file, 62 lines

@@ -0,0 +1,62 @@
.{
    .name = "dockerc",
    // This is a [Semantic Version](https://semver.org/).
    // In a future version of Zig it will be used for package deduplication.
    .version = "0.0.0",

    // This field is optional.
    // This is currently advisory only; Zig does not yet do anything
    // with this value.
    //.minimum_zig_version = "0.11.0",

    // This field is optional.
    // Each dependency must either provide a `url` and `hash`, or a `path`.
    // `zig build --fetch` can be used to fetch all dependencies of a package, recursively.
    // Once all dependencies are fetched, `zig build` no longer requires
    // internet connectivity.
    .dependencies = .{
        // See `zig fetch --save <url>` for a command-line interface for adding dependencies.
        //.example = .{
        //    // When updating this field to a new URL, be sure to delete the corresponding
        //    // `hash`, otherwise you are communicating that you expect to find the old hash at
        //    // the new URL.
        //    .url = "https://example.com/foo.tar.gz",
        //
        //    // This is computed from the file contents of the directory of files that is
        //    // obtained after fetching `url` and applying the inclusion rules given by
        //    // `paths`.
        //    //
        //    // This field is the source of truth; packages do not come from a `url`; they
        //    // come from a `hash`. `url` is just one of many possible mirrors for how to
        //    // obtain a package matching this `hash`.
        //    //
        //    // Uses the [multihash](https://multiformats.io/multihash/) format.
        //    .hash = "...",
        //
        //    // When this is provided, the package is found in a directory relative to the
        //    // build root. In this case the package's hash is irrelevant and therefore not
        //    // computed. This field and `url` are mutually exclusive.
        //    .path = "foo",
        //},
    },

    // Specifies the set of files and directories that are included in this package.
    // Only files and directories listed here are included in the `hash` that
    // is computed for this package.
    // Paths are relative to the build root. Use the empty string (`""`) to refer to
    // the build root itself.
    // A directory listed here means that all files within, recursively, are included.
    .paths = .{
        // This makes *all* files, recursively, included in this package. It is generally
        // better to explicitly list the files and directories instead, to insure that
        // fetching from tarballs, file system paths, and version control all result
        // in the same contents hash.
        "",
        // For example...
        //"build.zig",
        //"build.zig.zon",
        //"src",
        //"LICENSE",
        //"README.md",
    },
}

src/common.zig: new file, 16 lines

@@ -0,0 +1,16 @@
const std = @import("std");

pub extern fn mkdtemp(in: [*:0]const u8) ?[*:0]const u8;

// TODO: ideally we can use memfd_create
// The problem is that zig doesn't have fexecve support by default so it would
// be a pain to find the location of the file.
pub fn extract_file(tmpDir: []const u8, name: []const u8, data: []const u8, allocator: std.mem.Allocator) ![]const u8 {
    const path = try std.fmt.allocPrint(allocator, "{s}/{s}", .{ tmpDir, name });

    const file = try std.fs.createFileAbsolute(path, .{ .mode = 0o700 });
    defer file.close();
    try file.writeAll(data);

    return path;
}

src/dockerc.zig: new file, 130 lines

@@ -0,0 +1,130 @@
const std = @import("std");
// const clap = @import("../lib/zip-clap/clap.zig");
const clap = @import("zig-clap/clap.zig");

const common = @import("common.zig");
const mkdtemp = common.mkdtemp;
const extract_file = common.extract_file;

const debug = std.debug;
const io = std.io;

const skopeo_content = @embedFile("tools/skopeo");
const mksquashfs_content = @embedFile("tools/mksquashfs");
const umoci_content = @embedFile("tools/umoci.amd64");
const runtime_content = @embedFile("runtime");

const runtime_content_len_u64 = data: {
    var buf: [8]u8 = undefined;
    std.mem.writeInt(u64, &buf, runtime_content.len, .big);
    break :data buf;
};

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    const temp_dir_path = std.mem.span(mkdtemp("/tmp/dockerc-XXXXXX") orelse @panic("failed to create temp dir"));

    const allocator = gpa.allocator();

    const skopeo_path = try extract_file(temp_dir_path, "skopeo", skopeo_content, allocator);
    defer allocator.free(skopeo_path);

    const umoci_path = try extract_file(temp_dir_path, "umoci", umoci_content, allocator);
    defer allocator.free(umoci_path);

    const mksquashfs_path = try extract_file(temp_dir_path, "mksquashfs", mksquashfs_content, allocator);
    defer allocator.free(mksquashfs_path);

    const params = comptime clap.parseParamsComptime(
        \\-h, --help          Display this help and exit.
        \\-i, --image <str>   Image to pull.
        \\-o, --output <str>  Output file.
        \\--rootfull          Do not use rootless container.
        \\
    );

    var diag = clap.Diagnostic{};
    var res = clap.parse(clap.Help, &params, clap.parsers.default, .{
        .diagnostic = &diag,
        .allocator = allocator,
    }) catch |err| {
        // Report useful error and exit
        diag.report(io.getStdErr().writer(), err) catch {};
        return err;
    };
    defer res.deinit();

    if (res.args.help != 0) {
        debug.print("help message\n", .{});
        return;
    }

    var missing_args = false;
    if (res.args.image == null) {
        debug.print("no --image specified\n", .{});
        missing_args = true;
    }
    if (res.args.output == null) {
        debug.print("no --output specified\n", .{});
        missing_args = true;
    }
    if (missing_args) {
        return;
    }

    // safe to assert because checked above
    const image = res.args.image.?;
    const output_path = res.args.output.?;

    const destination_arg = try std.fmt.allocPrint(allocator, "oci:{s}/image:latest", .{temp_dir_path});
    defer allocator.free(destination_arg);

    var skopeoProcess = std.ChildProcess.init(&[_][]const u8{ skopeo_path, "copy", image, destination_arg }, gpa.allocator());
    _ = try skopeoProcess.spawnAndWait();

    const umoci_image_layout_path = try std.fmt.allocPrint(allocator, "{s}/image:latest", .{temp_dir_path});
    defer allocator.free(umoci_image_layout_path);

    const bundle_destination = try std.fmt.allocPrint(allocator, "{s}/bundle", .{temp_dir_path});
    defer allocator.free(bundle_destination);

    const umoci_args = [_][]const u8{
        umoci_path,
        "unpack",
        "--image",
        umoci_image_layout_path,
        bundle_destination,
        "--rootless",
    };

    var umociProcess = std.ChildProcess.init(if (res.args.rootfull == 0) &umoci_args else umoci_args[0 .. umoci_args.len - 1], gpa.allocator());
    _ = try umociProcess.spawnAndWait();

    const offset_arg = try std.fmt.allocPrint(allocator, "{}", .{runtime_content.len});
    defer allocator.free(offset_arg);

    var mksquashfsProcess = std.ChildProcess.init(&[_][]const u8{
        mksquashfs_path,
        bundle_destination,
        output_path,
        "-comp",
        "zstd",
        "-offset",
        offset_arg,
        "-noappend",
    }, gpa.allocator());
    _ = try mksquashfsProcess.spawnAndWait();

    const file = try std.fs.cwd().openFile(output_path, .{
        .mode = .write_only,
    });
    defer file.close();

    try file.writeAll(runtime_content);
    try file.seekFromEnd(0);
    try file.writeAll(&runtime_content_len_u64);

    try file.chmod(0o755);
}
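
The output file produced above is the `runtime` executable, immediately followed by the squashfs image that mksquashfs wrote at offset `runtime_content.len`, with an 8-byte big-endian copy of that offset appended at the very end (this trailer is what `getOffset` in `src/main.zig` reads back). A hedged way to inspect the trailer of a produced binary, using a placeholder name:

```
# hypothetical check: the last 8 bytes hold the squashfs offset (runtime size) as a big-endian u64
tail -c 8 ./bun | xxd
```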

src/main.zig: new file, 142 lines

@@ -0,0 +1,142 @@
const std = @import("std");
const assert = std.debug.assert;

const common = @import("common.zig");
const mkdtemp = common.mkdtemp;
const extract_file = common.extract_file;

const squashfuse_content = @embedFile("tools/squashfuse");
const crun_content = @embedFile("tools/crun");
const overlayfs_content = @embedFile("tools/fuse-overlayfs");

fn getOffset(path: []const u8) !u64 {
    var file = try std.fs.cwd().openFile(path, .{});
    try file.seekFromEnd(-8);

    var buffer: [8]u8 = undefined;
    assert(try file.readAll(&buffer) == 8);

    return std.mem.readInt(u64, buffer[0..8], std.builtin.Endian.big);
}

const eql = std.mem.eql;

fn processArgs(file: std.fs.File, allocator: std.mem.Allocator) !void {
    // const file = try std.fs.openFileAbsolute(path, .{ .mode = .read_write });
    var jsonReader = std.json.reader(allocator, file.reader());

    // TODO: having to specify max_value_len seems like a bug
    var root_value = try std.json.Value.jsonParse(allocator, &jsonReader, .{ .max_value_len = 99999999 });

    const argVal = args: {
        switch (root_value) {
            .object => |*object| {
                const processVal = object.getPtr("process") orelse @panic("no process key");
                switch (processVal.*) {
                    .object => |*process| {
                        const argsVal = process.getPtr("args") orelse @panic("no args key");
                        switch (argsVal.*) {
                            .array => |*argsArr| {
                                break :args argsArr;
                            },
                            else => return error.InvalidJSON,
                        }
                    },
                    else => return error.InvalidJSON,
                }
            },
            else => return error.InvalidJSON,
        }
    };

    var args = std.process.args();
    _ = args.next() orelse @panic("there should be an executable");

    while (args.next()) |arg| {
        if (eql(u8, arg, "-p")) {
            _ = args.next();
            @panic("not implemented");
        } else if (eql(u8, arg, "-v")) {
            _ = args.next();
            @panic("not implemented");
        } else if (eql(u8, arg, "--")) {
            while (args.next()) |arg_inner| {
                try argVal.append(std.json.Value{ .string = arg_inner });
            }
        } else {
            try argVal.append(std.json.Value{ .string = arg });
        }
    }

    try file.setEndPos(0);
    try file.seekTo(0);

    var jsonWriter = std.json.writeStream(file.writer(), .{ .whitespace = .indent_tab });
    try std.json.Value.jsonStringify(root_value, &jsonWriter);
}

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    // defer _ = gpa.deinit();

    var args = std.process.args();
    const executable_path = args.next() orelse unreachable;

    const temp_dir_path = std.mem.span(mkdtemp("/tmp/dockerc-XXXXXX") orelse @panic("failed to create temp dir"));

    const squashfuse_path = try extract_file(temp_dir_path, "squashfuse", squashfuse_content, allocator);
    defer allocator.free(squashfuse_path);

    const crun_path = try extract_file(temp_dir_path, "crun", crun_content, allocator);
    defer allocator.free(crun_path);

    const overlayfs_path = try extract_file(temp_dir_path, "fuse-overlayfs", overlayfs_content, allocator);
    defer allocator.free(overlayfs_path);

    const filesystem_bundle_dir_null = try std.fmt.allocPrintZ(allocator, "{s}/{s}", .{ temp_dir_path, "bundle.squashfs" });
    defer allocator.free(filesystem_bundle_dir_null);

    try std.fs.makeDirAbsolute(filesystem_bundle_dir_null);

    const mount_dir_path = try std.fmt.allocPrint(allocator, "{s}/mount", .{temp_dir_path});
    defer allocator.free(mount_dir_path);

    const offsetArg = try std.fmt.allocPrint(allocator, "offset={}", .{try getOffset(executable_path)});
    defer allocator.free(offsetArg);

    const args_buf = [_][]const u8{ squashfuse_path, "-o", offsetArg, executable_path, filesystem_bundle_dir_null };

    var mountProcess = std.ChildProcess.init(&args_buf, gpa.allocator());
    _ = try mountProcess.spawnAndWait();

    const overlayfs_options = try std.fmt.allocPrint(allocator, "lowerdir={s},upperdir={s}/upper,workdir={s}/upper", .{
        filesystem_bundle_dir_null,
        temp_dir_path,
        temp_dir_path,
    });
    defer allocator.free(overlayfs_options);

    const tmpDir = try std.fs.openDirAbsolute(temp_dir_path, .{});
    try tmpDir.makeDir("upper");
    try tmpDir.makeDir("work");
    try tmpDir.makeDir("mount");

    var overlayfsProcess = std.ChildProcess.init(&[_][]const u8{ overlayfs_path, "-o", overlayfs_options, mount_dir_path }, allocator);
    _ = try overlayfsProcess.spawnAndWait();

    const file = try tmpDir.openFile("mount/config.json", .{ .mode = .read_write });
    defer file.close();
    try processArgs(file, allocator);

    var crunProcess = std.ChildProcess.init(&[_][]const u8{ crun_path, "run", "-b", mount_dir_path, "crun_docker_c_id" }, gpa.allocator());
    _ = try crunProcess.spawnAndWait();

    var umountOverlayProcess = std.ChildProcess.init(&[_][]const u8{ "umount", mount_dir_path }, gpa.allocator());
    _ = try umountOverlayProcess.spawnAndWait();

    var umountProcess = std.ChildProcess.init(&[_][]const u8{ "umount", filesystem_bundle_dir_null }, gpa.allocator());
    _ = try umountProcess.spawnAndWait();

    // TODO: clean up /tmp
}
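
For orientation, the runtime above is roughly equivalent to running the embedded tools by hand. The sketch below mirrors the `ChildProcess` invocations in `main`; `$TMP` stands in for the `mkdtemp`-created `/tmp/dockerc-XXXXXX` directory and `<runtime size>` for the offset read from the trailer, so treat it as illustrative rather than exact.

```
# hypothetical transcript of what the runtime does with the bundled tools
squashfuse -o offset=<runtime size> ./compiled-binary $TMP/bundle.squashfs
fuse-overlayfs -o lowerdir=$TMP/bundle.squashfs,upperdir=$TMP/upper,workdir=$TMP/upper $TMP/mount
# (the runtime then rewrites $TMP/mount/config.json to append CLI arguments)
crun run -b $TMP/mount crun_docker_c_id
umount $TMP/mount
umount $TMP/bundle.squashfs
```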

src/tools/crun: new executable file (stored with Git LFS), binary not shown

src/tools/fuse-overlayfs: new file (stored with Git LFS), binary not shown

src/tools/mksquashfs: new executable file (stored with Git LFS), binary not shown

src/tools/skopeo: new executable file (stored with Git LFS), binary not shown

src/tools/squashfuse: new executable file (stored with Git LFS), binary not shown

src/tools/umoci.amd64: new executable file (stored with Git LFS), binary not shown