27 Commits

Author SHA1 Message Date
c673401c2a Use writer instead of RawSocket in Client 2025-09-09 22:30:31 -04:00
74b0c9ef1f Use 0.15.1 for application 2025-09-09 20:19:48 -04:00
0778889af5 Upgrade deps to 0.15.1 2025-09-09 17:32:59 -04:00
56b6b8a386 Use Client as var type instead of singleton 2025-05-11 13:52:42 -04:00
14ed0bc3f3 Fix issue returning stack pointer 2025-05-11 13:40:55 -04:00
c72503fce6 Fix extra bytes in connection message. 2025-05-11 13:40:23 -04:00
373dbebc8c Add broadcast initial interest using raw sockets
Use this from the relay message
2025-05-11 11:40:15 -04:00
cde289d648 Update gatorcat dep and use bytes for broadcast message
The latter is helpful for the lifetime of the message.
2025-05-11 10:12:26 -04:00
716fb466fa Remove allocation for messages 2025-05-10 21:46:53 -04:00
583f9d8b8f Add comments and fix tests
Also added networkBytesAsValue and restored bytesAsValue.
These are useful for treating the bytes from the network directly as a Message.
Otherwise, the init function would overwrite the packet type and length to be correct.
I would like the message handling to fail if the message body is incorrect.
2025-05-10 21:46:53 -04:00
56e72928c6 fix use after free 2025-05-10 21:46:53 -04:00
a80c9abfe7 Attempt to base64 encode the connection payload
For some reason I am still getting this:

2025/05/10 16:37:06 Error decoding message: SGVsbG8gZGFya25lc3MgbXkgb2xkIGZyaWVuZA==::53475673624738675a4746796132356c63334d6762586b676232786b49475a79615756755a413d3daaaa
2025-05-10 21:46:53 -04:00
245dab4909 Use slice for init, and add better error sets.
The slice lets us avoid allocating within the init function.
This means init can't fail, and it also makes it easier to stack allocate messages (slice an array buffer instead of creating a stack allocator); see the sketch after the commit list.
2025-05-10 21:46:53 -04:00
cde5c3626c 2025-05-10 21:46:53 -04:00
e84d1a2300 2025-05-10 21:46:53 -04:00
1b7d9bbb1a Remove bytesAsValueUnchecked
Callers can instead use std.mem.bytesAsValue directly.
2025-05-10 21:46:53 -04:00
1512ec1a86 Cleanup asBytes and test it 2025-05-10 21:46:53 -04:00
f1dce257be Simplify init interface 2025-05-10 21:46:53 -04:00
bcab1e4d00 2025-05-10 21:46:53 -04:00
0e8f016978 Align the bytes instead of the struct 2025-05-10 21:46:53 -04:00
fc53e87389 2025-05-10 21:46:53 -04:00
cbf554e853 2025-05-10 21:46:53 -04:00
775212013f 2025-05-10 21:46:53 -04:00
339ac5cfe5 2025-05-10 21:46:53 -04:00
eacfaffb6b 2025-05-10 21:46:53 -04:00
1731b2e643 2025-05-10 21:46:53 -04:00
dae66a0039 Starting real connections 2025-05-10 21:46:53 -04:00
8 changed files with 579 additions and 279 deletions
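
Several of the commits above (slice-based init, removing allocation, stack-allocated messages) converge on the zero-copy Message API shown in the message.zig diff below. The following is a minimal sketch of the send-side pattern, mirroring that file's test; it is illustrative rather than repo code, and assumes only names that appear in this diff (calcSize, init, getSaprusTypePayload, getPayload, networkFromNativeEndian).

const std = @import("std");
const Message = @import("message.zig").Message; // assumes the sketch sits next to message.zig
const max_message_size = 2048; // same bound the Client uses

fn buildRelay(payload: []const u8) !void {
    // Slice a stack buffer down to exactly the size calcSize reports for a
    // relay message carrying this payload; no allocator is involved.
    var buf: [max_message_size]u8 align(@alignOf(Message)) = undefined;
    const msg_bytes = buf[0..try Message.calcSize(.relay, payload.len)];
    // init stamps the packet type and length into the slice and returns a
    // Message view backed by it, so construction itself cannot fail.
    const msg: *Message = .init(.relay, msg_bytes);
    const relay = (try msg.getSaprusTypePayload()).relay;
    relay.dest = .{ 1, 2, 3, 4 };
    @memcpy(relay.getPayload(), payload);
    // Flip to network byte order in place just before handing msg_bytes to the
    // broadcast path, and flip back when done.
    try msg.networkFromNativeEndian();
    defer msg.nativeFromNetworkEndian() catch unreachable;
    std.debug.print("relay network bytes: {x}\n", .{msg_bytes});
}

Client.sendRelay below follows the same shape, except that it base64-encodes the payload into getPayload() instead of copying it verbatim.
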

build.zig

@@ -1,71 +1,121 @@
const std = @import("std");
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
// Although this function looks imperative, it does not perform the build
// directly and instead it mutates the build graph (`b`) that will be then
// executed by an external runner. The functions in `std.Build` implement a DSL
// for defining build steps and express dependencies between them, allowing the
// build runner to parallelize the build automatically (and the cache system to
// know when a step doesn't need to be re-run).
pub fn build(b: *std.Build) void {
// Standard target options allows the person running `zig build` to choose
// Standard target options allow the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
// Standard optimization options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
// set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
// It's also possible to define more custom flags to toggle optional features
// of this build script using `b.option()`. All defined flags (including
// target and optimize options) will be listed when running `zig build --help`
// in this directory.
const lib_mod = b.createModule(.{
// This creates a module, which represents a collection of source files alongside
// some compilation options, such as optimization mode and linked system libraries.
// Zig modules are the preferred way of making Zig code available to consumers.
// addModule defines a module that we intend to make available for importing
// to our consumers. We must give it a name because a Zig package can expose
// multiple modules and consumers will need to be able to specify which
// module they want to access.
const mod = b.addModule("zaprus", .{
// The root source file is the "entry point" of this module. Users of
// this module will only be able to access public declarations contained
// in this file, which means that if you have declarations that you
// intend to expose to consumers that were defined in other files part
// of this module, you will have to make sure to re-export them from
// the root file.
.root_source_file = b.path("src/root.zig"),
// Later on we'll use this module as the root module of a test executable
// which requires us to specify a target.
.target = target,
.optimize = optimize,
});
// We will also create a module for our other entry point, 'main.zig'.
const exe_mod = b.createModule(.{
// `root_source_file` is the Zig "entry point" of the module. If a module
// only contains e.g. external object files, you can make this `null`.
// In this case the main source file is merely a path, however, in more
// complicated build scripts, this could be a generated file.
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
mod.addImport("network", b.dependency("network", .{}).module("network"));
mod.addImport("gatorcat", b.dependency("gatorcat", .{}).module("gatorcat"));
lib_mod.addImport("network", b.dependency("network", .{}).module("network"));
exe_mod.addImport("zaprus", lib_mod);
exe_mod.addImport("clap", b.dependency("clap", .{}).module("clap"));
const lib = b.addLibrary(.{
.linkage = .static,
.name = "zaprus",
.root_module = lib_mod,
});
b.installArtifact(lib);
// This creates another `std.Build.Step.Compile`, but this one builds an executable
// rather than a static library.
// Here we define an executable. An executable needs to have a root module
// which needs to expose a `main` function. While we could add a main function
// to the module defined above, it's sometimes preferable to split business
// logic and the CLI into two separate modules.
//
// If your goal is to create a Zig library for others to use, consider if
// it might benefit from also exposing a CLI tool. A parser library for a
// data serialization format could also bundle a CLI syntax checker, for example.
//
// If instead your goal is to create an executable, consider if users might
// be interested in also being able to embed the core functionality of your
// program in their own executable in order to avoid the overhead involved in
// subprocessing your CLI tool.
//
// If neither case applies to you, feel free to delete the declaration you
// don't need and to put everything under a single module.
const exe = b.addExecutable(.{
.name = "zaprus",
.root_module = exe_mod,
.root_module = b.createModule(.{
// b.createModule defines a new module just like b.addModule but,
// unlike b.addModule, it does not expose the module to consumers of
// this package, which is why in this case we don't have to give it a name.
.root_source_file = b.path("src/main.zig"),
// Target and optimization levels must be explicitly wired in when
// defining an executable or library (in the root module), and you
// can also hardcode a specific target for an executable or library
// definition if desirable (e.g. firmware for embedded devices).
.target = target,
.optimize = optimize,
// List of modules available for import in source files part of the
// root module.
.imports = &.{
// Here "zaprus" is the name you will use in your source code to
// import this module (e.g. `@import("zaprus")`). The name is
// repeated because you are allowed to rename your imports, which
// can be extremely useful in case of collisions (which can happen
// importing modules from different packages).
.{ .name = "zaprus", .module = mod },
.{ .name = "clap", .module = b.dependency("clap", .{}).module("clap") },
},
}),
});
// This declares intent for the executable to be installed into the
// standard location when the user invokes the "install" step (the default
// step when running `zig build`).
// install prefix when running `zig build` (i.e. when executing the default
// step). By default the install prefix is `zig-out/` but can be overridden
// by passing `--prefix` or `-p`.
b.installArtifact(exe);
b.installArtifact(b.addLibrary(.{
.linkage = .static,
.name = "zaprus",
.root_module = mod,
}));
// This *creates* a Run step in the build graph, to be executed when another
// step is evaluated that depends on it. The next line below will establish
// such a dependency.
// This creates a top level step. Top level steps have a name and can be
// invoked by name when running `zig build` (e.g. `zig build run`).
// This will evaluate the `run` step rather than the default step.
// For a top level step to actually do something, it must depend on other
// steps (e.g. a Run step, as we will see in a moment).
const run_step = b.step("run", "Run the app");
// This creates a RunArtifact step in the build graph. A RunArtifact step
// invokes an executable compiled by Zig. Steps will only be executed by the
// runner if invoked directly by the user (in the case of top level steps)
// or if another step depends on it, so it's up to you to define when and
// how this Run step will be executed. In our case we want to run it when
// the user runs `zig build run`, so we create a dependency link.
const run_cmd = b.addRunArtifact(exe);
run_step.dependOn(&run_cmd.step);
// By making the run step depend on the install step, it will be run from the
// By making the run step depend on the default step, it will be run from the
// installation directory rather than directly from within the cache directory.
// This is not necessary; however, if the application depends on other installed
// files, this ensures they will be present and in the expected location.
run_cmd.step.dependOn(b.getInstallStep());
// This allows the user to pass arguments to the application in the build
@@ -74,21 +124,42 @@ pub fn build(b: *std.Build) void {
run_cmd.addArgs(args);
}
// This creates a build step. It will be visible in the `zig build --help` menu,
// and can be selected like this: `zig build run`
// This will evaluate the `run` step rather than the default, which is "install".
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
const exe_unit_tests = b.addTest(.{
.root_module = exe_mod,
// Creates an executable that will run `test` blocks from the provided module.
// Here `mod` needs to define a target, which is why earlier we made sure to
// set the relevant field.
const mod_tests = b.addTest(.{
.root_module = mod,
});
const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests);
// A run step that will run the test executable.
const run_mod_tests = b.addRunArtifact(mod_tests);
// Similar to creating the run step earlier, this exposes a `test` step to
// the `zig build --help` menu, providing a way for the user to request
// running the unit tests.
const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&run_exe_unit_tests.step);
// Creates an executable that will run `test` blocks from the executable's
// root module. Note that test executables only test one module at a time,
// hence why we have to create two separate ones.
const exe_tests = b.addTest(.{
.root_module = exe.root_module,
});
// A run step that will run the second test executable.
const run_exe_tests = b.addRunArtifact(exe_tests);
// A top level step for running all tests. dependOn can be called multiple
// times and since the two run steps do not depend on one another, this will
// make the two of them run in parallel.
const test_step = b.step("test", "Run tests");
test_step.dependOn(&run_mod_tests.step);
test_step.dependOn(&run_exe_tests.step);
// Just like flags, top level steps are also listed in the `--help` menu.
//
// The Zig build system is entirely implemented in userland, which means
// that it cannot hook into private compiler APIs. All compilation work
// orchestrated by the build system will result in other Zig compiler
// subcommands being invoked with the right flags defined. You can observe
// these invocations when one fails (or you pass a flag to increase
// verbosity) to validate assumptions and diagnose problems.
//
// Lastly, the Zig build system is relatively simple and self-contained,
// and reading its source code will allow you to master it.
}
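
The comments above describe b.addModule as exposing the "zaprus" module to consumers of this package. As a hedged sketch, a downstream build.zig might wire it in as shown here; the dependency name "zaprus" in the consumer's build.zig.zon and the executable name are assumptions, while the std.Build calls mirror the ones used above.

const std = @import("std");

pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Assumes the consumer's build.zig.zon lists this package under the name
    // "zaprus"; both names here are illustrative, not taken from the diff.
    const zaprus_dep = b.dependency("zaprus", .{
        .target = target,
        .optimize = optimize,
    });

    const exe = b.addExecutable(.{
        .name = "consumer",
        .root_module = b.createModule(.{
            .root_source_file = b.path("src/main.zig"),
            .target = target,
            .optimize = optimize,
            .imports = &.{
                // The module name must match the name passed to b.addModule above.
                .{ .name = "zaprus", .module = zaprus_dep.module("zaprus") },
            },
        }),
    });
    b.installArtifact(exe);
}

The consumer's source can then @import("zaprus"), just as src/main.zig does below via the executable module's imports list.
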

build.zig.zon

@@ -37,12 +37,16 @@
// internet connectivity.
.dependencies = .{
.network = .{
.url = "https://github.com/ikskuh/zig-network/archive/c76240d2240711a3dcbf1c0fb461d5d1f18be79a.zip",
.hash = "network-0.1.0-AAAAAOwlAQAQ6zKPUrsibdpGisxld9ftUKGdMvcCSpaj",
.url = "git+https://github.com/ikskuh/zig-network#7947237eec317d9458897f82089f343a05450c2b",
.hash = "network-0.1.0-Pm-Agl8xAQBmkwohveGOfTk4zQnuqDs0Ptfbms4KP5Ce",
},
.clap = .{
.url = "git+https://github.com/Hejsil/zig-clap?ref=0.10.0#e47028deaefc2fb396d3d9e9f7bd776ae0b2a43a",
.hash = "clap-0.10.0-oBajB434AQBDh-Ei3YtoKIRxZacVPF1iSwp3IX_ZB8f0",
.url = "git+https://github.com/Hejsil/zig-clap#9cfa61596cd44ef7be35f8d2e108d2025e09868e",
.hash = "clap-0.10.0-oBajB_TnAQB0l5UdW9WYhhJDEswbedvwFOzzZwGknYeR",
},
.gatorcat = .{
.url = "git+https://github.com/jeffective/gatorcat#db73d0f7780331d82e785e85773d1afaf154c2e6",
.hash = "gatorcat-0.3.11-WcrpTQn0BwArrCFVHy9FPBIPDJQqPrFdJlhiyH7Ng5x4",
},
},
.paths = .{

src/Client.zig

@@ -1,22 +1,84 @@
var rand: ?Random = null;
const base64Enc = std.base64.Base64Encoder.init(std.base64.standard_alphabet_chars, '=');
const base64Dec = std.base64.Base64Decoder.init(std.base64.standard_alphabet_chars, '=');
pub fn init() !void {
rand: Random,
writer: *std.Io.Writer,
const Self = @This();
const max_message_size = 2048;
pub fn init(writer: *std.Io.Writer) !Self {
var prng = Random.DefaultPrng.init(blk: {
var seed: u64 = undefined;
try posix.getrandom(mem.asBytes(&seed));
break :blk seed;
});
rand = prng.random();
try network.init();
const rand = prng.random();
return .{
.rand = rand,
.writer = writer,
};
}
pub fn deinit() void {
network.deinit();
pub fn deinit(self: *Self) void {
self.writer.flush() catch {};
}
fn broadcastSaprusMessage(msg: SaprusMessage, udp_port: u16, allocator: Allocator) !void {
const msg_bytes = try msg.toBytes(allocator);
defer allocator.free(msg_bytes);
/// Used for relay messages and connection handshake.
/// Assumes Client.init has been called.
fn broadcastInitialInterestMessage(self: *Self, msg_bytes: []align(@alignOf(SaprusMessage)) u8) !void {
var packet_bytes: [max_message_size]u8 = comptime blk: {
var b: [max_message_size]u8 = @splat(0);
// Destination MAC addr to FF:FF:FF:FF:FF:FF
for (0..6) |i| {
b[i] = 0xff;
}
// Set Ethernet type to IPv4
b[0x0c] = 0x08;
b[0x0d] = 0x00;
// Set IPv4 version to 4
b[0x0e] = 0x45;
// Destination broadcast
for (0x1e..0x22) |i| {
b[i] = 0xff;
}
// Set TTL
b[0x16] = 0x40;
// Set IPv4 protocol to UDP
b[0x17] = 0x11;
// Set interest filter value to 8888.
b[0x24] = 0x22;
b[0x25] = 0xb8;
break :blk b;
};
var msg: *SaprusMessage = try .bytesAsValue(msg_bytes);
try msg.networkFromNativeEndian();
defer msg.nativeFromNetworkEndian() catch unreachable;
// The byte position within the packet that the saprus message starts at.
const saprus_start_byte = 42;
@memcpy(packet_bytes[saprus_start_byte .. saprus_start_byte + msg_bytes.len], msg_bytes);
const writer = self.writer;
_ = try writer.write(packet_bytes[0 .. saprus_start_byte + msg_bytes.len]);
try writer.flush();
}
// fn broadcastSaprusMessage(msg_bytes: []align(@alignOf(SaprusMessage)) u8) !void {}
fn broadcastSaprusMessage(msg_bytes: []align(@alignOf(SaprusMessage)) u8, udp_port: u16) !void {
const msg: *SaprusMessage = try .bytesAsValue(msg_bytes);
try msg.networkFromNativeEndian();
defer msg.nativeFromNetworkEndian() catch unreachable;
var sock = try network.Socket.create(.ipv4, .udp);
defer sock.close();
@@ -36,54 +98,57 @@ fn broadcastSaprusMessage(msg: SaprusMessage, udp_port: u16, allocator: Allocato
try sock.bind(bind_addr);
std.debug.print("{x}\n", .{msg_bytes});
_ = try sock.sendTo(dest_addr, msg_bytes);
}
pub fn sendRelay(payload: []const u8, dest: [4]u8, allocator: Allocator) !void {
const msg = SaprusMessage{
.relay = .{
.header = .{ .dest = dest },
.payload = payload,
},
};
pub fn sendRelay(self: *Self, payload: []const u8, dest: [4]u8) !void {
var buf: [max_message_size]u8 align(@alignOf(SaprusMessage)) = undefined;
const msg_bytes = buf[0..try SaprusMessage.calcSize(
.relay,
base64Enc.calcSize(payload.len),
)];
const msg: *SaprusMessage = .init(.relay, msg_bytes);
try broadcastSaprusMessage(msg, 8888, allocator);
const relay = (try msg.getSaprusTypePayload()).relay;
relay.dest = dest;
_ = base64Enc.encode(relay.getPayload(), payload);
try self.broadcastInitialInterestMessage(msg_bytes);
}
fn randomPort() u16 {
var p: u16 = 0;
if (rand) |r| {
p = r.intRangeAtMost(u16, 1024, 65000);
} else unreachable;
return p;
fn randomPort(self: Self) u16 {
return self.rand.intRangeAtMost(u16, 1024, 65000);
}
pub fn sendInitialConnection(payload: []const u8, initial_port: u16, allocator: Allocator) !SaprusMessage {
const dest_port = randomPort();
const msg = SaprusMessage{
.connection = .{
.header = .{
.src_port = initial_port,
.dest_port = dest_port,
},
.payload = payload,
},
};
pub fn sendInitialConnection(
self: Self,
payload: []const u8,
output_bytes: []align(@alignOf(SaprusMessage)) u8,
initial_port: u16,
) !*SaprusMessage {
const dest_port = self.randomPort();
const msg_bytes = output_bytes[0..try SaprusMessage.calcSize(
.connection,
base64Enc.calcSize(payload.len),
)];
const msg: *SaprusMessage = .init(.connection, msg_bytes);
try broadcastSaprusMessage(msg, 8888, allocator);
const connection = (try msg.getSaprusTypePayload()).connection;
connection.src_port = initial_port;
connection.dest_port = dest_port;
_ = base64Enc.encode(connection.getPayload(), payload);
try broadcastSaprusMessage(msg_bytes, 8888);
return msg;
}
pub fn connect(payload: []const u8, allocator: Allocator) !?SaprusMessage {
var initial_port: u16 = 0;
if (rand) |r| {
initial_port = r.intRangeAtMost(u16, 1024, 65000);
} else unreachable;
pub fn connect(self: Self, payload: []const u8) !?SaprusConnection {
const initial_port = self.randomPort();
var initial_conn_res: ?SaprusMessage = null;
errdefer if (initial_conn_res) |c| c.deinit(allocator);
var initial_conn_res: ?*SaprusMessage = null;
var sock = try network.Socket.create(.ipv4, .udp);
defer sock.close();
@@ -98,21 +163,26 @@ pub fn connect(payload: []const u8, allocator: Allocator) !?SaprusMessage {
try sock.setReadTimeout(1 * std.time.us_per_s);
try sock.bind(bind_addr);
const msg = try sendInitialConnection(payload, initial_port, allocator);
var sent_msg_bytes: [max_message_size]u8 align(@alignOf(SaprusMessage)) = undefined;
const msg = try self.sendInitialConnection(payload, &sent_msg_bytes, initial_port);
var response_buf: [4096]u8 = undefined;
var response_buf: [max_message_size]u8 align(@alignOf(SaprusMessage)) = undefined;
_ = try sock.receive(&response_buf); // Ignore message that I sent.
const len = try sock.receive(&response_buf);
initial_conn_res = try SaprusMessage.fromBytes(response_buf[0..len], allocator);
initial_conn_res = try .networkBytesAsValue(response_buf[0..len]);
// Complete handshake after awaiting response
try broadcastSaprusMessage(msg, randomPort(), allocator);
try broadcastSaprusMessage(msg.asBytes(), self.randomPort());
return initial_conn_res;
if (false) {
return initial_conn_res.?;
}
return null;
}
const SaprusMessage = @import("message.zig").Message;
const SaprusConnection = @import("Connection.zig");
const std = @import("std");
const Random = std.Random;
@@ -120,5 +190,3 @@ const posix = std.posix;
const mem = std.mem;
const network = @import("network");
const Allocator = mem.Allocator;
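
For reference, the hardcoded offsets in broadcastInitialInterestMessage above follow from standard Ethernet/IPv4/UDP header sizes. The sketch below restates that arithmetic with named constants; the names are illustrative and not from the diff, only the numeric values match its hardcoded offsets.

const std = @import("std");

// Illustrative names for the frame layout assumed by broadcastInitialInterestMessage.
const ethernet_header_len = 14; // dest MAC (6) + src MAC (6) + EtherType (2)
const ipv4_header_len = 20; // minimal IPv4 header, matching the 0x45 version/IHL byte
const udp_header_len = 8; // source port, dest port, length, checksum (2 bytes each)

comptime {
    // EtherType sits right after the two MAC addresses: offset 0x0c.
    std.debug.assert(6 + 6 == 0x0c);
    // The UDP destination port is 2 bytes into the UDP header, so bytes
    // 0x24/0x25 carry the 8888 (0x22b8) interest filter port.
    std.debug.assert(ethernet_header_len + ipv4_header_len + 2 == 0x24);
    // The Saprus message body starts right after the UDP header: byte 42.
    std.debug.assert(ethernet_header_len + ipv4_header_len + udp_header_len == 42);
}
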

src/Connection.zig (new, empty file)

src/RawSocketWriter.zig (new file)

@@ -0,0 +1,45 @@
const std = @import("std");
const gcat = @import("gatorcat");
const Writer = @This();
const assert = std.debug.assert;
interface: std.Io.Writer,
socket: gcat.nic.RawSocket,
alloc: std.mem.Allocator,
fn drain(io_w: *std.Io.Writer, data: []const []const u8, splat: usize) std.Io.Writer.Error!usize {
const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
const buffered = io_w.buffered();
var res: usize = 0;
if (buffered.len > 0) {
w.socket.linkLayer().send(buffered) catch return error.WriteFailed;
_ = io_w.consumeAll();
}
for (data[0 .. data.len - 1]) |d| {
w.socket.linkLayer().send(d) catch return error.WriteFailed;
res += d.len;
}
if (splat > 0 and data[data.len - 1].len > 0) {
var splatBuffer: std.ArrayList(u8) = .empty;
defer splatBuffer.deinit(w.alloc);
for (0..splat) |_| {
splatBuffer.appendSlice(w.alloc, data[data.len - 1]) catch return error.WriteFailed;
}
w.socket.linkLayer().send(splatBuffer.items) catch return error.WriteFailed;
}
return res;
}
pub fn init(interface_name: [:0]const u8, buffer: []u8, alloc: std.mem.Allocator) !Writer {
return .{
.interface = .{
.vtable = &.{ .drain = drain },
.buffer = buffer,
},
.socket = try .init(interface_name),
.alloc = alloc,
};
}

src/main.zig

@@ -37,42 +37,37 @@ pub fn main() !void {
.allocator = gpa,
}) catch |err| {
// Report useful error and exit.
diag.report(std.io.getStdErr().writer(), err) catch {};
try diag.reportToFile(.stderr(), err);
return err;
};
defer res.deinit();
try SaprusClient.init();
defer SaprusClient.deinit();
if (res.args.help != 0) {
return clap.help(std.io.getStdErr().writer(), clap.Help, &params, .{});
return clap.helpToFile(.stderr(), clap.Help, &params, .{});
}
var sock_buffer: [2048]u8 = undefined;
var rawSocketWriter: RawSocketWriter = try .init("enp7s0", &sock_buffer, gpa); // /proc/net/dev
var client = try SaprusClient.init(&rawSocketWriter.interface);
defer client.deinit();
if (res.args.relay) |r| {
const dest = parseDest(res.args.dest) catch .{ 70, 70, 70, 70 };
try SaprusClient.sendRelay(
const dest = parseDest(res.args.dest);
try client.sendRelay(
if (r.len > 0) r else "Hello darkness my old friend",
dest,
gpa,
);
// std.debug.print("Sent: {s}\n", .{r});
return;
} else if (res.args.connect) |c| {
const conn_res: ?SaprusMessage = SaprusClient.connect(if (c.len > 0) c else "Hello darkness my old friend", gpa) catch |err| switch (err) {
_ = client.connect(if (c.len > 0) c else "Hello darkness my old friend") catch |err| switch (err) {
error.WouldBlock => null,
else => return err,
};
defer if (conn_res) |r| r.deinit(gpa);
if (conn_res) |r| {
std.debug.print("{s}\n", .{r.connection.payload});
} else {
std.debug.print("No response from connection request\n", .{});
}
return;
}
return clap.help(std.io.getStdErr().writer(), clap.Help, &params, .{});
return clap.helpToFile(.stderr(), clap.Help, &params, .{});
}
fn parseDest(in: ?[]const u8) [4]u8 {
@@ -86,7 +81,7 @@ fn parseDest(in: ?[]const u8) [4]u8 {
const addr = std.net.Ip4Address.parse(dest, 0) catch return "FAIL".*;
return @bitCast(addr.sa.addr);
}
return "zap".*;
return "zap\x00".*;
}
const builtin = @import("builtin");
@@ -97,5 +92,6 @@ const ArrayList = std.ArrayList;
const zaprus = @import("zaprus");
const SaprusClient = zaprus.Client;
const SaprusMessage = zaprus.Message;
const RawSocketWriter = zaprus.RawSocketWriter;
const clap = @import("clap");

src/message.zig

@@ -1,6 +1,3 @@
const base64Enc = std.base64.Base64Encoder.init(std.base64.standard_alphabet_chars, '=');
const base64Dec = std.base64.Base64Decoder.init(std.base64.standard_alphabet_chars, '=');
/// Type tag for Message union.
/// This is the first value in the actual packet sent over the network.
pub const PacketType = enum(u16) {
@@ -23,155 +20,255 @@ pub const ConnectionOptions = packed struct(u8) {
opt8: bool = false,
};
pub const Error = error{
pub const MessageTypeError = error{
NotImplementedSaprusType,
UnknownSaprusType,
};
pub const MessageParseError = MessageTypeError || error{
InvalidMessage,
};
// ZERO COPY STUFF
// &payload could be a void value that is treated as a pointer to a [*]u8
/// All Saprus messages
pub const Message = union(PacketType) {
pub const Relay = struct {
pub const Header = packed struct {
pub const Message = packed struct {
const Relay = packed struct {
dest: @Vector(4, u8),
payload: void,
pub fn getPayload(self: *align(1) Relay) []u8 {
const len: *u16 = @ptrFromInt(@intFromPtr(self) - @sizeOf(u16));
return @as([*]u8, @ptrCast(&self.payload))[0 .. len.* - @bitSizeOf(Relay) / 8];
}
};
header: Header,
payload: []const u8,
};
pub const Connection = struct {
pub const Header = packed struct {
const Connection = packed struct {
src_port: u16, // random number > 1024
dest_port: u16, // random number > 1024
seq_num: u32 = 0,
msg_id: u32 = 0,
reserved: u8 = 0,
options: ConnectionOptions = .{},
};
header: Header,
payload: []const u8,
};
relay: Relay,
file_transfer: void, // unimplemented
connection: Connection,
payload: void,
/// Should be called for any Message that was declared using a function that you pass an allocator to.
pub fn deinit(self: Message, allocator: Allocator) void {
switch (self) {
.relay => |r| allocator.free(r.payload),
.connection => |c| allocator.free(c.payload),
pub fn getPayload(self: *align(1) Connection) []u8 {
const len: *u16 = @ptrFromInt(@intFromPtr(self) - @sizeOf(u16));
return @as([*]u8, @ptrCast(&self.payload))[0 .. len.* - @bitSizeOf(Connection) / 8];
}
fn nativeFromNetworkEndian(self: *align(1) Connection) void {
self.src_port = bigToNative(@TypeOf(self.src_port), self.src_port);
self.dest_port = bigToNative(@TypeOf(self.dest_port), self.dest_port);
self.seq_num = bigToNative(@TypeOf(self.seq_num), self.seq_num);
self.msg_id = bigToNative(@TypeOf(self.msg_id), self.msg_id);
}
fn networkFromNativeEndian(self: *align(1) Connection) void {
self.src_port = nativeToBig(@TypeOf(self.src_port), self.src_port);
self.dest_port = nativeToBig(@TypeOf(self.dest_port), self.dest_port);
self.seq_num = nativeToBig(@TypeOf(self.seq_num), self.seq_num);
self.msg_id = nativeToBig(@TypeOf(self.msg_id), self.msg_id);
}
};
const Self = @This();
const SelfBytes = []align(@alignOf(Self)) u8;
type: PacketType,
length: u16,
bytes: void = {},
/// Takes a byte slice, and returns a Message struct backed by the slice.
/// This properly initializes the top level headers within the slice.
/// This is used for creating new messages. For reading messages from the network,
/// see: networkBytesAsValue.
pub fn init(@"type": PacketType, bytes: []align(@alignOf(Self)) u8) *Self {
std.debug.assert(bytes.len >= @sizeOf(Self));
const res: *Self = @ptrCast(bytes.ptr);
res.type = @"type";
res.length = @intCast(bytes.len - @sizeOf(Self));
return res;
}
/// Compute the number of bytes required to store a given payload size for a given message type.
pub fn calcSize(comptime @"type": PacketType, payload_len: usize) MessageTypeError!u16 {
const header_size = @bitSizeOf(switch (@"type") {
.relay => Relay,
.connection => Connection,
.file_transfer => return MessageTypeError.NotImplementedSaprusType,
else => return MessageTypeError.UnknownSaprusType,
}) / 8;
return @intCast(payload_len + @sizeOf(Self) + header_size);
}
fn getRelay(self: *Self) *align(1) Relay {
return std.mem.bytesAsValue(Relay, &self.bytes);
}
fn getConnection(self: *Self) *align(1) Connection {
return std.mem.bytesAsValue(Connection, &self.bytes);
}
/// Access the message Saprus payload.
pub fn getSaprusTypePayload(self: *Self) MessageTypeError!(union(PacketType) {
relay: *align(1) Relay,
file_transfer: void,
connection: *align(1) Connection,
}) {
return switch (self.type) {
.relay => .{ .relay = self.getRelay() },
.connection => .{ .connection = self.getConnection() },
.file_transfer => MessageTypeError.NotImplementedSaprusType,
else => MessageTypeError.UnknownSaprusType,
};
}
/// Convert the message to native endianness from network endianness in-place.
pub fn nativeFromNetworkEndian(self: *Self) MessageTypeError!void {
self.type = @enumFromInt(bigToNative(
@typeInfo(@TypeOf(self.type)).@"enum".tag_type,
@intFromEnum(self.type),
));
self.length = bigToNative(@TypeOf(self.length), self.length);
errdefer {
// If the payload specific headers fail, revert the top level header values
self.type = @enumFromInt(nativeToBig(
@typeInfo(@TypeOf(self.type)).@"enum".tag_type,
@intFromEnum(self.type),
));
self.length = nativeToBig(@TypeOf(self.length), self.length);
}
switch (try self.getSaprusTypePayload()) {
.relay => {},
.connection => |*con| con.*.nativeFromNetworkEndian(),
// We know other values are unreachable,
// because they would have returned an error from the switch condition.
else => unreachable,
}
}
fn toBytesAux(
header: anytype,
payload: []const u8,
buf: *std.ArrayList(u8),
allocator: Allocator,
) !void {
const Header = @TypeOf(header);
// Create a growable string to store the base64 bytes in.
// Doing this first so I can use the length of the encoded bytes for the length field.
var payload_list = std.ArrayList(u8).init(allocator);
defer payload_list.deinit();
const buf_w = payload_list.writer();
// Write the payload bytes as base64 to the growable string.
try base64Enc.encodeWriter(buf_w, payload);
// At this point, payload_list contains the base64 encoded payload.
// Add the payload length to the output buf.
try buf.*.appendSlice(
asBytes(&nativeToBig(u16, @intCast(payload_list.items.len + @bitSizeOf(Header) / 8))),
);
// Add the header bytes to the output buf.
var header_buf: [@sizeOf(Header)]u8 = undefined;
var header_buf_stream = std.io.fixedBufferStream(&header_buf);
try header_buf_stream.writer().writeStructEndian(header, .big);
// Add the exact number of bits in the header without padding.
try buf.*.appendSlice(header_buf[0 .. @bitSizeOf(Header) / 8]);
try buf.*.appendSlice(payload_list.items);
/// Convert the message to network endianness from native endianness in-place.
pub fn networkFromNativeEndian(self: *Self) MessageTypeError!void {
try switch (try self.getSaprusTypePayload()) {
.relay => {},
.connection => |*con| con.*.networkFromNativeEndian(),
.file_transfer => MessageTypeError.NotImplementedSaprusType,
else => MessageTypeError.UnknownSaprusType,
};
self.type = @enumFromInt(nativeToBig(
@typeInfo(@TypeOf(self.type)).@"enum".tag_type,
@intFromEnum(self.type),
));
self.length = nativeToBig(@TypeOf(self.length), self.length);
}
/// Caller is responsible for freeing the returned bytes.
pub fn toBytes(self: Message, allocator: Allocator) ![]u8 {
// Create a growable list of bytes to store the output in.
var buf = std.ArrayList(u8).init(allocator);
errdefer buf.deinit();
// Start with writing the message type, which is the first 16 bits of every Saprus message.
try buf.appendSlice(asBytes(&nativeToBig(u16, @intFromEnum(self))));
// Write the proper header and payload for the given packet type.
switch (self) {
.relay => |r| try toBytesAux(r.header, r.payload, &buf, allocator),
.connection => |c| try toBytesAux(c.header, c.payload, &buf, allocator),
.file_transfer => return Error.NotImplementedSaprusType,
/// Convert network endian bytes to a native endian value in-place.
pub fn networkBytesAsValue(bytes: SelfBytes) MessageParseError!*Self {
const res = std.mem.bytesAsValue(Self, bytes);
try res.nativeFromNetworkEndian();
return .bytesAsValue(bytes);
}
// Collect the growable list as a slice and return it.
return buf.toOwnedSlice();
/// Create a structured view of the bytes without initializing the length or type,
/// and without converting the endianness.
pub fn bytesAsValue(bytes: SelfBytes) MessageParseError!*Self {
const res = std.mem.bytesAsValue(Self, bytes);
return switch (res.type) {
.relay, .connection => if (bytes.len == res.length + @sizeOf(Self))
res
else
MessageParseError.InvalidMessage,
.file_transfer => MessageParseError.NotImplementedSaprusType,
else => MessageParseError.UnknownSaprusType,
};
}
fn fromBytesAux(
comptime packet: PacketType,
len: u16,
r: std.io.FixedBufferStream([]const u8).Reader,
allocator: Allocator,
) !Message {
const Header = @field(@FieldType(Message, @tagName(packet)), "Header");
// Read the header for the current message type.
var header_bytes: [@sizeOf(Header)]u8 = undefined;
_ = try r.read(header_bytes[0 .. @bitSizeOf(Header) / 8]);
var header_stream = std.io.fixedBufferStream(&header_bytes);
const header = try header_stream.reader().readStructEndian(Header, .big);
// Read the base64 bytes into a list to be able to call the decoder on it.
const payload_buf = try allocator.alloc(u8, len - @bitSizeOf(Header) / 8);
defer allocator.free(payload_buf);
_ = try r.readAll(payload_buf);
// Create a buffer to store the payload in, and decode the base64 bytes into the payload field.
const payload = try allocator.alloc(u8, try base64Dec.calcSizeForSlice(payload_buf));
try base64Dec.decode(payload, payload_buf);
// Return the type of Message specified by the `packet` argument.
return @unionInit(Message, @tagName(packet), .{
.header = header,
.payload = payload,
});
}
/// Caller is responsible for calling .deinit on the returned value.
pub fn fromBytes(bytes: []const u8, allocator: Allocator) !Message {
var s = std.io.fixedBufferStream(bytes);
const r = s.reader();
// Read packet type
const packet_type = @as(PacketType, @enumFromInt(try r.readInt(u16, .big)));
// Read the length of the header + base64 encoded payload.
const len = try r.readInt(u16, .big);
switch (packet_type) {
.relay => return fromBytesAux(.relay, len, r, allocator),
.connection => return fromBytesAux(.connection, len, r, allocator),
.file_transfer => return Error.NotImplementedSaprusType,
else => return Error.UnknownSaprusType,
}
/// Deprecated.
/// If I need the bytes, I should just pass around the slice that is backing this to begin with.
pub fn asBytes(self: *Self) SelfBytes {
const size = @sizeOf(Self) + self.length;
return @as([*]align(@alignOf(Self)) u8, @ptrCast(self))[0..size];
}
};
test "testing variable length zero copy struct" {
{
// Relay test
const payload = "Hello darkness my old friend";
var msg_bytes: [try Message.calcSize(.relay, payload.len)]u8 align(@alignOf(Message)) = undefined;
// Create a view of the byte slice as a Message
const msg: *Message = .init(.relay, &msg_bytes);
{
// Set the message values
{
// These are both set by the init call.
// msg.type = .relay;
// msg.length = payload_len;
}
const relay = (try msg.getSaprusTypePayload()).relay;
relay.dest = .{ 1, 2, 3, 4 };
@memcpy(relay.getPayload(), payload);
}
{
// Print the message as hex using the network byte order
try msg.networkFromNativeEndian();
// We know the error from nativeFromNetworkEndian is unreachable because
// it would have returned an error from networkFromNativeEndian.
defer msg.nativeFromNetworkEndian() catch unreachable;
std.debug.print("relay network bytes: {x}\n", .{msg_bytes});
std.debug.print("bytes len: {d}\n", .{msg_bytes.len});
}
if (false) {
// Illegal behavior
std.debug.print("{any}\n", .{(try msg.getSaprusTypePayload()).connection});
}
try std.testing.expectEqualDeep(msg, try Message.bytesAsValue(msg.asBytes()));
}
{
// Connection test
const payload = "Hello darkness my old friend";
var msg_bytes: [try Message.calcSize(.connection, payload.len)]u8 align(@alignOf(Message)) = undefined;
// Create a view of the byte slice as a Message
const msg: *Message = .init(.connection, &msg_bytes);
{
// Initializing connection header values
const connection = (try msg.getSaprusTypePayload()).connection;
connection.src_port = 1;
connection.dest_port = 2;
connection.seq_num = 3;
connection.msg_id = 4;
connection.reserved = 5;
connection.options = @bitCast(@as(u8, 6));
@memcpy(connection.getPayload(), payload);
}
{
// Print the message as hex using the network byte order
try msg.networkFromNativeEndian();
// We know the error from nativeFromNetworkEndian is unreachable because
// it would have returned an error from networkFromNativeEndian.
defer msg.nativeFromNetworkEndian() catch unreachable;
std.debug.print("connection network bytes: {x}\n", .{msg_bytes});
std.debug.print("bytes len: {d}\n", .{msg_bytes.len});
}
}
}
const std = @import("std");
const Allocator = std.mem.Allocator;
const asBytes = std.mem.asBytes;
const nativeToBig = std.mem.nativeToBig;
const bigToNative = std.mem.bigToNative;
test "Round trip Relay toBytes and fromBytes" {
if (false) {
const gpa = std.testing.allocator;
const msg = Message{
.relay = .{
@@ -187,9 +284,12 @@ test "Round trip Relay toBytes and fromBytes" {
defer from_bytes.deinit(gpa);
try std.testing.expectEqualDeep(msg, from_bytes);
}
return error.SkipZigTest;
}
test "Round trip Connection toBytes and fromBytes" {
if (false) {
const gpa = std.testing.allocator;
const msg = Message{
.connection = .{
@@ -208,4 +308,10 @@ test "Round trip Connection toBytes and fromBytes" {
defer from_bytes.deinit(gpa);
try std.testing.expectEqualDeep(msg, from_bytes);
}
return error.SkipZigTest;
}
test {
std.testing.refAllDeclsRecursive(@This());
}
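
On the receive side, Client.connect above uses networkBytesAsValue to treat a datagram's bytes directly as a Message. Below is a minimal sketch of that parse path in isolation; the function and buffer are hypothetical, and only the Message API names come from this diff.

const std = @import("std");
const Message = @import("message.zig").Message; // assumes the sketch sits next to message.zig

fn parseDatagram(buf: []align(@alignOf(Message)) u8) !void {
    // networkBytesAsValue byte-swaps the headers in place, checks that the
    // declared length matches the slice, and returns a Message view over buf
    // without copying or allocating.
    const msg: *Message = try .networkBytesAsValue(buf);
    switch (try msg.getSaprusTypePayload()) {
        .relay => |relay| std.debug.print("relay payload: {s}\n", .{relay.getPayload()}),
        .connection => |conn| std.debug.print("connection from port {d}\n", .{conn.src_port}),
        // getSaprusTypePayload already returned an error for any other tag.
        else => unreachable,
    }
}

Note that the Client base64-encodes payloads before sending, so getPayload() on a received message yields base64 text rather than the original bytes.
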

src/root.zig

@@ -1,2 +1,12 @@
pub const Client = @import("Client.zig");
pub usingnamespace @import("message.zig");
pub const Connection = @import("Connection.zig");
pub const RawSocketWriter = @import("RawSocketWriter.zig");
const msg = @import("message.zig");
pub const PacketType = msg.PacketType;
pub const foo = msg.foo;
pub const ConnectionOptions = msg.ConnectionOptions;
pub const MessageTypeError = msg.MessageTypeError;
pub const MessageParseError = msg.MessageParseError;
pub const Message = msg.Message;