const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const AutoHashMapUnmanaged = std.AutoHashMapUnmanaged;

const Io = std.Io;
const Dir = Io.Dir;
const Group = Io.Group;
const IpAddress = std.Io.net.IpAddress;
const Mutex = Io.Mutex;
const Queue = Io.Queue;
const Stream = std.Io.net.Stream;

pub const Client = @import("./Server/Client.zig");

pub const message = @import("./Server/message.zig");
const parse = message.parse;

const MessageType = message.Control;
const Message = message.Message;
const ServerInfo = Message.ServerInfo;

const Msgs = Client.Msgs;
const Server = @This();

const builtin = @import("builtin");

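/// A single active subscription: the subject pattern, the owning client, and a
/// pointer to that client's receive queue (plus its write lock) that
/// `publishMessage` pushes matching messages into.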
const Subscription = struct {
    subject: []const u8,
    client_id: usize,
    sid: []const u8,
    queue_group: ?[]const u8,
    queue_lock: *Mutex,
    queue: *Queue(u8),

    fn deinit(self: Subscription, alloc: Allocator) void {
        alloc.free(self.subject);
        alloc.free(self.sid);
        if (self.queue_group) |g| alloc.free(g);
    }
};

const eql = std.mem.eql;
const log = std.log.scoped(.zits);
const panic = std.debug.panic;

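// Fields of the Server struct itself (this file is the struct: `Server = @This()`).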
info: ServerInfo,
clients: AutoHashMapUnmanaged(usize, *Client) = .empty,

subs_lock: Mutex = .init,
subscriptions: ArrayList(Subscription) = .empty,

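/// Frees every remaining subscription, then releases the subscription list and
/// the client map. Queued-but-undelivered bytes are not drained yet (see TODO below).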
pub fn deinit(server: *Server, io: Io, alloc: Allocator) void {
    server.subs_lock.lockUncancelable(io);
    defer server.subs_lock.unlock(io);
    for (server.subscriptions.items) |sub| {
        sub.deinit(alloc);
    }
    // TODO drain subscription queues
    server.subscriptions.deinit(alloc);
    server.clients.deinit(alloc);
}

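/// Binds the listening socket, then accepts connections in a loop, spawning one
/// concurrent `handleConnectionInfallible` task per client inside `client_group`.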
pub fn start(server: *Server, io: Io, gpa: Allocator) !void {
    var tcp_server = try IpAddress.listen(try IpAddress.parse(
        server.info.host,
        server.info.port,
    ), io, .{});
    defer tcp_server.deinit(io);
    log.debug("Server headers: {s}", .{if (server.info.headers) "true" else "false"});
    log.debug("Server max payload: {d}", .{server.info.max_payload});
    log.info("Server ID: {s}", .{server.info.server_id});
    log.info("Server name: {s}", .{server.info.server_name});
    log.info("Server listening on {s}:{d}", .{ server.info.host, server.info.port });

    var client_group: Group = .init;
    defer client_group.cancel(io);

    const read_buffer_size, const write_buffer_size = getBufferSizes(io);
    log.debug("read buf: {d} write buf: {d}", .{ read_buffer_size, write_buffer_size });

    var id: usize = 0;
    while (true) : (id +%= 1) {
        if (server.clients.contains(id)) continue;
        log.debug("Accepting next client", .{});
        const stream = try tcp_server.accept(io);
        log.debug("Accepted connection {d}", .{id});
        _ = client_group.concurrent(io, handleConnectionInfallible, .{
            server,
            gpa,
            io,
            id,
            stream,
            read_buffer_size,
            write_buffer_size,
        }) catch {
            log.err("Could not start concurrent handler for {d}", .{id});
            stream.close(io);
        };
    }
}

fn addClient(server: *Server, allocator: Allocator, id: usize, client: *Client) !void {
    try server.clients.put(allocator, id, client);
}

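/// Drops the client from the map and removes every subscription it owned,
/// walking the subscription list from the back so `swapRemove` stays safe.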
fn removeClient(server: *Server, io: Io, allocator: Allocator, id: usize) void {
    server.subs_lock.lockUncancelable(io);
    defer server.subs_lock.unlock(io);
    if (server.clients.remove(id)) {
        const len = server.subscriptions.items.len;
        for (0..len) |from_end| {
            const i = len - from_end - 1;
            const sub = server.subscriptions.items[i];
            if (sub.client_id == id) {
                sub.deinit(allocator);
                _ = server.subscriptions.swapRemove(i);
            }
        }
    }
}

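/// Wrapper around `handleConnection` that logs per-client failures instead of
/// propagating them, so one broken connection cannot take down the accept loop.
/// Cancellation is still forwarded.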
fn handleConnectionInfallible(
    server: *Server,
    server_allocator: Allocator,
    io: Io,
    id: usize,
    stream: Stream,
    r_buf_size: usize,
    w_buf_size: usize,
) !void {
    handleConnection(server, server_allocator, io, id, stream, r_buf_size, w_buf_size) catch |err| switch (err) {
        error.Canceled => return error.Canceled,
        error.ClientDisconnected => log.debug("Client {d} disconnected", .{id}),
        else => log.err("Failed processing client {d}: {t}", .{ id, err }),
    };
}

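/// Per-connection handler: sets up the buffered reader/writer and the client's
/// receive queue, sends the initial INFO line, spawns `Client.start` as a
/// concurrent task, then dispatches protocol commands until the peer disconnects.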
fn handleConnection(
    server: *Server,
    server_allocator: Allocator,
    io: Io,
    id: usize,
    stream: Stream,
    r_buf_size: usize,
    w_buf_size: usize,
) !void {
    defer stream.close(io);

    var dba: std.heap.DebugAllocator(.{}) = .init;
    dba.backing_allocator = server_allocator;
    defer _ = dba.deinit();
    const alloc = if (builtin.mode == .Debug or builtin.mode == .ReleaseSafe)
        dba.allocator()
    else
        server_allocator;

    // Set up client writer
    const w_buffer: []u8 = try alloc.alloc(u8, w_buf_size);
    defer alloc.free(w_buffer);
    var writer = stream.writer(io, w_buffer);
    const out = &writer.interface;

    // Set up client reader
    const r_buffer: []u8 = try alloc.alloc(u8, r_buf_size);
    defer alloc.free(r_buffer);
    var reader = stream.reader(io, r_buffer);
    const in = &reader.interface;

    // Set up buffer queue
    const qbuf: []u8 = try alloc.alloc(u8, r_buf_size);
    defer alloc.free(qbuf);
    var recv_queue: Queue(u8) = .init(qbuf);
    defer recv_queue.close(io);

    // Create client
    var client: Client = .init(null, &recv_queue, in, out);
    defer client.deinit(server_allocator);

    try server.addClient(server_allocator, id, &client);
    defer server.removeClient(io, server_allocator, id);

    // Do initial handshake with client
    _ = try out.write("INFO ");
    try std.json.Stringify.value(server.info, .{}, out);
    _ = try out.write("\r\n");
    try out.flush();

    var client_task = try io.concurrent(Client.start, .{ &client, io });
    defer client_task.cancel(io) catch {};

    while (client.next()) |ctrl| {
        switch (ctrl) {
            .PING => {
                // Respond to ping with pong.
                try client.recv_queue_write_lock.lock(io);
                defer client.recv_queue_write_lock.unlock(io);
                _ = try client.from_client.take(2);
                try client.recv_queue.putAll(io, "PONG\r\n");
                // try client.send(io, "PONG\r\n");
            },
            .PUB => {
                @branchHint(.likely);
                // log.debug("received a pub msg", .{});
                try server.publishMessage(io, server_allocator, &client, .@"pub");
            },
            .HPUB => {
                @branchHint(.likely);
                try server.publishMessage(io, server_allocator, &client, .hpub);
            },
            .SUB => {
                try server.subscribe(io, server_allocator, &client, id);
            },
            .UNSUB => {
                try server.unsubscribe(io, server_allocator, client, id);
            },
            .CONNECT => {
                if (client.connect) |*current| {
                    current.deinit(server_allocator);
                }
                client.connect = try parse.connect(server_allocator, client.from_client);
            },
            else => |e| {
                panic("Unimplemented message: {any}\n", .{e});
            },
        }
    } else |err| switch (err) {
        error.EndOfStream => return error.ClientDisconnected,
        error.ReadFailed => return reader.err.?,
        else => |e| return e,
    }
}

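/// Reports whether a subscription subject matches a published subject under
/// NATS wildcard rules: `*` matches exactly one token, and a trailing `>`
/// matches one or more remaining tokens.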
fn subjectMatches(sub_subject: []const u8, pub_subject: []const u8) bool {
    // TODO: assert that sub_subject and pub_subject are valid.
    var sub_iter = std.mem.splitScalar(u8, sub_subject, '.');
    var pub_iter = std.mem.splitScalar(u8, pub_subject, '.');

    while (sub_iter.next()) |st| {
        const pt = pub_iter.next() orelse return false;

        if (eql(u8, st, ">")) return true;

        if (!eql(u8, st, "*") and !eql(u8, st, pt)) {
            return false;
        }
    }

    return pub_iter.next() == null;
}

test subjectMatches {
    const expect = std.testing.expect;
    try expect(subjectMatches("foo", "foo"));
    try expect(!subjectMatches("foo", "bar"));

    try expect(subjectMatches("foo.*", "foo.bar"));
    try expect(!subjectMatches("foo.*", "foo"));
    try expect(!subjectMatches("foo.>", "foo"));

    // The wildcard subscriptions foo.*.quux and foo.> both match foo.bar.quux,
    // but only the latter matches foo.bar.baz.
    try expect(subjectMatches("foo.*.quux", "foo.bar.quux"));
    try expect(subjectMatches("foo.>", "foo.bar.quux"));
    try expect(!subjectMatches("foo.*.quux", "foo.bar.baz"));
    try expect(subjectMatches("foo.>", "foo.bar.baz"));
}

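/// Fans a PUB or HPUB from `source_client` out to every matching subscription.
/// Within a queue group only the first matching member receives the message;
/// that member's subscription is then rotated to the end of the list so
/// deliveries round-robin across the group. If the publishing client requested
/// verbose mode, a `+OK` is queued back to it on the way out.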
fn publishMessage(
    server: *Server,
    io: Io,
    alloc: Allocator,
    source_client: *Client,
    comptime pub_or_hpub: enum { @"pub", hpub },
) !void {
    defer if (source_client.connect) |c| {
        if (c.verbose) {
            if (source_client.recv_queue_write_lock.lock(io)) |_| {
                defer source_client.recv_queue_write_lock.unlock(io);
                source_client.recv_queue.putAll(io, "+OK\r\n") catch {};
            } else |_| {}
        }
    };

    const hpubmsg = switch (pub_or_hpub) {
        .@"pub" => {},
        .hpub => try parse.hpub(source_client.from_client),
    };

    const msg: Message.Pub = switch (pub_or_hpub) {
        .@"pub" => try parse.@"pub"(source_client.from_client),
        .hpub => hpubmsg.@"pub",
    };

    // const subject = switch (pub_or_hpub) {
    //     .PUB => |pb| pb.subject,
    //     .HPUB => |hp| hp.@"pub".subject,
    //     else => unreachable,
    // };
    try server.subs_lock.lock(io);
    defer server.subs_lock.unlock(io);
    var published_queue_groups: ArrayList([]const u8) = .empty;
    defer published_queue_groups.deinit(alloc);
    var published_queue_sub_idxs: ArrayList(usize) = .empty;
    defer published_queue_sub_idxs.deinit(alloc);

    var line_writer_allocating: std.Io.Writer.Allocating = .init(alloc);
    defer line_writer_allocating.deinit();
    var line_writer = &line_writer_allocating.writer;

    subs: for (0..server.subscriptions.items.len) |i| {
        const subscription = server.subscriptions.items[i];
        if (subjectMatches(subscription.subject, msg.subject)) {
            if (subscription.queue_group) |sg| {
                for (published_queue_groups.items) |g| {
                    if (eql(u8, g, sg)) {
                        continue :subs;
                    }
                }
                // Don't republish to the same queue
                try published_queue_groups.append(alloc, sg);
                // Move this index to the end of the subscription list,
                // to prioritize other subscriptions in the queue next time.
                try published_queue_sub_idxs.append(alloc, i);
            }

            line_writer_allocating.clearRetainingCapacity();

            switch (pub_or_hpub) {
                .@"pub" => _ = try line_writer.write("MSG "),
                .hpub => _ = try line_writer.write("HMSG "),
            }
            try line_writer.print("{s} {s} ", .{ msg.subject, subscription.sid });
            if (msg.reply_to) |reply_to| {
                try line_writer.print("{s} ", .{reply_to});
            }
            switch (pub_or_hpub) {
                .hpub => {
                    try line_writer.print("{d} ", .{hpubmsg.header_bytes});
                },
                else => {},
            }
            try line_writer.print("{d}\r\n", .{msg.payload.len});

            try subscription.queue_lock.lock(io);
            defer subscription.queue_lock.unlock(io);
            try subscription.queue.putAll(io, line_writer.buffered());
            try subscription.queue.putAll(io, msg.payload);
            try subscription.queue.putAll(io, "\r\n");
        }
    }

    for (0..published_queue_sub_idxs.items.len) |from_end| {
        const i = published_queue_sub_idxs.items.len - from_end - 1;
        server.subscriptions.appendAssumeCapacity(server.subscriptions.orderedRemove(i));
    }
}

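/// Handles SUB: parses the subject, sid, and optional queue group, duplicates
/// them into server-owned memory, and records the subscription against this
/// client's receive queue.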
fn subscribe(
    server: *Server,
    io: Io,
    gpa: Allocator,
    client: *Client,
    id: usize,
    // msg: Message.Sub,
) !void {
    const msg = try parse.sub(client.from_client);
    try server.subs_lock.lock(io);
    defer server.subs_lock.unlock(io);
    const subject = try gpa.dupe(u8, msg.subject);
    errdefer gpa.free(subject);
    const sid = try gpa.dupe(u8, msg.sid);
    errdefer gpa.free(sid);
    const queue_group = if (msg.queue_group) |q| try gpa.dupe(u8, q) else null;
    errdefer if (queue_group) |q| gpa.free(q);
    try server.subscriptions.append(gpa, .{
        .subject = subject,
        .client_id = id,
        .sid = sid,
        .queue_group = queue_group,
        .queue_lock = &client.recv_queue_write_lock,
        .queue = client.recv_queue,
    });
}

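/// Handles UNSUB: removes every subscription owned by this client whose sid
/// matches the one in the message, walking the list from the back so
/// `swapRemove` does not skip entries.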
fn unsubscribe(
    server: *Server,
    io: Io,
    gpa: Allocator,
    client: Client,
    id: usize,
) !void {
    const msg = try parse.unsub(client.from_client);
    try server.subs_lock.lock(io);
    defer server.subs_lock.unlock(io);
    const len = server.subscriptions.items.len;
    for (0..len) |from_end| {
        const i = len - from_end - 1;
        const sub = server.subscriptions.items[i];
        if (sub.client_id == id and eql(u8, sub.sid, msg.sid)) {
            sub.deinit(gpa);
            _ = server.subscriptions.swapRemove(i);
        }
    }
}

/// Probes the system for appropriate read and write buffer sizes, trying to
/// match the kernel socket buffer limits so each syscall pushes as much data
/// as possible.
fn getBufferSizes(io: Io) struct { usize, usize } {
    const default_size = 4 * 1024;
    const default = .{ default_size, default_size };

    const dir = Dir.openDirAbsolute(io, "/proc/sys/net/core", .{}) catch {
        log.warn("couldn't open /proc/sys/net/core", .{});
        return default;
    };

    var buf: [64]u8 = undefined;

    const rmem_max = readBufferSize(io, dir, "rmem_max", &buf, default_size);
    const wmem_max = readBufferSize(io, dir, "wmem_max", &buf, default_size);

    return .{ rmem_max, wmem_max };
}

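/// Reads a single numeric value from `filename` inside `dir`, trimming the
/// trailing newline, and falls back to `default` on any read or parse error.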
fn readBufferSize(io: Io, dir: anytype, filename: []const u8, buf: []u8, default: usize) usize {
    const bytes = dir.readFile(io, filename, buf) catch |err| {
        log.err("couldn't open {s}: {any}", .{ filename, err });
        return default;
    };

    return std.fmt.parseUnsigned(usize, bytes[0 .. bytes.len - 1], 10) catch |err| {
        log.err("couldn't parse {s}: {any}", .{ bytes[0 .. bytes.len - 1], err });
        return default;
    };
}

pub const default_id = "server-id-123";
pub const default_name = "Zits Server";