Mirror of https://git.robbyzambito.me/zits
Reorganized things
@@ -44,7 +44,11 @@ pub fn main() !void {
const pub_cmd = app.createCommand("pub", "Publish a message.");
try zits_app.addSubcommand(pub_cmd);

const matches = try app.parseProcess();
var io_impl: std.Io.Threaded = .init_single_threaded;
defer io_impl.deinit();
const io = io_impl.io();

const matches = try app.parseProcess(io);

if (matches.subcommandMatches("serve")) |serve_matches| {
var info: zits.Server.ServerInfo = .{

@@ -2,85 +2,46 @@ const Message = @import("message_parser.zig").Message;
const std = @import("std");

pub const ClientState = struct {
connect: Message.AllocatedConnect,
connect: ?Message.Connect,

/// Messages that this client should receive.
recv_queue: std.Io.Queue(Message) = undefined,
recv_queue_buffer: [1024]Message = undefined,
// Used to take ownership of values as they are put in the queue.
recv_alloc: std.mem.Allocator,
write_lock: std.Io.Mutex,

from_client: *std.Io.Reader,
to_client: *std.Io.Writer,

task: ?std.Io.Future(void) = null,

pub fn init(
connect: Message.AllocatedConnect,
alloc: std.mem.Allocator,
connect: ?Message.Connect,
in: *std.Io.Reader,
out: *std.Io.Writer,
) !ClientState {
var res: ClientState = .{
) ClientState {
return .{
.connect = connect,
.recv_alloc = alloc,
.write_lock = .init,
.from_client = in,
.to_client = out,
};
res.recv_queue = .init(&res.recv_queue_buffer);

return res;
}

pub fn start(self: *ClientState, io: std.Io) !void {
self.task = try io.concurrent(processWrite, .{ self, io });
}

fn processWrite(
self: *ClientState,
io: std.Io,
) void {
while (true) {
const message = self.recv_queue.getOne(io) catch break;
switch (message) {
.@"+ok" => {
writeOk(self.to_client) catch break;
},
.pong => {
writePong(self.to_client) catch break;
},
.info => |info| {
writeInfo(self.to_client, info) catch break;
},
.msg => |m| {
defer m.deinit(self.recv_alloc);
writeMsg(self.to_client, m) catch break;
},
else => {
std.debug.panic("unimplemented write", .{});
},
}
}
}

pub fn deinit(self: *ClientState, io: std.Io, allocator: std.mem.Allocator) void {
if (self.task) |*t| {
t.cancel(io);
}
self.connect.deinit();
_ = allocator;
// allocator.destroy(self.recv_queue);
}

/// Return true if the value was put in the clients buffer to process, else false.
pub fn send(self: *ClientState, io: std.Io, msg: Message) !void {
// Client needs to own msg that is put in its queue
try self.write_lock.lock(io);
defer self.write_lock.unlock(io);

switch (msg) {
.@"+ok" => {
try writeOk(self.to_client);
},
.pong => {
try writePong(self.to_client);
},
.info => |info| {
try writeInfo(self.to_client, info);
},
.msg => |m| {
try self.recv_queue.putOne(io, .{ .msg = try m.dupe(self.recv_alloc) });
try writeMsg(self.to_client, m);
},
else => {
try self.recv_queue.putOne(io, msg);
std.debug.panic("unimplemented write", .{});
},
}
}

@@ -14,9 +14,11 @@ const Subscription = struct {
info: ServerInfo,
clients: std.AutoHashMapUnmanaged(usize, *ClientState) = .empty,

subs_lock: std.Thread.Mutex = .{},
subs_lock: std.Io.Mutex = .init,
subscriptions: std.ArrayList(Subscription) = .empty,

msg_queue: std.Io.Queue(Message.Pub),

var keep_running = std.atomic.Value(bool).init(true);

fn handleSigInt(sig: std.os.linux.SIG) callconv(.c) void {
@@ -35,14 +37,22 @@ pub fn main(gpa: std.mem.Allocator, server_config: ServerInfo) !void {
// // Register the handler for SIGINT (Ctrl+C)
// std.posix.sigaction(std.posix.SIG.INT, &act, null);

// 64 mb buffer for messages
const queue_buf = try gpa.alloc(Message.Pub, 1024 * 1024);
defer gpa.free(queue_buf);

var server: Server = .{
.info = server_config,
.msg_queue = .init(queue_buf),
};

var threaded: std.Io.Threaded = .init(gpa);
var threaded: std.Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();

var msgProcess = try io.concurrent(processMsgs, .{ &server, io, gpa });
defer msgProcess.cancel(io) catch {};

var tcp_server = try std.Io.net.IpAddress.listen(try std.Io.net.IpAddress.parse(
server.info.host,
server.info.port,
@@ -65,15 +75,37 @@ pub fn main(gpa: std.mem.Allocator, server_config: ServerInfo) !void {
std.debug.print("Exiting gracefully\n", .{});
}

fn processMsgs(server: *Server, io: std.Io, alloc: std.mem.Allocator) !void {
while (true) {
const msg = try server.msg_queue.getOne(io);
defer msg.deinit(alloc);

for (server.subscriptions.items) |subscription| {
if (subjectMatches(subscription.subject, msg.subject)) {
const client = server.clients.get(subscription.client_id) orelse {
std.debug.print("trying to publish to a client that no longer exists: {d}", .{subscription.client_id});
continue;
};
try client.send(io, .{ .msg = .{
.subject = msg.subject,
.sid = subscription.sid,
.reply_to = msg.reply_to,
.payload = msg.payload,
} });
}
}
}
}

fn addClient(server: *Server, allocator: std.mem.Allocator, id: usize, client: *ClientState) !void {
// server.clients.lockPointers();
try server.clients.put(allocator, id, client);
// server.clients.unlockPointers();
}

fn removeClient(server: *Server, allocator: std.mem.Allocator, id: usize) void {
server.subs_lock.lock();
defer server.subs_lock.unlock();
fn removeClient(server: *Server, io: std.Io, allocator: std.mem.Allocator, id: usize) void {
server.subs_lock.lockUncancelable(io);
defer server.subs_lock.unlock(io);
_ = server.clients.remove(id);
const len = server.subscriptions.items.len;
for (0..len) |i| {
@@ -95,72 +127,49 @@ fn handleConnection(
) !void {
var client_allocator: std.heap.DebugAllocator(.{}) = .init;
client_allocator.backing_allocator = server_allocator;
defer {
std.debug.print("deinitializing debug allocator\n", .{});
_ = client_allocator.deinit();
}
defer _ = client_allocator.deinit();

const allocator = client_allocator.allocator();
defer stream.close(io);
var w_buffer: [4096]u8 = undefined;
var writer = stream.writer(io, &w_buffer);
const out = &writer.interface;

std.debug.print("out pointer in client handler: {*}\n", .{out});

var r_buffer: [8192]u8 = undefined;
var reader = stream.reader(io, &r_buffer);
const in = &reader.interface;

@import("./client.zig").writeInfo(out, server.info) catch return;
var client_state: ClientState = .init(null, in, out);
try client_state.send(io, .{ .info = server.info });

var connect_arena: std.heap.ArenaAllocator = .init(allocator);
defer connect_arena.deinit();
const connect = (Message.next(connect_arena.allocator(), in) catch return).connect;
var client_state: ClientState = try .init(connect, allocator, in, out);
try client_state.start(io);
defer client_state.deinit(io, allocator);
client_state.connect = (Message.next(connect_arena.allocator(), in) catch return).connect;

try server.addClient(server_allocator, id, &client_state);
defer server.removeClient(server_allocator, id);
defer server.removeClient(io, server_allocator, id);

var msg_arena: std.heap.ArenaAllocator = .init(allocator);
defer msg_arena.deinit();
const msg_allocator = msg_arena.allocator();

while (client_state.next(msg_allocator)) |msg| {
defer _ = msg_arena.reset(.retain_capacity);
// Messages are owned by the server after they are received from the client
while (client_state.next(server_allocator)) |msg| {
switch (msg) {
.ping => {
// Respond to ping with pong.
try client_state.send(io, .pong);
},
.@"pub" => |pb| {
defer {
msg_allocator.free(pb.payload);
msg_allocator.free(pb.subject);
if (pb.reply_to) |r| {
msg_allocator.free(r);
}
}
// Do not free pb, server.publishMessage takes ownership.
try server.publishMessage(io, pb);
if (client_state.connect.connect.verbose) {
try client_state.send(io, .@"+ok");
if (client_state.connect) |c| {
if (c.verbose) {
try client_state.send(io, .@"+ok");
}
}
},
.sub => |sub| {
defer {
msg_allocator.free(sub.subject);
msg_allocator.free(sub.sid);
if (sub.queue_group) |q| {
msg_allocator.free(q);
}
}
try server.subscribe(server_allocator, id, sub);
try server.subscribe(io, server_allocator, id, sub);
},
.unsub => |unsub| {
defer {
msg_allocator.free(unsub.sid);
}
try server.unsubscribe(server_allocator, id, unsub);
try server.unsubscribe(io, server_allocator, id, unsub);
},
else => |e| {
std.debug.panic("Unimplemented message: {any}\n", .{e});
@@ -186,36 +195,26 @@ fn subjectMatches(expected: []const u8, actual: []const u8) bool {
}

fn publishMessage(server: *Server, io: std.Io, msg: Message.Pub) !void {
for (server.subscriptions.items) |subscription| {
if (subjectMatches(subscription.subject, msg.subject)) {
const client = server.clients.get(subscription.client_id) orelse {
std.debug.print("trying to publish to a client that no longer exists: {d}", .{subscription.client_id});
continue;
};
try client.send(io, .{ .msg = .{
.subject = msg.subject,
.sid = subscription.sid,
.reply_to = msg.reply_to,
.payload = msg.payload,
} });
}
}
}

fn subscribe(server: *Server, gpa: std.mem.Allocator, id: usize, msg: Message.Sub) !void {
std.debug.print("Recieved SUBSCRIBE message: {any}\n\n", .{msg});
server.subs_lock.lock();
defer server.subs_lock.unlock();
try server.subscriptions.append(gpa, .{
.subject = try gpa.dupe(u8, msg.subject),
.client_id = id,
.sid = try gpa.dupe(u8, msg.sid),
try server.msg_queue.putOne(io, .{
.payload = msg.payload,
.reply_to = msg.reply_to,
.subject = msg.subject,
});
}

fn unsubscribe(server: *Server, gpa: std.mem.Allocator, id: usize, msg: Message.Unsub) !void {
server.subs_lock.lock();
defer server.subs_lock.unlock();
fn subscribe(server: *Server, io: std.Io, gpa: std.mem.Allocator, id: usize, msg: Message.Sub) !void {
try server.subs_lock.lock(io);
defer server.subs_lock.unlock(io);
try server.subscriptions.append(gpa, .{
.subject = msg.subject,
.client_id = id,
.sid = msg.sid,
});
}

fn unsubscribe(server: *Server, io: std.Io, gpa: std.mem.Allocator, id: usize, msg: Message.Unsub) !void {
try server.subs_lock.lock(io);
defer server.subs_lock.unlock(io);
const len = server.subscriptions.items.len;
for (0..len) |i| {
const sub = server.subscriptions.items[len - i - 1];

@@ -33,7 +33,7 @@ pub const MessageType = enum {

pub const Message = union(MessageType) {
info: ServerInfo,
connect: AllocatedConnect,
connect: Connect,
@"pub": Pub,
hpub: void,
sub: Sub,
@@ -71,14 +71,6 @@ pub const Message = union(MessageType) {
/// feature.
proto: u32 = 1,
};
pub const AllocatedConnect = struct {
allocator: std.heap.ArenaAllocator,
connect: Connect,

pub fn deinit(self: AllocatedConnect) void {
self.allocator.deinit();
}
};
pub const Connect = struct {
verbose: bool = false,
pedantic: bool = false,
@@ -96,6 +88,39 @@ pub const Message = union(MessageType) {
no_responders: ?bool = null,
headers: ?bool = null,
nkey: ?[]const u8 = null,

pub fn deinit(self: Connect, alloc: std.mem.Allocator) void {
if (self.auth_token) |a| alloc.free(a);
if (self.user) |u| alloc.free(u);
if (self.pass) |p| alloc.free(p);
if (self.name) |n| alloc.free(n);
alloc.free(self.lang);
alloc.free(self.version);
if (self.sig) |s| alloc.free(s);
if (self.jwt) |j| alloc.free(j);
if (self.nkey) |n| alloc.free(n);
}

pub fn dupe(self: Connect, alloc: std.mem.Allocator) !Connect {
return .{
.verbose = self.verbose,
.pedantic = self.pedantic,
.tls_required = self.tls_required,
.auth_token = if (self.auth_token) |a| try alloc.dupe(u8, a) else null,
.user = if (self.user) |u| try alloc.dupe(u8, u) else null,
.pass = if (self.pass) |p| try alloc.dupe(u8, p) else null,
.name = if (self.name) |n| try alloc.dupe(u8, n) else null,
.lang = self.lang,
.version = self.version,
.protocol = self.protocol,
.echo = self.echo,
.sig = if (self.sig) |s| try alloc.dupe(u8, s) else null,
.jwt = if (self.jwt) |j| try alloc.dupe(u8, j) else null,
.no_responders = self.no_responders,
.headers = self.headers,
.nkey = if (self.nkey) |n| try alloc.dupe(u8, n) else null,
};
}
};
pub const Pub = struct {
/// The destination subject to publish to.
@@ -208,8 +233,8 @@ pub const Message = union(MessageType) {

switch (operation) {
.connect => {
// TODO: should be ARENA allocator
var connect_arena_allocator: std.heap.ArenaAllocator = .init(alloc);
defer connect_arena_allocator.deinit();
const connect_allocator = connect_arena_allocator.allocator();
const connect_string_writer_allocating: std.Io.Writer.Allocating = try .initCapacity(connect_allocator, 1024);
var connect_string_writer = connect_string_writer_allocating.writer;
@@ -223,7 +248,7 @@ pub const Message = union(MessageType) {
// TODO: should be CONNECTION allocator
const res = try std.json.parseFromSliceLeaky(Connect, connect_allocator, connect_string_writer.buffered(), .{ .allocate = .alloc_always });

return .{ .connect = .{ .allocator = connect_arena_allocator, .connect = res } };
return .{ .connect = try res.dupe(alloc) };
},
.@"pub" => {
try in.discardAll(1); // throw away space