Sleep to go faster

The problem was that I was flushing twice for every message when
doing request-reply: the first flush usually went out carrying only a
partial message.
Sleeping for a nanosecond after the first read gives the sender the
opportunity to finish writing a full message to the queue, which we
then check for (with a second, non-blocking read) before flushing.

This takes the request-reply latency benchmark down from roughly 90ms
to 200us.
2026-01-10 16:08:23 -05:00
parent 99ea755658
commit 0861703ddc
2 changed files with 15 additions and 1 deletion
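Concretely, the old receive loop flushed after every queue read, so a message the sender wrote as multiple chunks went out as multiple flushes. The fix, in the second file below, is to sleep after the first read and drain the queue once more before flushing. A condensed sketch of the two shapes (names and calls are taken from the diff below; I am assuming the trailing argument to get is the minimum number of bytes to wait for, so 0 never blocks):

    // Before: flush as soon as the first chunk arrives.
    self.to_client.end = try self.recv_queue.get(io, self.to_client.buffer, 1);
    try self.to_client.flush();

    // After: give the sender ~1ns to finish the message, drain, flush once.
    self.to_client.end = try self.recv_queue.get(io, self.to_client.buffer, 1);
    try io.sleep(.fromNanoseconds(1), .awake);
    self.to_client.end += try self.recv_queue.get(io, self.to_client.buffer[self.to_client.end..], 0);
    try self.to_client.flush();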

@@ -47,6 +47,7 @@ const Subscription = struct {
             // would put an invalid set series of bytes in the receivers queue.
             _ = try self.queue.putUncancelable(io, chunk, chunk.len);
         }
+        try io.checkCancel();
     }
 };
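For context, here is roughly how the send path reads after this hunk (a sketch; the signature is assumed, the body lines are from the diff). putUncancelable keeps a cancellation from landing mid-message, and the new checkCancel makes the message boundary the safe point where a pending cancellation is honored:

    // Sketch: signature assumed, body from the hunk above.
    fn send(self: *Subscription, io: std.Io, chunks: []const []const u8) !void {
        for (chunks) |chunk| {
            // Uncancelable puts: canceling mid-chunk would leave an
            // invalid series of bytes in the receiver's queue.
            _ = try self.queue.putUncancelable(io, chunk, chunk.len);
        }
        // The full message is queued; now honor any pending cancellation.
        try io.checkCancel();
    }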
@@ -382,7 +383,10 @@ fn publishMessage(
         ) catch unreachable;
         msg_chunks.appendBounded(msg.payload) catch unreachable;
-        try subscription.send(io, msg_chunks.items[0..chunk_count]);
+        subscription.send(io, msg_chunks.items[0..chunk_count]) catch |err| switch (err) {
+            error.Closed => {},
+            error.Canceled => |e| return e,
+        };
     }
 }
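This is the caller-side counterpart: instead of propagating every error with try, the publisher now treats send's two failure modes differently. A closed subscription just means the receiver went away, so the message is dropped; a cancellation of the publishing task (presumably surfaced by the new checkCancel) still has to propagate. Annotated:

    subscription.send(io, msg_chunks.items[0..chunk_count]) catch |err| switch (err) {
        // Receiver is gone; not the publisher's problem, drop the message.
        error.Closed => {},
        // The publish itself was canceled; unwind as usual.
        error.Canceled => |e| return e,
    };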

@@ -40,6 +40,16 @@ pub fn start(self: *Client, io: std.Io) !void {
     std.debug.assert(self.to_client.end == 0);
     while (true) {
         self.to_client.end = try self.recv_queue.get(io, self.to_client.buffer, 1);
+        // Wait 1 nanosecond to see if more data is in the queue.
+        // If there is, add it to the write buffer before sending it.
+        // The reason for this is because if we send the first chunk as soon as we get it,
+        // we will likely be sending a partial message, which will end up being way slower.
+        try io.sleep(.fromNanoseconds(1), .awake);
+        self.to_client.end += try self.recv_queue.get(
+            io,
+            self.to_client.buffer[self.to_client.end..],
+            0,
+        );
         try self.to_client.flush();
     }
 }
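One note on the magic number: the point of a 1ns sleep is presumably not the nanosecond itself but that it suspends the receiving task at all, giving the sending task a chance to be scheduled and finish its remaining puts, which is what the commit message means by giving the sender the opportunity to finish writing. When nothing more arrives, the cost is one extra get with a minimum of 0 bytes, which should return immediately.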