mirror of
https://git.robbyzambito.me/zits
synced 2026-02-04 11:44:48 +00:00
Sleep to go faster
The problem was that we were essentially flushing twice for every message when doing request-reply. Sleeping briefly gives the sender the opportunity to finish writing a full message to the queue, which we then check for before flushing. This reduces request-reply latency in benchmarks from roughly 90 ms down to about 200 µs.
This commit is contained in:
@@ -40,6 +40,16 @@ pub fn start(self: *Client, io: std.Io) !void {
|
||||
std.debug.assert(self.to_client.end == 0);
|
||||
while (true) {
|
||||
self.to_client.end = try self.recv_queue.get(io, self.to_client.buffer, 1);
|
||||
// Wait 1 nanosecond to see if more data is in the queue.
|
||||
// If there is, add it to the write buffer before sending it.
|
||||
// The reason for this is because if we send the first chunk as soon as we get it,
|
||||
// we will likely be sending a partial message, which will end up being way slower.
|
||||
try io.sleep(.fromNanoseconds(1), .awake);
|
||||
self.to_client.end += try self.recv_queue.get(
|
||||
io,
|
||||
self.to_client.buffer[self.to_client.end..],
|
||||
0,
|
||||
);
|
||||
try self.to_client.flush();
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user