Slow Consumers
One way some of the libraries deal with bursty message traffic is to buffer incoming messages for a subscription. So if an application can handle 10 messages per second but sometimes receives 20 messages per second, the library may hold the extra 10 to give the application time to catch up. To the server, the application will appear to be keeping up, and the server will consider the connection healthy. When that internal buffer reaches its limit, most client libraries will notify the application with a SlowConsumer error and discard messages.
Receiving and dropping messages keeps the connection to the server healthy, but it creates an application requirement: the application has to either tolerate missing messages or avoid falling behind in the first place. There are several common patterns:
- Use request/reply to throttle the sender and prevent overloading the subscriber
- Use a queue with multiple subscribers splitting the work (see the sketch after this list)
- Persist messages with something like NATS Streaming
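As an illustration of the queue approach, here is a minimal sketch in Java; the subject "updates", the queue group name "workers", and the class name are illustrative. Every instance of this program joins the same queue group, so the server delivers each message to only one member and the load is spread across workers instead of piling up on a single subscriber.

import io.nats.client.Connection;
import io.nats.client.Dispatcher;
import io.nats.client.Nats;

import java.nio.charset.StandardCharsets;

public class QueueWorker {
    public static void main(String[] args) throws Exception {
        try (Connection nc = Nats.connect("nats://demo.nats.io:4222")) {
            // Messages on "updates" are shared across all members of the
            // "workers" queue group; each message goes to exactly one member.
            Dispatcher d = nc.createDispatcher((msg) ->
                System.out.println("Handled: " + new String(msg.getData(), StandardCharsets.UTF_8)));
            d.subscribe("updates", "workers");

            Thread.sleep(60_000); // keep this worker alive for a while
        }
    }
}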
Libraries that cache incoming messages may provide two controls on the incoming queue, or pending messages. These limits are useful if the problem is bursty publishers rather than a continuous performance mismatch. Disabling these limits can be dangerous in production, and although setting them to 0 may help uncover problems during testing, doing so in production is also risky.
The incoming cache is usually per subscriber, but again, check the specific documentation for your client library.
The incoming queue can be limited in two ways: by message count and by total size. For example, to limit the incoming cache to 1,000 messages or 5MB, whichever comes first:
Java
Connection nc = Nats.connect("nats://demo.nats.io:4222");

Dispatcher d = nc.createDispatcher((msg) -> {
    // do something
});
d.subscribe("updates");

d.setPendingLimits(1_000, 5 * 1024 * 1024); // Set limits on a dispatcher

// Subscribe
Subscription sub = nc.subscribe("updates");

sub.setPendingLimits(1_000, 5 * 1024 * 1024); // Set limits on a subscription

// Do something

// Close the connection
nc.close();
JavaScript
// slow pending limits are not configurable on node-nats
Python
nc = NATS()

await nc.connect(servers=["nats://demo.nats.io:4222"])

future = asyncio.Future()

async def cb(msg):
    nonlocal future
    future.set_result(msg)

# Set limits of 1000 messages or 5MB
await nc.subscribe("updates", cb=cb, pending_bytes_limit=5*1024*1024, pending_msgs_limit=1000)
Ruby
TypeScript
// slow pending limits are not configurable on the TypeScript NATS client.
C
natsConnection      *conn = NULL;
natsSubscription    *sub1 = NULL;
natsSubscription    *sub2 = NULL;
natsStatus          s     = NATS_OK;

s = natsConnection_ConnectTo(&conn, NATS_DEFAULT_URL);

// Subscribe (onMsg is an application-defined natsMsgHandler callback)
if (s == NATS_OK)
    s = natsConnection_Subscribe(&sub1, conn, "updates", onMsg, NULL);

// Set limits of 1000 messages or 5MB, whichever comes first
if (s == NATS_OK)
    s = natsSubscription_SetPendingLimits(sub1, 1000, 5*1024*1024);

// Subscribe
if (s == NATS_OK)
    s = natsConnection_Subscribe(&sub2, conn, "updates", onMsg, NULL);

// Set no limits for this subscription
if (s == NATS_OK)
    s = natsSubscription_SetPendingLimits(sub2, -1, -1);

(...)

// Destroy objects that were created
natsSubscription_Destroy(sub1);
natsSubscription_Destroy(sub2);
natsConnection_Destroy(conn);
Detect a Slow Consumer and Check for Dropped Messages
When a slow consumer is detected and messages are about to be dropped, the library may notify the application. Some libraries, like Java, will not send this notification on every dropped message because that could be noisy; instead, the notification may be sent once each time the subscriber falls behind. Libraries may also provide a way to get a count of dropped messages so that applications can at least detect that a problem is occurring.
Go
// Set the callback that will be invoked when an asynchronous error occurs.
// logSlowConsumer is an application-defined nats.ErrHandler, i.e. a
// func(*nats.Conn, *nats.Subscription, error), that logs slow consumer errors.
nc, err := nats.Connect("demo.nats.io", nats.ErrorHandler(logSlowConsumer))
if err != nil {
	log.Fatal(err)
}
defer nc.Close()

// Do something with the connection
Java
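A minimal sketch, assuming the jnats ErrorListener interface with its slowConsumerDetected callback and the Consumer.getDroppedCount() counter (the class name is illustrative):

import io.nats.client.Connection;
import io.nats.client.Consumer;
import io.nats.client.ErrorListener;
import io.nats.client.Nats;
import io.nats.client.Options;

public class SlowConsumerListener {
    public static void main(String[] args) throws Exception {
        Options options = new Options.Builder()
                .server("nats://demo.nats.io:4222")
                .errorListener(new ErrorListener() {
                    public void errorOccurred(Connection conn, String error) {}
                    public void exceptionOccurred(Connection conn, Exception exp) {}

                    // Invoked when the subscriber falls behind,
                    // not on every dropped message.
                    public void slowConsumerDetected(Connection conn, Consumer consumer) {
                        // The consumer keeps a running count of dropped messages.
                        System.out.println("A slow consumer dropped messages: " + consumer.getDroppedCount());
                    }
                })
                .build();

        Connection nc = Nats.connect(options);

        // Do something with the connection

        nc.close();
    }
}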
JavaScript
// slow consumer detection is not configurable on the NATS JavaScript client.
Python
nc = NATS()

async def error_cb(e):
    if type(e) is nats.aio.errors.ErrSlowConsumer:
        print("Slow consumer error, unsubscribing from handling further messages...")
        await nc.unsubscribe(e.sid)

await nc.connect(
    servers=["nats://demo.nats.io:4222"],
    error_cb=error_cb,
)

msgs = []
future = asyncio.Future()

async def cb(msg):
    nonlocal msgs
    nonlocal future
    msgs.append(msg)
    if len(msgs) == 3:
        # Head of line blocking on other messages caused
        # by single message processing taking too long...
        await asyncio.sleep(1)

await nc.subscribe("updates", cb=cb, pending_msgs_limit=5)

for i in range(0, 10):
    await nc.publish("updates", "msg #{}".format(i).encode())
    await asyncio.sleep(0)

try:
    await asyncio.wait_for(future, 1)
except asyncio.TimeoutError:
    pass

for msg in msgs:
    print("[Received]", msg)

await nc.close()
Ruby
# The Ruby NATS client currently does not have an option to customize slow consumer limits per subscription.
C
static void
errorCB(natsConnection *conn, natsSubscription *sub, natsStatus s, void *closure)
{
    // Do something
    printf("Error: %d - %s", s, natsStatus_GetText(s));
}

(...)

natsConnection      *conn = NULL;
natsOptions         *opts = NULL;
natsStatus          s     = NATS_OK;

s = natsOptions_Create(&opts);
if (s == NATS_OK)
    s = natsOptions_SetErrorHandler(opts, errorCB, NULL);
if (s == NATS_OK)
    s = natsConnection_Connect(&conn, opts);

(...)

// Destroy objects that were created
natsOptions_Destroy(opts);
natsConnection_Destroy(conn);