Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 26 additions & 4 deletions crates/adapters/src/transport/kafka/ft/input.rs
Original file line number Diff line number Diff line change
Expand Up @@ -394,8 +394,9 @@ impl KafkaFtInputReaderInner {
.split_partition_queue(topic, *partition)
.ok_or_else(|| anyhow!("could not split queue for partition {partition}"))?;

let unparker = thread.parker.unparker().clone();
queue.set_nonempty_callback({
let unparker = thread.parker.unparker().clone();
let unparker = unparker.clone();
move || unparker.unpark()
});

Expand All @@ -404,6 +405,7 @@ impl KafkaFtInputReaderInner {
queue,
next_offset,
&config,
unparker,
));
receivers.insert(partition, receiver.clone());
thread.receivers.push(receiver);
Expand Down Expand Up @@ -981,6 +983,12 @@ fn update_backpressure(topic: &str, partition: i32, has_backpressure: bool) {
/// Non-test counterpart of the `#[cfg(test)]` `update_backpressure` above:
/// a deliberate no-op so that release builds skip the backpressure
/// bookkeeping that only the test suite inspects.
#[cfg(not(test))]
fn update_backpressure(_topic: &str, _partition: i32, _has_backpressure: bool) {}

/// Number of queued bytes at which a partition queue is considered backed up.
///
/// Not currently a configuration parameter; per review discussion there is no
/// demonstrated need for tuning it yet.
const BACKPRESSURE_THRESHOLD_BYTES: usize = 1_000_000;

/// Returns true if a partition queue that holds `n_bytes` should pause for
/// backpressure.
fn needs_backpressure(n_bytes: usize) -> bool {
    n_bytes >= BACKPRESSURE_THRESHOLD_BYTES
}

struct PartitionReceiver {
partition: i32,
queue: PartitionQueue<KafkaFtInputContext>,
Expand Down Expand Up @@ -1016,6 +1024,9 @@ struct PartitionReceiver {
/// lock.
n_bytes: AtomicUsize,

/// Wakes up the [RecvThread] that receives into this partition.
unparker: Unparker,

eof: AtomicBool,
fatal_error: AtomicBool,
}
Expand All @@ -1026,6 +1037,7 @@ impl PartitionReceiver {
queue: PartitionQueue<KafkaFtInputContext>,
next_offset: i64,
config: &KafkaInputConfig,
unparker: Unparker,
) -> Self {
let metadata_requested = config.metadata_requested();

Expand All @@ -1041,6 +1053,7 @@ impl PartitionReceiver {
fatal_error: AtomicBool::new(false),
config: config.clone(),
metadata_requested,
unparker,
}
}

Expand All @@ -1050,8 +1063,17 @@ impl PartitionReceiver {
match messages.first_key_value() {
Some((offset, _)) if *offset <= max => {
let (offset, (buffer, timestamp)) = messages.pop_first().unwrap();
self.n_bytes
.fetch_sub(buffer.len().bytes, Ordering::Relaxed);

// Account for subtracting the buffer's size from `self.n_bytes`.
// If that releases backpressure, then wake up its receiver
// thread.
let buffer_len = buffer.len().bytes;
let old_nbytes = self.n_bytes.fetch_sub(buffer_len, Ordering::Relaxed);
let new_bytes = old_nbytes - buffer_len;
if needs_backpressure(old_nbytes) && !needs_backpressure(new_bytes) {
self.unparker.unpark();
}

Some((offset, (buffer, timestamp)))
}
_ => None,
Expand Down Expand Up @@ -1204,7 +1226,7 @@ impl PartitionReceiver {
// be dequeued quicker than we can start queuing them again.
//
// [1]: https://github.com/confluentinc/librdkafka/wiki/FAQ#what-are-partition-queues-and-why-are-some-partitions-slower-than-others
let backpressure = self.n_bytes.load(Ordering::Relaxed) >= 1_000_000;
let backpressure = needs_backpressure(self.n_bytes.load(Ordering::Relaxed));
#[cfg(test)]
let backpressure = backpressure || self.messages.lock().unwrap().len() >= 1000;
update_backpressure(&self.config.topic, self.partition, backpressure);
Expand Down
Loading