Skip to main content

laminar_connectors/kafka/
sink_config.rs

1//! Kafka sink connector configuration.
2//!
3//! [`KafkaSinkConfig`] encapsulates all tuning knobs for the Kafka producer,
4//! parsed from a SQL `WITH (...)` clause via [`ConnectorConfig`].
5
6use std::collections::HashMap;
7use std::time::Duration;
8
9use rdkafka::ClientConfig;
10
11use crate::config::ConnectorConfig;
12use crate::error::ConnectorError;
13use crate::kafka::config::{CompatibilityLevel, SaslMechanism, SecurityProtocol, SrAuth};
14use crate::serde::Format;
15
16/// Configuration for the Kafka Sink Connector.
17///
18/// Parsed from SQL `WITH (...)` clause options.
19///
20/// Uses a custom `Debug` impl that redacts `sasl_password` and
21/// `ssl_key_password` to prevent credential leakage in logs.
#[derive(Clone)]
pub struct KafkaSinkConfig {
    /// Kafka broker addresses (comma-separated).
    pub bootstrap_servers: String,
    /// Target Kafka topic name.
    pub topic: String,
    /// Security protocol for broker connections.
    pub security_protocol: SecurityProtocol,
    /// SASL authentication mechanism.
    pub sasl_mechanism: Option<SaslMechanism>,
    /// SASL username (for PLAIN, SCRAM-SHA-256, SCRAM-SHA-512).
    pub sasl_username: Option<String>,
    /// SASL password (for PLAIN, SCRAM-SHA-256, SCRAM-SHA-512).
    /// Redacted by the custom `Debug` impl.
    pub sasl_password: Option<String>,
    /// Path to SSL CA certificate file (PEM format).
    pub ssl_ca_location: Option<String>,
    /// Path to client SSL certificate file (PEM format).
    pub ssl_certificate_location: Option<String>,
    /// Path to client SSL private key file (PEM format).
    pub ssl_key_location: Option<String>,
    /// Password for encrypted SSL private key.
    /// Redacted by the custom `Debug` impl.
    pub ssl_key_password: Option<String>,
    /// Serialization format.
    pub format: Format,
    /// Schema Registry URL for Avro/Protobuf.
    pub schema_registry_url: Option<String>,
    /// Schema Registry authentication.
    pub schema_registry_auth: Option<SrAuth>,
    /// Schema compatibility level override.
    pub schema_compatibility: Option<CompatibilityLevel>,
    /// Schema Registry SSL CA certificate path.
    pub schema_registry_ssl_ca_location: Option<String>,
    /// Delivery guarantee level.
    pub delivery_guarantee: DeliveryGuarantee,
    /// Transactional ID prefix for exactly-once. When unset, a topic-derived
    /// id is generated in `to_rdkafka_config`.
    ///
    // TODO(distributed): embed the lease epoch here so a new lease holder
    // fences the previous one via Kafka's producer epoch.
    pub transactional_id: Option<String>,
    /// Transaction timeout.
    pub transaction_timeout: Duration,
    /// Acknowledgment level.
    pub acks: Acks,
    /// Maximum number of in-flight requests per connection
    /// (`validate` requires <= 5 for exactly-once delivery).
    pub max_in_flight: usize,
    /// Maximum time to wait for delivery confirmation.
    /// Maps to rdkafka `message.timeout.ms`.
    pub delivery_timeout: Duration,
    /// Key column name for partitioning.
    pub key_column: Option<String>,
    /// Partitioning strategy.
    pub partitioner: PartitionStrategy,
    /// Maximum time to wait before sending a batch (milliseconds).
    pub linger_ms: u64,
    /// Maximum batch size in bytes (rdkafka `batch.size`).
    pub batch_size: usize,
    /// Maximum number of messages per batch.
    pub batch_num_messages: Option<usize>,
    /// Compression algorithm.
    pub compression: CompressionType,
    /// Dead letter queue topic for failed records.
    pub dlq_topic: Option<String>,
    /// Maximum records to buffer before flushing.
    pub flush_batch_size: usize,
    /// Additional rdkafka client properties (pass-through); keys are stored
    /// with the `kafka.` prefix already stripped.
    pub kafka_properties: HashMap<String, String>,
}
88
89impl std::fmt::Debug for KafkaSinkConfig {
90    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
91        f.debug_struct("KafkaSinkConfig")
92            .field("bootstrap_servers", &self.bootstrap_servers)
93            .field("topic", &self.topic)
94            .field("format", &self.format)
95            .field("delivery_guarantee", &self.delivery_guarantee)
96            .field("security_protocol", &self.security_protocol)
97            .field("sasl_mechanism", &self.sasl_mechanism)
98            .field("sasl_password", &self.sasl_password.as_ref().map(|_| "***"))
99            .field(
100                "ssl_key_password",
101                &self.ssl_key_password.as_ref().map(|_| "***"),
102            )
103            .field("partitioner", &self.partitioner)
104            .field("acks", &self.acks)
105            .finish_non_exhaustive()
106    }
107}
108
109impl Default for KafkaSinkConfig {
110    fn default() -> Self {
111        Self {
112            bootstrap_servers: String::new(),
113            topic: String::new(),
114            security_protocol: SecurityProtocol::default(),
115            sasl_mechanism: None,
116            sasl_username: None,
117            sasl_password: None,
118            ssl_ca_location: None,
119            ssl_certificate_location: None,
120            ssl_key_location: None,
121            ssl_key_password: None,
122            format: Format::Json,
123            schema_registry_url: None,
124            schema_registry_auth: None,
125            schema_compatibility: None,
126            schema_registry_ssl_ca_location: None,
127            delivery_guarantee: DeliveryGuarantee::AtLeastOnce,
128            transactional_id: None,
129            transaction_timeout: Duration::from_secs(60),
130            acks: Acks::All,
131            max_in_flight: 5,
132            delivery_timeout: Duration::from_secs(120),
133            key_column: None,
134            partitioner: PartitionStrategy::KeyHash,
135            linger_ms: 5,
136            batch_size: 16_384,
137            batch_num_messages: None,
138            compression: CompressionType::None,
139            dlq_topic: None,
140            flush_batch_size: 1_000,
141            kafka_properties: HashMap::new(),
142        }
143    }
144}
145
impl KafkaSinkConfig {
    /// Parses a sink config from a [`ConnectorConfig`] (SQL WITH clause).
    ///
    /// Unrecognized keys are ignored except those prefixed `kafka.`, which
    /// are collected (prefix-stripped) into `kafka_properties` for
    /// pass-through to rdkafka. The parsed config is validated before return.
    ///
    /// # Errors
    ///
    /// Returns `ConnectorError::MissingConfig` if required keys are absent,
    /// or `ConnectorError::ConfigurationError` on invalid values.
    #[allow(clippy::too_many_lines, clippy::field_reassign_with_default)]
    pub fn from_config(config: &ConnectorConfig) -> Result<Self, ConnectorError> {
        let mut cfg = Self::default();

        // `bootstrap.servers` and `topic` are the only required keys;
        // everything else falls back to `Self::default()`.
        cfg.bootstrap_servers = config
            .get("bootstrap.servers")
            .ok_or_else(|| ConnectorError::missing_config("bootstrap.servers"))?
            .to_string();

        cfg.topic = config
            .get("topic")
            .ok_or_else(|| ConnectorError::missing_config("topic"))?
            .to_string();

        if let Some(s) = config.get("security.protocol") {
            cfg.security_protocol = s.parse()?;
        }

        if let Some(s) = config.get("sasl.mechanism") {
            cfg.sasl_mechanism = Some(s.parse()?);
        }

        cfg.sasl_username = config.get("sasl.username").map(String::from);
        cfg.sasl_password = config.get("sasl.password").map(String::from);
        cfg.ssl_ca_location = config.get("ssl.ca.location").map(String::from);
        cfg.ssl_certificate_location = config.get("ssl.certificate.location").map(String::from);
        cfg.ssl_key_location = config.get("ssl.key.location").map(String::from);
        cfg.ssl_key_password = config.get("ssl.key.password").map(String::from);

        if let Some(fmt) = config.get("format") {
            cfg.format = fmt.parse().map_err(ConnectorError::Serde)?;
        }

        cfg.schema_registry_url = config.get("schema.registry.url").map(String::from);

        // Schema Registry basic auth is configured only when BOTH username
        // and password are present.
        // NOTE(review): a lone username or lone password is dropped silently
        // — confirm this shouldn't be surfaced as a configuration error.
        let sr_user = config.get("schema.registry.username");
        let sr_pass = config.get("schema.registry.password");
        if let (Some(user), Some(pass)) = (sr_user, sr_pass) {
            cfg.schema_registry_auth = Some(SrAuth {
                username: user.to_string(),
                password: pass.to_string(),
            });
        }

        if let Some(c) = config.get("schema.compatibility") {
            cfg.schema_compatibility = Some(c.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!("invalid schema.compatibility: '{c}'"))
            })?);
        }

        cfg.schema_registry_ssl_ca_location = config
            .get("schema.registry.ssl.ca.location")
            .map(String::from);

        if let Some(dg) = config.get("delivery.guarantee") {
            cfg.delivery_guarantee = dg.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!(
                    "invalid delivery.guarantee: '{dg}' (expected 'at-least-once' or 'exactly-once')"
                ))
            })?;
        }

        cfg.transactional_id = config.get("transactional.id").map(String::from);

        if let Some(v) = config.get("transaction.timeout.ms") {
            let ms: u64 = v.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!("invalid transaction.timeout.ms: '{v}'"))
            })?;
            cfg.transaction_timeout = Duration::from_millis(ms);
        }

        if let Some(a) = config.get("acks") {
            cfg.acks = a.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!(
                    "invalid acks: '{a}' (expected 'all', '1', or '0')"
                ))
            })?;
        }

        if let Some(v) = config.get("max.in.flight.requests") {
            cfg.max_in_flight = v.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!("invalid max.in.flight.requests: '{v}'"))
            })?;
        }

        if let Some(v) = config.get("delivery.timeout.ms") {
            let ms: u64 = v.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!("invalid delivery.timeout.ms: '{v}'"))
            })?;
            cfg.delivery_timeout = Duration::from_millis(ms);
        }

        cfg.key_column = config.get("key.column").map(String::from);

        if let Some(p) = config.get("partitioner") {
            cfg.partitioner = p.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!(
                    "invalid partitioner: '{p}' (expected 'key-hash', 'round-robin', or 'sticky')"
                ))
            })?;
        }

        if let Some(v) = config.get("linger.ms") {
            cfg.linger_ms = v.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!("invalid linger.ms: '{v}'"))
            })?;
        }

        if let Some(v) = config.get("batch.size") {
            cfg.batch_size = v.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!("invalid batch.size: '{v}'"))
            })?;
        }

        if let Some(v) = config.get("batch.num.messages") {
            cfg.batch_num_messages = Some(v.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!("invalid batch.num.messages: '{v}'"))
            })?);
        }

        if let Some(c) = config.get("compression.type") {
            cfg.compression = c.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!("invalid compression.type: '{c}'"))
            })?;
        }

        cfg.dlq_topic = config.get("dlq.topic").map(String::from);

        if let Some(v) = config.get("flush.batch.size") {
            cfg.flush_batch_size = v.parse().map_err(|_| {
                ConnectorError::ConfigurationError(format!("invalid flush.batch.size: '{v}'"))
            })?;
        }

        // Collect `kafka.`-prefixed keys for pass-through; the prefix is
        // stripped by `properties_with_prefix` (see the unit tests).
        for (key, value) in config.properties_with_prefix("kafka.") {
            cfg.kafka_properties.insert(key, value);
        }

        cfg.validate()?;
        Ok(cfg)
    }

    /// Validates the configuration.
    ///
    /// Checks cross-field invariants that individual parsers can't see:
    /// SASL requires a mechanism and credentials, Avro requires a Schema
    /// Registry, and exactly-once bounds the in-flight window.
    ///
    /// # Errors
    ///
    /// Returns `ConnectorError::ConfigurationError` on invalid combinations.
    pub fn validate(&self) -> Result<(), ConnectorError> {
        // Required fields (also enforced when constructed via `Default` and
        // mutated directly, not just through `from_config`).
        if self.bootstrap_servers.is_empty() {
            return Err(ConnectorError::missing_config("bootstrap.servers"));
        }
        if self.topic.is_empty() {
            return Err(ConnectorError::missing_config("topic"));
        }

        if self.security_protocol.uses_sasl() && self.sasl_mechanism.is_none() {
            return Err(ConnectorError::ConfigurationError(
                "sasl.mechanism is required when security.protocol is sasl_plaintext or sasl_ssl"
                    .into(),
            ));
        }

        if let Some(mechanism) = &self.sasl_mechanism {
            if mechanism.requires_credentials()
                && (self.sasl_username.is_none() || self.sasl_password.is_none())
            {
                return Err(ConnectorError::ConfigurationError(format!(
                    "sasl.username and sasl.password are required for {mechanism} mechanism"
                )));
            }
        }

        if self.security_protocol.uses_ssl() {
            if let Some(ref ca) = self.ssl_ca_location {
                if ca.is_empty() {
                    return Err(ConnectorError::ConfigurationError(
                        "ssl.ca.location cannot be empty when specified".into(),
                    ));
                }
            }
        }

        if self.format == Format::Debezium {
            return Err(ConnectorError::ConfigurationError(
                "Debezium is a deserialization-only format and cannot be used for sinks".into(),
            ));
        }

        // NOTE(review): the `schema_registry_url` field doc mentions
        // Avro/Protobuf, but only Avro is validated here — confirm whether a
        // Protobuf format should also require the registry URL.
        if self.format == Format::Avro && self.schema_registry_url.is_none() {
            return Err(ConnectorError::ConfigurationError(
                "Avro format requires 'schema.registry.url'".into(),
            ));
        }

        if self.max_in_flight == 0 {
            return Err(ConnectorError::ConfigurationError(
                "max.in.flight.requests must be > 0".into(),
            ));
        }

        // Idempotent/transactional producers only guarantee ordering with a
        // bounded in-flight window.
        if self.delivery_guarantee == DeliveryGuarantee::ExactlyOnce && self.max_in_flight > 5 {
            return Err(ConnectorError::ConfigurationError(
                "exactly-once requires max.in.flight.requests <= 5".into(),
            ));
        }

        Ok(())
    }

    /// Builds an rdkafka [`ClientConfig`] from this configuration.
    ///
    /// Always sets `enable.idempotence=true`. For exactly-once delivery,
    /// also sets `transactional.id` and `transaction.timeout.ms`.
    #[must_use]
    pub fn to_rdkafka_config(&self) -> ClientConfig {
        let mut config = ClientConfig::new();

        config.set("bootstrap.servers", &self.bootstrap_servers);
        config.set("security.protocol", self.security_protocol.as_rdkafka_str());

        if let Some(ref mechanism) = self.sasl_mechanism {
            config.set("sasl.mechanism", mechanism.as_rdkafka_str());
        }

        if let Some(ref username) = self.sasl_username {
            config.set("sasl.username", username);
        }

        if let Some(ref password) = self.sasl_password {
            config.set("sasl.password", password);
        }

        if let Some(ref ca) = self.ssl_ca_location {
            config.set("ssl.ca.location", ca);
        }

        if let Some(ref cert) = self.ssl_certificate_location {
            config.set("ssl.certificate.location", cert);
        }

        if let Some(ref key) = self.ssl_key_location {
            config.set("ssl.key.location", key);
        }

        if let Some(ref key_pass) = self.ssl_key_password {
            config.set("ssl.key.password", key_pass);
        }

        // Producer tuning. Note `delivery_timeout` maps to rdkafka's
        // `message.timeout.ms`.
        config
            .set("enable.idempotence", "true")
            .set("acks", self.acks.as_rdkafka_str())
            .set("linger.ms", self.linger_ms.to_string())
            .set("batch.size", self.batch_size.to_string())
            .set("compression.type", self.compression.as_rdkafka_str())
            .set(
                "max.in.flight.requests.per.connection",
                self.max_in_flight.to_string(),
            )
            .set(
                "message.timeout.ms",
                self.delivery_timeout.as_millis().to_string(),
            );

        if let Some(num_msgs) = self.batch_num_messages {
            config.set("batch.num.messages", num_msgs.to_string());
        }

        // Exactly-once: make the producer transactional. When no id was
        // configured, derive one from the topic (see the TODO on
        // `transactional_id` regarding epoch fencing for distributed use).
        if self.delivery_guarantee == DeliveryGuarantee::ExactlyOnce {
            let txn_id = self
                .transactional_id
                .clone()
                .unwrap_or_else(|| format!("laminardb-sink-{}", self.topic));
            config.set("transactional.id", txn_id);
            config.set(
                "transaction.timeout.ms",
                self.transaction_timeout.as_millis().to_string(),
            );
        }

        // Apply pass-through properties, blocking security-critical keys
        // that could silently downgrade authentication or break semantics.
        for (key, value) in &self.kafka_properties {
            if is_blocked_passthrough_key(key) {
                tracing::warn!(
                    key,
                    "ignoring kafka.* pass-through property that overrides a security setting"
                );
                continue;
            }
            config.set(key, value);
        }

        config
    }

    /// Builds an rdkafka [`ClientConfig`] for the dead letter queue producer.
    ///
    /// Inherits security settings (SASL, SSL) from the main config but is
    /// non-transactional. Does not set `transactional.id`.
    ///
    /// NOTE(review): unlike `to_rdkafka_config`, `kafka_properties`
    /// pass-through entries are not applied here — confirm that is
    /// intentional for the DLQ producer.
    #[must_use]
    pub fn to_dlq_rdkafka_config(&self) -> ClientConfig {
        let mut config = ClientConfig::new();

        config.set("bootstrap.servers", &self.bootstrap_servers);
        config.set("security.protocol", self.security_protocol.as_rdkafka_str());

        if let Some(ref mechanism) = self.sasl_mechanism {
            config.set("sasl.mechanism", mechanism.as_rdkafka_str());
        }
        if let Some(ref username) = self.sasl_username {
            config.set("sasl.username", username);
        }
        if let Some(ref password) = self.sasl_password {
            config.set("sasl.password", password);
        }
        if let Some(ref ca) = self.ssl_ca_location {
            config.set("ssl.ca.location", ca);
        }
        if let Some(ref cert) = self.ssl_certificate_location {
            config.set("ssl.certificate.location", cert);
        }
        if let Some(ref key) = self.ssl_key_location {
            config.set("ssl.key.location", key);
        }
        if let Some(ref key_pass) = self.ssl_key_password {
            config.set("ssl.key.password", key_pass);
        }

        config.set("enable.idempotence", "true");

        config
    }
}
486
487/// Returns `true` if a pass-through kafka.* key must not override explicit settings.
/// Returns `true` if a pass-through kafka.* key must not override explicit settings.
///
/// Blocks every `sasl.kerberos.*` property plus a fixed set of keys that
/// would downgrade authentication or break delivery semantics.
fn is_blocked_passthrough_key(key: &str) -> bool {
    // Exact-match keys owned by explicit configuration.
    const BLOCKED: &[&str] = &[
        "security.protocol",
        "sasl.mechanism",
        "sasl.username",
        "sasl.password",
        "sasl.oauthbearer.config",
        "ssl.ca.location",
        "ssl.certificate.location",
        "ssl.key.location",
        "ssl.key.password",
        "ssl.endpoint.identification.algorithm",
        "enable.auto.commit",
        "enable.idempotence",
        "transactional.id",
    ];
    key.starts_with("sasl.kerberos.") || BLOCKED.contains(&key)
}
507
508pub use crate::connector::DeliveryGuarantee;
509
510/// Partitioning strategy for distributing records across Kafka partitions.
/// Partitioning strategy for distributing records across Kafka partitions.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PartitionStrategy {
    /// Hash the key column (Murmur2, Kafka-compatible).
    KeyHash,
    /// Round-robin across all partitions.
    RoundRobin,
    /// Sticky: batch records to the same partition until full.
    Sticky,
}

// `str_enum!` (defined elsewhere in the crate) presumably generates the
// string parsing/formatting impls with the listed aliases — TODO confirm
// against the macro definition (note the other invocations in this file use
// the `fromstr` form).
str_enum!(PartitionStrategy, lowercase_udash, String, "unknown partition strategy",
    KeyHash => "key-hash", "keyhash", "hash";
    RoundRobin => "round-robin", "roundrobin";
    Sticky => "sticky"
);
526
527/// Compression type for produced Kafka messages.
/// Compression type for produced Kafka messages.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CompressionType {
    /// No compression.
    None,
    /// Gzip compression.
    Gzip,
    /// Snappy compression.
    Snappy,
    /// LZ4 compression.
    Lz4,
    /// Zstandard compression.
    Zstd,
}

impl CompressionType {
    /// Returns the rdkafka configuration string.
    ///
    /// These tokens match rdkafka's accepted `compression.type` values.
    #[must_use]
    pub fn as_rdkafka_str(&self) -> &'static str {
        match *self {
            CompressionType::None => "none",
            CompressionType::Gzip => "gzip",
            CompressionType::Snappy => "snappy",
            CompressionType::Lz4 => "lz4",
            CompressionType::Zstd => "zstd",
        }
    }
}
555
// `str_enum!(fromstr ...)` presumably generates only the `FromStr` impl here
// (accepting the listed aliases), since `Display` is hand-written below —
// TODO confirm against the macro definition.
str_enum!(fromstr CompressionType, lowercase_nodash, String, "unknown compression type",
    None => "none";
    Gzip => "gzip";
    Snappy => "snappy";
    Lz4 => "lz4";
    Zstd => "zstd", "zstandard"
);
563
564impl std::fmt::Display for CompressionType {
565    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
566        write!(f, "{}", self.as_rdkafka_str())
567    }
568}
569
570/// Acknowledgment level for Kafka producer.
/// Acknowledgment level for Kafka producer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Acks {
    /// No acknowledgment (fire-and-forget).
    None,
    /// Leader acknowledgment only.
    Leader,
    /// All in-sync replica acknowledgment.
    All,
}

impl Acks {
    /// Returns the rdkafka configuration string.
    ///
    /// rdkafka expects the numeric forms `"0"`/`"1"` and the keyword `"all"`.
    #[must_use]
    pub fn as_rdkafka_str(&self) -> &'static str {
        match *self {
            Acks::None => "0",
            Acks::Leader => "1",
            Acks::All => "all",
        }
    }
}
592
// `str_enum!(fromstr ...)` presumably generates only the `FromStr` impl
// (note "-1" is accepted as an input alias for `All` even though
// `as_rdkafka_str` emits "all") — TODO confirm against the macro definition.
str_enum!(fromstr Acks, lowercase_nodash, String, "unknown acks value",
    None => "0", "none";
    Leader => "1", "leader";
    All => "-1", "all"
);
598
599impl std::fmt::Display for Acks {
600    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
601        write!(f, "{}", self.as_rdkafka_str())
602    }
603}
604
605#[cfg(test)]
606mod tests {
607    use super::*;
608
609    fn make_config(pairs: &[(&str, &str)]) -> ConnectorConfig {
610        let mut config = ConnectorConfig::new("kafka");
611        for (k, v) in pairs {
612            config.set(*k, *v);
613        }
614        config
615    }
616
617    fn required_pairs() -> Vec<(&'static str, &'static str)> {
618        vec![
619            ("bootstrap.servers", "localhost:9092"),
620            ("topic", "output-events"),
621        ]
622    }
623
624    #[test]
625    fn test_parse_required_fields() {
626        let config = make_config(&required_pairs());
627        let cfg = KafkaSinkConfig::from_config(&config).unwrap();
628        assert_eq!(cfg.bootstrap_servers, "localhost:9092");
629        assert_eq!(cfg.topic, "output-events");
630        assert_eq!(cfg.delivery_guarantee, DeliveryGuarantee::AtLeastOnce);
631        assert_eq!(cfg.format, Format::Json);
632        assert_eq!(cfg.security_protocol, SecurityProtocol::Plaintext);
633    }
634
635    #[test]
636    fn test_missing_bootstrap_servers() {
637        let config = make_config(&[("topic", "t")]);
638        assert!(KafkaSinkConfig::from_config(&config).is_err());
639    }
640
641    #[test]
642    fn test_missing_topic() {
643        let config = make_config(&[("bootstrap.servers", "b:9092")]);
644        assert!(KafkaSinkConfig::from_config(&config).is_err());
645    }
646
647    #[test]
648    fn test_parse_delivery_guarantee() {
649        let mut pairs = required_pairs();
650        pairs.push(("delivery.guarantee", "exactly-once"));
651        let config = make_config(&pairs);
652        let cfg = KafkaSinkConfig::from_config(&config).unwrap();
653        assert_eq!(cfg.delivery_guarantee, DeliveryGuarantee::ExactlyOnce);
654    }
655
656    #[test]
657    fn test_parse_security_sasl_ssl() {
658        let mut pairs = required_pairs();
659        pairs.extend_from_slice(&[
660            ("security.protocol", "sasl_ssl"),
661            ("sasl.mechanism", "SCRAM-SHA-512"),
662            ("sasl.username", "producer"),
663            ("sasl.password", "secret123"),
664            ("ssl.ca.location", "/etc/ssl/ca.pem"),
665        ]);
666        let config = make_config(&pairs);
667        let cfg = KafkaSinkConfig::from_config(&config).unwrap();
668
669        assert_eq!(cfg.security_protocol, SecurityProtocol::SaslSsl);
670        assert_eq!(cfg.sasl_mechanism, Some(SaslMechanism::ScramSha512));
671        assert_eq!(cfg.sasl_username, Some("producer".to_string()));
672        assert_eq!(cfg.sasl_password, Some("secret123".to_string()));
673        assert_eq!(cfg.ssl_ca_location, Some("/etc/ssl/ca.pem".to_string()));
674    }
675
676    #[test]
677    fn test_parse_security_ssl_only() {
678        let mut pairs = required_pairs();
679        pairs.extend_from_slice(&[
680            ("security.protocol", "ssl"),
681            ("ssl.ca.location", "/etc/ssl/ca.pem"),
682            ("ssl.certificate.location", "/etc/ssl/client.pem"),
683            ("ssl.key.location", "/etc/ssl/client.key"),
684            ("ssl.key.password", "keypass"),
685        ]);
686        let config = make_config(&pairs);
687        let cfg = KafkaSinkConfig::from_config(&config).unwrap();
688
689        assert_eq!(cfg.security_protocol, SecurityProtocol::Ssl);
690        assert_eq!(cfg.ssl_ca_location, Some("/etc/ssl/ca.pem".to_string()));
691        assert_eq!(
692            cfg.ssl_certificate_location,
693            Some("/etc/ssl/client.pem".to_string())
694        );
695        assert_eq!(
696            cfg.ssl_key_location,
697            Some("/etc/ssl/client.key".to_string())
698        );
699        assert_eq!(cfg.ssl_key_password, Some("keypass".to_string()));
700    }
701
702    #[test]
703    fn test_parse_all_optional_fields() {
704        let mut pairs = required_pairs();
705        pairs.extend_from_slice(&[
706            ("format", "avro"),
707            ("delivery.guarantee", "exactly-once"),
708            ("transactional.id", "my-txn"),
709            ("transaction.timeout.ms", "30000"),
710            ("key.column", "order_id"),
711            ("partitioner", "round-robin"),
712            ("linger.ms", "10"),
713            ("batch.size", "32768"),
714            ("batch.num.messages", "5000"),
715            ("compression.type", "zstd"),
716            ("acks", "1"),
717            ("max.in.flight.requests", "3"),
718            ("delivery.timeout.ms", "60000"),
719            ("dlq.topic", "my-dlq"),
720            ("flush.batch.size", "500"),
721            ("schema.registry.url", "http://sr:8081"),
722            ("schema.registry.username", "user"),
723            ("schema.registry.password", "pass"),
724            ("schema.registry.ssl.ca.location", "/etc/ssl/sr-ca.pem"),
725        ]);
726        let config = make_config(&pairs);
727        let cfg = KafkaSinkConfig::from_config(&config).unwrap();
728
729        assert_eq!(cfg.format, Format::Avro);
730        assert_eq!(cfg.delivery_guarantee, DeliveryGuarantee::ExactlyOnce);
731        assert_eq!(cfg.transactional_id.as_deref(), Some("my-txn"));
732        assert_eq!(cfg.transaction_timeout, Duration::from_secs(30));
733        assert_eq!(cfg.key_column.as_deref(), Some("order_id"));
734        assert_eq!(cfg.partitioner, PartitionStrategy::RoundRobin);
735        assert_eq!(cfg.linger_ms, 10);
736        assert_eq!(cfg.batch_size, 32_768);
737        assert_eq!(cfg.batch_num_messages, Some(5000));
738        assert_eq!(cfg.compression, CompressionType::Zstd);
739        assert_eq!(cfg.acks, Acks::Leader);
740        assert_eq!(cfg.max_in_flight, 3);
741        assert_eq!(cfg.delivery_timeout, Duration::from_secs(60));
742        assert_eq!(cfg.dlq_topic.as_deref(), Some("my-dlq"));
743        assert_eq!(cfg.flush_batch_size, 500);
744        assert_eq!(cfg.schema_registry_url.as_deref(), Some("http://sr:8081"));
745        assert!(cfg.schema_registry_auth.is_some());
746        assert_eq!(
747            cfg.schema_registry_ssl_ca_location,
748            Some("/etc/ssl/sr-ca.pem".to_string())
749        );
750    }
751
752    #[test]
753    fn test_validate_avro_requires_sr() {
754        let mut cfg = KafkaSinkConfig::default();
755        cfg.bootstrap_servers = "b:9092".into();
756        cfg.topic = "t".into();
757        cfg.format = Format::Avro;
758        assert!(cfg.validate().is_err());
759    }
760
761    #[test]
762    fn test_validate_exactly_once_max_in_flight() {
763        let mut cfg = KafkaSinkConfig::default();
764        cfg.bootstrap_servers = "b:9092".into();
765        cfg.topic = "t".into();
766        cfg.delivery_guarantee = DeliveryGuarantee::ExactlyOnce;
767        cfg.max_in_flight = 10;
768        assert!(cfg.validate().is_err());
769    }
770
771    #[test]
772    fn test_validate_sasl_without_mechanism() {
773        let mut cfg = KafkaSinkConfig::default();
774        cfg.bootstrap_servers = "b:9092".into();
775        cfg.topic = "t".into();
776        cfg.security_protocol = SecurityProtocol::SaslSsl;
777        // sasl_mechanism not set
778        assert!(cfg.validate().is_err());
779    }
780
781    #[test]
782    fn test_validate_sasl_plain_without_credentials() {
783        let mut cfg = KafkaSinkConfig::default();
784        cfg.bootstrap_servers = "b:9092".into();
785        cfg.topic = "t".into();
786        cfg.security_protocol = SecurityProtocol::SaslPlaintext;
787        cfg.sasl_mechanism = Some(SaslMechanism::ScramSha256);
788        // username/password not set
789        assert!(cfg.validate().is_err());
790    }
791
792    #[test]
793    fn test_rdkafka_config_at_least_once() {
794        let mut cfg = KafkaSinkConfig::default();
795        cfg.bootstrap_servers = "b:9092".into();
796        cfg.topic = "t".into();
797        let rdk = cfg.to_rdkafka_config();
798        assert_eq!(rdk.get("enable.idempotence"), Some("true"));
799        assert!(rdk.get("transactional.id").is_none());
800        assert_eq!(rdk.get("security.protocol"), Some("plaintext"));
801    }
802
803    #[test]
804    fn test_rdkafka_config_exactly_once() {
805        let mut cfg = KafkaSinkConfig::default();
806        cfg.bootstrap_servers = "b:9092".into();
807        cfg.topic = "t".into();
808        cfg.delivery_guarantee = DeliveryGuarantee::ExactlyOnce;
809        let rdk = cfg.to_rdkafka_config();
810        assert_eq!(rdk.get("enable.idempotence"), Some("true"));
811        assert!(rdk.get("transactional.id").is_some());
812    }
813
814    #[test]
815    fn test_rdkafka_config_with_security() {
816        let mut cfg = KafkaSinkConfig::default();
817        cfg.bootstrap_servers = "b:9092".into();
818        cfg.topic = "t".into();
819        cfg.security_protocol = SecurityProtocol::SaslSsl;
820        cfg.sasl_mechanism = Some(SaslMechanism::Plain);
821        cfg.sasl_username = Some("user".into());
822        cfg.sasl_password = Some("pass".into());
823        cfg.ssl_ca_location = Some("/ca.pem".into());
824
825        let rdk = cfg.to_rdkafka_config();
826        assert_eq!(rdk.get("security.protocol"), Some("sasl_ssl"));
827        assert_eq!(rdk.get("sasl.mechanism"), Some("PLAIN"));
828        assert_eq!(rdk.get("sasl.username"), Some("user"));
829        assert_eq!(rdk.get("sasl.password"), Some("pass"));
830        assert_eq!(rdk.get("ssl.ca.location"), Some("/ca.pem"));
831    }
832
833    #[test]
834    fn test_rdkafka_config_with_batch_num_messages() {
835        let mut cfg = KafkaSinkConfig::default();
836        cfg.bootstrap_servers = "b:9092".into();
837        cfg.topic = "t".into();
838        cfg.batch_num_messages = Some(10_000);
839
840        let rdk = cfg.to_rdkafka_config();
841        assert_eq!(rdk.get("batch.num.messages"), Some("10000"));
842    }
843
844    #[test]
845    fn test_kafka_passthrough_properties() {
846        let mut pairs = required_pairs();
847        pairs.push(("kafka.socket.timeout.ms", "5000"));
848        pairs.push(("kafka.queue.buffering.max.messages", "100000"));
849        let config = make_config(&pairs);
850        let cfg = KafkaSinkConfig::from_config(&config).unwrap();
851        assert_eq!(
852            cfg.kafka_properties.get("socket.timeout.ms").unwrap(),
853            "5000"
854        );
855    }
856
857    #[test]
858    fn test_defaults() {
859        let cfg = KafkaSinkConfig::default();
860        assert_eq!(cfg.delivery_guarantee, DeliveryGuarantee::AtLeastOnce);
861        assert_eq!(cfg.partitioner, PartitionStrategy::KeyHash);
862        assert_eq!(cfg.compression, CompressionType::None);
863        assert_eq!(cfg.acks, Acks::All);
864        assert_eq!(cfg.linger_ms, 5);
865        assert_eq!(cfg.batch_size, 16_384);
866        assert_eq!(cfg.max_in_flight, 5);
867        assert_eq!(cfg.flush_batch_size, 1_000);
868        assert_eq!(cfg.security_protocol, SecurityProtocol::Plaintext);
869        assert!(cfg.sasl_mechanism.is_none());
870        assert!(cfg.batch_num_messages.is_none());
871    }
872
873    #[test]
874    fn test_enum_display() {
875        assert_eq!(DeliveryGuarantee::AtLeastOnce.to_string(), "at-least-once");
876        assert_eq!(DeliveryGuarantee::ExactlyOnce.to_string(), "exactly-once");
877        assert_eq!(PartitionStrategy::KeyHash.to_string(), "key-hash");
878        assert_eq!(PartitionStrategy::RoundRobin.to_string(), "round-robin");
879        assert_eq!(PartitionStrategy::Sticky.to_string(), "sticky");
880        assert_eq!(CompressionType::Zstd.to_string(), "zstd");
881        assert_eq!(Acks::All.to_string(), "all");
882    }
883
884    #[test]
885    fn test_enum_parse() {
886        assert_eq!(
887            "at-least-once".parse::<DeliveryGuarantee>().unwrap(),
888            DeliveryGuarantee::AtLeastOnce
889        );
890        assert_eq!(
891            "exactly-once".parse::<DeliveryGuarantee>().unwrap(),
892            DeliveryGuarantee::ExactlyOnce
893        );
894        assert_eq!(
895            "key-hash".parse::<PartitionStrategy>().unwrap(),
896            PartitionStrategy::KeyHash
897        );
898        assert_eq!(
899            "round-robin".parse::<PartitionStrategy>().unwrap(),
900            PartitionStrategy::RoundRobin
901        );
902        assert_eq!(
903            "sticky".parse::<PartitionStrategy>().unwrap(),
904            PartitionStrategy::Sticky
905        );
906        assert_eq!(
907            "gzip".parse::<CompressionType>().unwrap(),
908            CompressionType::Gzip
909        );
910        assert_eq!(
911            "snappy".parse::<CompressionType>().unwrap(),
912            CompressionType::Snappy
913        );
914        assert_eq!(
915            "lz4".parse::<CompressionType>().unwrap(),
916            CompressionType::Lz4
917        );
918        assert_eq!(
919            "zstd".parse::<CompressionType>().unwrap(),
920            CompressionType::Zstd
921        );
922        assert_eq!("all".parse::<Acks>().unwrap(), Acks::All);
923        assert_eq!("1".parse::<Acks>().unwrap(), Acks::Leader);
924        assert_eq!("0".parse::<Acks>().unwrap(), Acks::None);
925    }
926}