diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index eb4131fbc47f..e4e897941c22 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -84,6 +84,11 @@ + + + + + diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml index 458738185355..2aa6e339d978 100644 --- a/checkstyle/suppressions.xml +++ b/checkstyle/suppressions.xml @@ -182,6 +182,9 @@ + + diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerInterceptor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerInterceptor.java index c04afccd8aaf..206e6d04a2c2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerInterceptor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerInterceptor.java @@ -39,6 +39,8 @@ * {@link org.apache.kafka.clients.consumer.KafkaConsumer#poll(java.time.Duration)}. *

* Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. + * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the interceptor to register metrics. The following tags are automatically added to + * all metrics registered: config set to interceptor.classes, and class set to the ConsumerInterceptor class name. */ public interface ConsumerInterceptor extends Configurable, AutoCloseable { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java index 7fda9a20c056..849b16f81323 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java @@ -303,12 +303,12 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); List> interceptorList = configuredConsumerInterceptors(config); - this.interceptors = new ConsumerInterceptors<>(interceptorList); - this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer); + this.interceptors = new ConsumerInterceptors<>(interceptorList, metrics); + this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer, metrics); this.subscriptions = createSubscriptionState(config, logContext); ClusterResourceListeners clusterResourceListeners = ClientUtils.configureClusterResourceListeners(metrics.reporters(), interceptorList, - Arrays.asList(deserializers.keyDeserializer, deserializers.valueDeserializer)); + Arrays.asList(deserializers.keyDeserializer(), deserializers.valueDeserializer())); this.metadata = metadataFactory.build(config, subscriptions, logContext, clusterResourceListeners); final List addresses = ClientUtils.parseAndValidateAddresses(config); metadata.bootstrap(addresses); @@ -460,13 +460,13 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { this.autoCommitEnabled = config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG); this.fetchBuffer = new FetchBuffer(logContext); this.isolationLevel = IsolationLevel.READ_UNCOMMITTED; - this.interceptors = new ConsumerInterceptors<>(Collections.emptyList()); this.time = time; this.metrics = new Metrics(time); + this.interceptors = new ConsumerInterceptors<>(Collections.emptyList(), metrics); this.metadata = metadata; this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); - this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); + this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); this.clientTelemetryReporter = Optional.empty(); ConsumerMetrics metricsRegistry = new ConsumerMetrics(CONSUMER_METRIC_GROUP_PREFIX); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java index 82a9bd2a53bf..e7ea5af09beb 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java +++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java @@ -179,13 +179,13 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { this.retryBackoffMaxMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); List> interceptorList = configuredConsumerInterceptors(config); - this.interceptors = new ConsumerInterceptors<>(interceptorList); - this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer); + this.interceptors = new ConsumerInterceptors<>(interceptorList, metrics); + this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer, metrics); this.subscriptions = createSubscriptionState(config, logContext); ClusterResourceListeners clusterResourceListeners = ClientUtils.configureClusterResourceListeners( metrics.reporters(), interceptorList, - Arrays.asList(this.deserializers.keyDeserializer, this.deserializers.valueDeserializer)); + Arrays.asList(this.deserializers.keyDeserializer(), this.deserializers.valueDeserializer())); this.metadata = new ConsumerMetadata(config, subscriptions, logContext, clusterResourceListeners); List addresses = ClientUtils.parseAndValidateAddresses(config); this.metadata.bootstrap(addresses); @@ -289,12 +289,12 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { this.metrics = new Metrics(time); this.clientId = config.getString(ConsumerConfig.CLIENT_ID_CONFIG); this.groupId = Optional.ofNullable(config.getString(ConsumerConfig.GROUP_ID_CONFIG)); - this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); + this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); this.isolationLevel = ConsumerUtils.configuredIsolationLevel(config); this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); this.assignors = assignors; this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, CONSUMER_METRIC_GROUP_PREFIX); - this.interceptors = new ConsumerInterceptors<>(Collections.emptyList()); + this.interceptors = new ConsumerInterceptors<>(Collections.emptyList(), metrics); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.retryBackoffMaxMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java index 2cba76588e5f..a505de2dc120 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java @@ -319,13 +319,13 @@ ConsumerRecord parseRecord(Deserializers deserializers, K key; V value; try { - key = keyBytes == null ? null : deserializers.keyDeserializer.deserialize(partition.topic(), headers, keyBytes); + key = keyBytes == null ? null : deserializers.keyDeserializer().deserialize(partition.topic(), headers, keyBytes); } catch (RuntimeException e) { log.error("Key Deserializers with error: {}", deserializers); throw newRecordDeserializationException(DeserializationExceptionOrigin.KEY, partition, timestampType, record, e, headers); } try { - value = valueBytes == null ? null : deserializers.valueDeserializer.deserialize(partition.topic(), headers, valueBytes); + value = valueBytes == null ? 
null : deserializers.valueDeserializer().deserialize(partition.topic(), headers, valueBytes); } catch (RuntimeException e) { log.error("Value Deserializers with error: {}", deserializers); throw newRecordDeserializationException(DeserializationExceptionOrigin.VALUE, partition, timestampType, record, e, headers); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java index c56ea1a03e97..c58b60ba0f25 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java @@ -17,10 +17,13 @@ package org.apache.kafka.clients.consumer.internals; +import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerInterceptor; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.internals.Plugin; +import org.apache.kafka.common.metrics.Metrics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -35,15 +38,15 @@ */ public class ConsumerInterceptors implements Closeable { private static final Logger log = LoggerFactory.getLogger(ConsumerInterceptors.class); - private final List> interceptors; + private final List>> interceptorPlugins; - public ConsumerInterceptors(List> interceptors) { - this.interceptors = interceptors; + public ConsumerInterceptors(List> interceptors, Metrics metrics) { + this.interceptorPlugins = Plugin.wrapInstances(interceptors, metrics, ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG); } /** Returns true if no interceptors are defined. All other methods will be no-ops in this case. 
*/ public boolean isEmpty() { - return interceptors.isEmpty(); + return interceptorPlugins.isEmpty(); } /** @@ -62,9 +65,9 @@ public boolean isEmpty() { */ public ConsumerRecords onConsume(ConsumerRecords records) { ConsumerRecords interceptRecords = records; - for (ConsumerInterceptor interceptor : this.interceptors) { + for (Plugin> interceptorPlugin : this.interceptorPlugins) { try { - interceptRecords = interceptor.onConsume(interceptRecords); + interceptRecords = interceptorPlugin.get().onConsume(interceptRecords); } catch (Exception e) { // do not propagate interceptor exception, log and continue calling other interceptors log.warn("Error executing interceptor onConsume callback", e); @@ -83,9 +86,9 @@ public ConsumerRecords onConsume(ConsumerRecords records) { * @param offsets A map of offsets by partition with associated metadata */ public void onCommit(Map offsets) { - for (ConsumerInterceptor interceptor : this.interceptors) { + for (Plugin> interceptorPlugin : this.interceptorPlugins) { try { - interceptor.onCommit(offsets); + interceptorPlugin.get().onCommit(offsets); } catch (Exception e) { // do not propagate interceptor exception, just log log.warn("Error executing interceptor onCommit callback", e); @@ -98,9 +101,9 @@ public void onCommit(Map offsets) { */ @Override public void close() { - for (ConsumerInterceptor interceptor : this.interceptors) { + for (Plugin> interceptorPlugin : this.interceptorPlugins) { try { - interceptor.close(); + interceptorPlugin.close(); } catch (Exception e) { log.error("Failed to close consumer interceptor ", e); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Deserializers.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Deserializers.java index 5de2a888775a..0926c720c0c6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Deserializers.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Deserializers.java @@ -19,6 +19,8 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.internals.Plugin; +import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.utils.Utils; @@ -28,44 +30,54 @@ public class Deserializers implements AutoCloseable { - public final Deserializer keyDeserializer; - public final Deserializer valueDeserializer; + private final Plugin> keyDeserializerPlugin; + private final Plugin> valueDeserializerPlugin; - public Deserializers(Deserializer keyDeserializer, Deserializer valueDeserializer) { - this.keyDeserializer = Objects.requireNonNull(keyDeserializer, "Key deserializer provided to Deserializers should not be null"); - this.valueDeserializer = Objects.requireNonNull(valueDeserializer, "Value deserializer provided to Deserializers should not be null"); - } - - public Deserializers(ConsumerConfig config) { - this(config, null, null); + public Deserializers(Deserializer keyDeserializer, Deserializer valueDeserializer, Metrics metrics) { + this.keyDeserializerPlugin = Plugin.wrapInstance( + Objects.requireNonNull(keyDeserializer, "Key deserializer provided to Deserializers should not be null"), + metrics, + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); + this.valueDeserializerPlugin = Plugin.wrapInstance( + Objects.requireNonNull(valueDeserializer, "Value deserializer provided to Deserializers should not 
be null"), + metrics, + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); } @SuppressWarnings("unchecked") - public Deserializers(ConsumerConfig config, Deserializer keyDeserializer, Deserializer valueDeserializer) { + public Deserializers(ConsumerConfig config, Deserializer keyDeserializer, Deserializer valueDeserializer, Metrics metrics) { String clientId = config.getString(ConsumerConfig.CLIENT_ID_CONFIG); if (keyDeserializer == null) { - this.keyDeserializer = config.getConfiguredInstance(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Deserializer.class); - this.keyDeserializer.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)), true); + keyDeserializer = config.getConfiguredInstance(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Deserializer.class); + keyDeserializer.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)), true); } else { config.ignore(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); - this.keyDeserializer = keyDeserializer; } + this.keyDeserializerPlugin = Plugin.wrapInstance(keyDeserializer, metrics, ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); if (valueDeserializer == null) { - this.valueDeserializer = config.getConfiguredInstance(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class); - this.valueDeserializer.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)), false); + valueDeserializer = config.getConfiguredInstance(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class); + valueDeserializer.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)), false); } else { config.ignore(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); - this.valueDeserializer = valueDeserializer; } + this.valueDeserializerPlugin = Plugin.wrapInstance(valueDeserializer, metrics, ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); + } + + public Deserializer keyDeserializer() { + return keyDeserializerPlugin.get(); + } + + public Deserializer valueDeserializer() { + return valueDeserializerPlugin.get(); } @Override public void close() { AtomicReference firstException = new AtomicReference<>(); - Utils.closeQuietly(keyDeserializer, "key deserializer", firstException); - Utils.closeQuietly(valueDeserializer, "value deserializer", firstException); + Utils.closeQuietly(keyDeserializerPlugin, "key deserializer", firstException); + Utils.closeQuietly(valueDeserializerPlugin, "value deserializer", firstException); Throwable exception = firstException.get(); if (exception != null) { @@ -79,8 +91,8 @@ public void close() { @Override public String toString() { return "Deserializers{" + - "keyDeserializer=" + keyDeserializer + - ", valueDeserializer=" + valueDeserializer + + "keyDeserializer=" + keyDeserializerPlugin.get() + + ", valueDeserializer=" + valueDeserializerPlugin.get() + '}'; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java index 74760beec6d7..838416b8428b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java @@ -296,13 +296,13 @@ ConsumerRecord parseRecord(final Deserializers deserializers, K key; V value; try { - key = keyBytes == null ? 
null : deserializers.keyDeserializer.deserialize(partition.topic(), headers, keyBytes); + key = keyBytes == null ? null : deserializers.keyDeserializer().deserialize(partition.topic(), headers, keyBytes); } catch (RuntimeException e) { log.error("Key Deserializers with error: {}", deserializers); throw newRecordDeserializationException(RecordDeserializationException.DeserializationExceptionOrigin.KEY, partition.topicPartition(), timestampType, record, e, headers); } try { - value = valueBytes == null ? null : deserializers.valueDeserializer.deserialize(partition.topic(), headers, valueBytes); + value = valueBytes == null ? null : deserializers.valueDeserializer().deserialize(partition.topic(), headers, valueBytes); } catch (RuntimeException e) { log.error("Value Deserializers with error: {}", deserializers); throw newRecordDeserializationException(RecordDeserializationException.DeserializationExceptionOrigin.VALUE, partition.topicPartition(), timestampType, record, e, headers); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java index e209ec00b0d1..82d92e125cc6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java @@ -253,12 +253,12 @@ private enum AcknowledgementMode { this.clientTelemetryReporter.ifPresent(reporters::add); this.metrics = createMetrics(config, time, reporters); - this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer); + this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer, metrics); this.currentFetch = ShareFetch.empty(); this.subscriptions = createSubscriptionState(config, logContext); ClusterResourceListeners clusterResourceListeners = ClientUtils.configureClusterResourceListeners( metrics.reporters(), - Arrays.asList(deserializers.keyDeserializer, deserializers.valueDeserializer)); + Arrays.asList(deserializers.keyDeserializer(), deserializers.valueDeserializer())); this.metadata = new ConsumerMetadata(config, subscriptions, logContext, clusterResourceListeners); final List addresses = ClientUtils.parseAndValidateAddresses(config); metadata.bootstrap(addresses); @@ -355,7 +355,7 @@ private enum AcknowledgementMode { this.time = time; this.metrics = new Metrics(time); this.clientTelemetryReporter = Optional.empty(); - this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer); + this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer, metrics); this.currentFetch = ShareFetch.empty(); this.subscriptions = subscriptions; this.metadata = metadata; @@ -451,7 +451,7 @@ private enum AcknowledgementMode { this.metrics = metrics; this.metadata = metadata; this.defaultApiTimeoutMs = defaultApiTimeoutMs; - this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); + this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); this.currentFetch = ShareFetch.empty(); this.applicationEventHandler = applicationEventHandler; this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP_PREFIX); diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index 65d6a1e99036..f5042867aaf6 100644 --- 
a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -57,6 +57,7 @@ import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.internals.ClusterResourceListeners; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.KafkaMetricsContext; import org.apache.kafka.common.metrics.MetricConfig; @@ -247,7 +248,7 @@ public class KafkaProducer implements Producer { // Visible for testing final Metrics metrics; private final KafkaProducerMetrics producerMetrics; - private final Partitioner partitioner; + private final Plugin partitionerPlugin; private final int maxRequestSize; private final long totalMemorySize; private final ProducerMetadata metadata; @@ -257,8 +258,8 @@ public class KafkaProducer implements Producer { private final Compression compression; private final Sensor errors; private final Time time; - private final Serializer keySerializer; - private final Serializer valueSerializer; + private final Plugin> keySerializerPlugin; + private final Plugin> valueSerializerPlugin; private final ProducerConfig producerConfig; private final long maxBlockTimeMs; private final boolean partitionerIgnoreKeys; @@ -332,11 +333,11 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali @SuppressWarnings("deprecation") private void warnIfPartitionerDeprecated() { // Using DefaultPartitioner and UniformStickyPartitioner is deprecated, see KIP-794. - if (partitioner instanceof org.apache.kafka.clients.producer.internals.DefaultPartitioner) { + if (partitionerPlugin.get() instanceof org.apache.kafka.clients.producer.internals.DefaultPartitioner) { log.warn("DefaultPartitioner is deprecated. Please clear " + ProducerConfig.PARTITIONER_CLASS_CONFIG + " configuration setting to get the default partitioning behavior"); } - if (partitioner instanceof org.apache.kafka.clients.producer.UniformStickyPartitioner) { + if (partitionerPlugin.get() instanceof org.apache.kafka.clients.producer.UniformStickyPartitioner) { log.warn("UniformStickyPartitioner is deprecated. 
Please clear " + ProducerConfig.PARTITIONER_CLASS_CONFIG + " configuration setting and set " + ProducerConfig.PARTITIONER_IGNORE_KEYS_CONFIG + " to 'true' to get the uniform sticky partitioning behavior"); @@ -380,30 +381,33 @@ private void warnIfPartitionerDeprecated() { config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); this.metrics = new Metrics(metricConfig, reporters, time, metricsContext); this.producerMetrics = new KafkaProducerMetrics(metrics); - this.partitioner = config.getConfiguredInstance( - ProducerConfig.PARTITIONER_CLASS_CONFIG, - Partitioner.class, - Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)); + this.partitionerPlugin = Plugin.wrapInstance( + config.getConfiguredInstance( + ProducerConfig.PARTITIONER_CLASS_CONFIG, + Partitioner.class, + Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), + metrics, + ProducerConfig.PARTITIONER_CLASS_CONFIG); warnIfPartitionerDeprecated(); this.partitionerIgnoreKeys = config.getBoolean(ProducerConfig.PARTITIONER_IGNORE_KEYS_CONFIG); long retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG); long retryBackoffMaxMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); if (keySerializer == null) { - this.keySerializer = config.getConfiguredInstance(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, - Serializer.class); - this.keySerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), true); + keySerializer = config.getConfiguredInstance(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, Serializer.class); + keySerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), true); } else { config.ignore(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); - this.keySerializer = keySerializer; } + this.keySerializerPlugin = Plugin.wrapInstance(keySerializer, metrics, ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); + if (valueSerializer == null) { - this.valueSerializer = config.getConfiguredInstance(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, - Serializer.class); - this.valueSerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), false); + valueSerializer = config.getConfiguredInstance(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, Serializer.class); + valueSerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), false); } else { config.ignore(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); - this.valueSerializer = valueSerializer; } + this.valueSerializerPlugin = Plugin.wrapInstance(valueSerializer, metrics, ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); + List> interceptorList = ClientUtils.configuredInterceptors(config, ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, @@ -411,11 +415,11 @@ private void warnIfPartitionerDeprecated() { if (interceptors != null) this.interceptors = interceptors; else - this.interceptors = new ProducerInterceptors<>(interceptorList); + this.interceptors = new ProducerInterceptors<>(interceptorList, metrics); ClusterResourceListeners clusterResourceListeners = ClientUtils.configureClusterResourceListeners( interceptorList, reporters, - Arrays.asList(this.keySerializer, this.valueSerializer)); + Arrays.asList(this.keySerializerPlugin.get(), this.valueSerializerPlugin.get())); this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); this.compression = 
configureCompression(config); @@ -426,7 +430,7 @@ private void warnIfPartitionerDeprecated() { this.apiVersions = new ApiVersions(); this.transactionManager = configureTransactionState(config, logContext); // There is no need to do work required for adaptive partitioning, if we use a custom partitioner. - boolean enableAdaptivePartitioning = partitioner == null && + boolean enableAdaptivePartitioning = partitionerPlugin.get() == null && config.getBoolean(ProducerConfig.PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG); RecordAccumulator.PartitionerConfig partitionerConfig = new RecordAccumulator.PartitionerConfig( enableAdaptivePartitioning, @@ -500,9 +504,9 @@ private void warnIfPartitionerDeprecated() { this.log = logContext.logger(KafkaProducer.class); this.metrics = metrics; this.producerMetrics = new KafkaProducerMetrics(metrics); - this.partitioner = partitioner; - this.keySerializer = keySerializer; - this.valueSerializer = valueSerializer; + this.partitionerPlugin = Plugin.wrapInstance(partitioner, metrics, ProducerConfig.PARTITIONER_CLASS_CONFIG); + this.keySerializerPlugin = Plugin.wrapInstance(keySerializer, metrics, ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); + this.valueSerializerPlugin = Plugin.wrapInstance(valueSerializer, metrics, ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); this.interceptors = interceptors; this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); @@ -1007,8 +1011,8 @@ private void throwIfProducerClosed() { */ @SuppressWarnings("deprecation") private void onNewBatch(String topic, Cluster cluster, int prevPartition) { - assert partitioner != null; - partitioner.onNewBatch(topic, cluster, prevPartition); + assert partitionerPlugin.get() != null; + partitionerPlugin.get().onNewBatch(topic, cluster, prevPartition); } /** @@ -1037,7 +1041,7 @@ private Future doSend(ProducerRecord record, Callback call Cluster cluster = clusterAndWaitTime.cluster; byte[] serializedKey; try { - serializedKey = keySerializer.serialize(record.topic(), record.headers(), record.key()); + serializedKey = keySerializerPlugin.get().serialize(record.topic(), record.headers(), record.key()); } catch (ClassCastException cce) { throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() + @@ -1045,7 +1049,7 @@ private Future doSend(ProducerRecord record, Callback call } byte[] serializedValue; try { - serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value()); + serializedValue = valueSerializerPlugin.get().serialize(record.topic(), record.headers(), record.value()); } catch (ClassCastException cce) { throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() + @@ -1066,7 +1070,7 @@ private Future doSend(ProducerRecord record, Callback call long timestamp = record.timestamp() == null ? nowMs : record.timestamp(); // A custom partitioner may take advantage on the onNewBatch callback. - boolean abortOnNewBatch = partitioner != null; + boolean abortOnNewBatch = partitionerPlugin.get() != null; // Append the record to the accumulator. Note, that the actual partition may be // calculated there and can be accessed via appendCallbacks.topicPartition. 
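(Illustrative aside, not part of the patch: with the producer now wrapping its partitioner and serializers in Plugin, a custom Partitioner can implement Monitorable to register its own metrics. The sketch below is hypothetical — the class name and metric name are made up — and only shows the metric landing in the "plugins" group with the config and class tags added automatically.)

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.Monitorable;
import org.apache.kafka.common.metrics.PluginMetrics;

public class MonitorableRoundRobinPartitioner implements Partitioner, Monitorable {

    private final AtomicLong assignments = new AtomicLong();

    @Override
    public void withPluginMetrics(PluginMetrics metrics) {
        // Registered in group "plugins"; the tags config=partitioner.class and
        // class=MonitorableRoundRobinPartitioner are added automatically by the client.
        MetricName name = metrics.metricName("assignments-total",
            "Number of partitions assigned by this partitioner", Collections.emptyMap());
        metrics.addMetric(name, (Measurable) (config, now) -> assignments.get());
    }

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // Simple round-robin over the topic's partitions; every assignment feeds the metric above.
        long count = assignments.getAndIncrement();
        return (int) (count % cluster.partitionsForTopic(topic).size());
    }

    @Override
    public void configure(Map<String, ?> configs) { }

    @Override
    public void close() { }
}

Setting partitioner.class to such a class would make assignments-total visible through KafkaProducer#metrics(), and the metric is removed again when the producer closes the wrapped plugin.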
@@ -1480,9 +1484,9 @@ private void close(Duration timeout, boolean swallowException) { Utils.closeQuietly(interceptors, "producer interceptors", firstException); Utils.closeQuietly(producerMetrics, "producer metrics wrapper", firstException); Utils.closeQuietly(metrics, "producer metrics", firstException); - Utils.closeQuietly(keySerializer, "producer keySerializer", firstException); - Utils.closeQuietly(valueSerializer, "producer valueSerializer", firstException); - Utils.closeQuietly(partitioner, "producer partitioner", firstException); + Utils.closeQuietly(keySerializerPlugin, "producer keySerializer", firstException); + Utils.closeQuietly(valueSerializerPlugin, "producer valueSerializer", firstException); + Utils.closeQuietly(partitionerPlugin, "producer partitioner", firstException); clientTelemetryReporter.ifPresent(reporter -> Utils.closeQuietly(reporter, "producer telemetry reporter", firstException)); AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics); Throwable exception = firstException.get(); @@ -1509,8 +1513,8 @@ private int partition(ProducerRecord record, byte[] serializedKey, byte[] if (record.partition() != null) return record.partition(); - if (partitioner != null) { - int customPartition = partitioner.partition( + if (partitionerPlugin.get() != null) { + int customPartition = partitionerPlugin.get().partition( record.topic(), record.key(), serializedKey, record.value(), serializedValue, cluster); if (customPartition < 0) { throw new IllegalArgumentException(String.format( diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java b/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java index 3db3c3a31eb7..073c1d973bf9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java @@ -23,6 +23,9 @@ /** * Partitioner Interface + *
+ * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the partitioner to register metrics. The following tags are automatically added to + * all metrics registered: config set to partitioner.class, and class set to the Partitioner class name. */ public interface Partitioner extends Configurable, Closeable { diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java index 48caf98d44a3..5bc4b2c2c852 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java @@ -33,6 +33,8 @@ * ProducerInterceptor callbacks may be called from multiple threads. Interceptor implementation must ensure thread-safety, if needed. *

* Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. + * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the interceptor to register metrics. The following tags are automatically added to + * all metrics registered: config set to interceptor.classes, and class set to the ProducerInterceptor class name. */ public interface ProducerInterceptor extends Configurable, AutoCloseable { /** diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java index 75bf8485e473..9936eef76094 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java @@ -17,10 +17,13 @@ package org.apache.kafka.clients.producer.internals; +import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.clients.producer.ProducerInterceptor; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.internals.Plugin; +import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.record.RecordBatch; import org.slf4j.Logger; @@ -35,10 +38,10 @@ */ public class ProducerInterceptors implements Closeable { private static final Logger log = LoggerFactory.getLogger(ProducerInterceptors.class); - private final List> interceptors; + private final List>> interceptorPlugins; - public ProducerInterceptors(List> interceptors) { - this.interceptors = interceptors; + public ProducerInterceptors(List> interceptors, Metrics metrics) { + this.interceptorPlugins = Plugin.wrapInstances(interceptors, metrics, ProducerConfig.INTERCEPTOR_CLASSES_CONFIG); } /** @@ -57,9 +60,9 @@ public ProducerInterceptors(List> interceptors) { */ public ProducerRecord onSend(ProducerRecord record) { ProducerRecord interceptRecord = record; - for (ProducerInterceptor interceptor : this.interceptors) { + for (Plugin> interceptorPlugin : this.interceptorPlugins) { try { - interceptRecord = interceptor.onSend(interceptRecord); + interceptRecord = interceptorPlugin.get().onSend(interceptRecord); } catch (Exception e) { // do not propagate interceptor exception, log and continue calling other interceptors // be careful not to throw exception from here @@ -84,9 +87,9 @@ public ProducerRecord onSend(ProducerRecord record) { * @param exception The exception thrown during processing of this record. Null if no error occurred. */ public void onAcknowledgement(RecordMetadata metadata, Exception exception) { - for (ProducerInterceptor interceptor : this.interceptors) { + for (Plugin> interceptorPlugin : this.interceptorPlugins) { try { - interceptor.onAcknowledgement(metadata, exception); + interceptorPlugin.get().onAcknowledgement(metadata, exception); } catch (Exception e) { // do not propagate interceptor exceptions, just log log.warn("Error executing interceptor onAcknowledgement callback", e); @@ -105,15 +108,15 @@ public void onAcknowledgement(RecordMetadata metadata, Exception exception) { * @param exception The exception thrown during processing of this record. 
*/ public void onSendError(ProducerRecord record, TopicPartition interceptTopicPartition, Exception exception) { - for (ProducerInterceptor interceptor : this.interceptors) { + for (Plugin> interceptorPlugin : this.interceptorPlugins) { try { if (record == null && interceptTopicPartition == null) { - interceptor.onAcknowledgement(null, exception); + interceptorPlugin.get().onAcknowledgement(null, exception); } else { if (interceptTopicPartition == null) { interceptTopicPartition = extractTopicPartition(record); } - interceptor.onAcknowledgement(new RecordMetadata(interceptTopicPartition, -1, -1, + interceptorPlugin.get().onAcknowledgement(new RecordMetadata(interceptTopicPartition, -1, -1, RecordBatch.NO_TIMESTAMP, -1, -1), exception); } } catch (Exception e) { @@ -132,9 +135,9 @@ public static TopicPartition extractTopicPartition(ProducerRecord r */ @Override public void close() { - for (ProducerInterceptor interceptor : this.interceptors) { + for (Plugin> interceptorPlugin : this.interceptorPlugins) { try { - interceptor.close(); + interceptorPlugin.close(); } catch (Exception e) { log.error("Failed to close producer interceptor ", e); } diff --git a/clients/src/main/java/org/apache/kafka/common/internals/Plugin.java b/clients/src/main/java/org/apache/kafka/common/internals/Plugin.java new file mode 100644 index 000000000000..8aba8059b259 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/internals/Plugin.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.internals; + +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Monitorable; +import org.apache.kafka.common.metrics.internals.PluginMetricsImpl; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Supplier; + +public class Plugin implements Supplier, AutoCloseable { + + private final T instance; + private final Optional pluginMetrics; + + private Plugin(T instance, PluginMetricsImpl pluginMetrics) { + this.instance = instance; + this.pluginMetrics = Optional.ofNullable(pluginMetrics); + } + + public static Plugin wrapInstance(T instance, Metrics metrics, String key) { + return wrapInstance(instance, metrics, () -> tags(key, instance)); + } + + private static Map tags(String key, T instance) { + Map tags = new LinkedHashMap<>(); + tags.put("config", key); + tags.put("class", instance.getClass().getSimpleName()); + return tags; + } + + public static List> wrapInstances(List instances, Metrics metrics, String key) { + List> plugins = new ArrayList<>(); + for (T instance : instances) { + plugins.add(wrapInstance(instance, metrics, key)); + } + return plugins; + } + + public static Plugin wrapInstance(T instance, Metrics metrics, Supplier> tagsSupplier) { + PluginMetricsImpl pluginMetrics = null; + if (instance instanceof Monitorable && metrics != null) { + pluginMetrics = new PluginMetricsImpl(metrics, tagsSupplier.get()); + ((Monitorable) instance).withPluginMetrics(pluginMetrics); + } + return new Plugin<>(instance, pluginMetrics); + } + + @Override + public T get() { + return instance; + } + + @Override + public void close() throws Exception { + if (pluginMetrics.isPresent()) pluginMetrics.get().close(); + if (instance instanceof AutoCloseable) ((AutoCloseable) instance).close(); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/Monitorable.java b/clients/src/main/java/org/apache/kafka/common/metrics/Monitorable.java new file mode 100644 index 000000000000..fa5a292bf316 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/metrics/Monitorable.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.metrics; + +/** + * Plugins can implement this interface to register their own metrics. + */ +public interface Monitorable { + + /** + * Provides a {@link PluginMetrics} instance from the component that instantiates the plugin. + * PluginMetrics can be used by the plugin to register and unregister metrics + * at any point in their lifecycle prior to their close method being called. + * Any metrics registered will be automatically removed when the plugin is closed. 
+ */ + void withPluginMetrics(PluginMetrics metrics); + +} diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/PluginMetrics.java b/clients/src/main/java/org/apache/kafka/common/metrics/PluginMetrics.java new file mode 100644 index 000000000000..e49c5c30fa78 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/metrics/PluginMetrics.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.metrics; + +import org.apache.kafka.common.MetricName; + +import java.util.Map; + +/** + * This allows plugins to register metrics and sensors. + * Any metrics registered by the plugin is automatically removed when the plugin is closed. + */ +public interface PluginMetrics { + + /** + * Create a {@link MetricName} with the given name, description and tags. The group will be set to "plugins" + * Tags to uniquely identify the plugins are automatically added to the provided tags + * + * @param name The name of the metric + * @param description A human-readable description to include in the metric + * @param tags Additional tags for the metric + * @throws IllegalArgumentException if any of the tag names collide with the default tags for the plugin + */ + MetricName metricName(String name, String description, Map tags); + + /** + * Add a metric to monitor an object that implements {@link MetricValueProvider}. This metric won't be associated with any + * sensor. This is a way to expose existing values as metrics. + * + * @param metricName The name of the metric + * @param metricValueProvider The metric value provider associated with this metric + * @throws IllegalArgumentException if a metric with same name already exists + */ + void addMetric(MetricName metricName, MetricValueProvider metricValueProvider); + + /** + * Remove a metric if it exists. + * + * @param metricName The name of the metric + * @throws IllegalArgumentException if a metric with this name does not exist + */ + void removeMetric(MetricName metricName); + + /** + * Create a {@link Sensor} with the given unique name. The name must only be unique for the plugin, so different + * plugins can use the same names. + * + * @param name The sensor name + * @return The sensor + * @throws IllegalArgumentException if a sensor with same name already exists for this plugin + */ + Sensor addSensor(String name); + + /** + * Remove a {@link Sensor} and its associated metrics. 
+ * + * @param name The name of the sensor to be removed + * @throws IllegalArgumentException if a sensor with this name does not exist + */ + void removeSensor(String name); +} diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/internals/PluginMetricsImpl.java b/clients/src/main/java/org/apache/kafka/common/metrics/internals/PluginMetricsImpl.java new file mode 100644 index 000000000000..08df2e925ce0 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/metrics/internals/PluginMetricsImpl.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.metrics.internals; + +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.metrics.MetricValueProvider; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.PluginMetrics; +import org.apache.kafka.common.metrics.Sensor; + +import java.io.Closeable; +import java.io.IOException; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; + +public class PluginMetricsImpl implements PluginMetrics, Closeable { + + private static final String GROUP = "plugins"; + + private final Metrics metrics; + private final Map tags; + private final Set metricNames = new HashSet<>(); + private final Set sensors = new HashSet<>(); + + public PluginMetricsImpl(Metrics metrics, Map tags) { + this.metrics = metrics; + this.tags = tags; + } + + @Override + public MetricName metricName(String name, String description, Map tags) { + for (String tagName : tags.keySet()) { + if (this.tags.containsKey(tagName)) { + throw new IllegalArgumentException("Cannot use " + tagName + " as a tag name"); + } + } + Map metricsTags = new LinkedHashMap<>(this.tags); + metricsTags.putAll(tags); + return metrics.metricName(name, GROUP, description, metricsTags); + } + + @Override + public void addMetric(MetricName metricName, MetricValueProvider metricValueProvider) { + if (metricNames.contains(metricName)) { + throw new IllegalArgumentException("Metric " + metricName + " already exists"); + } + metrics.addMetric(metricName, metricValueProvider); + metricNames.add(metricName); + } + + @Override + public void removeMetric(MetricName metricName) { + if (metricNames.contains(metricName)) { + metrics.removeMetric(metricName); + metricNames.remove(metricName); + } else { + throw new IllegalArgumentException("Unknown metric " + metricName); + } + } + + @Override + public Sensor addSensor(String name) { + if (sensors.contains(name)) { + throw new IllegalArgumentException("Sensor " + name + " already exists"); + } + Sensor sensor = metrics.sensor(name); + sensors.add(name); + return sensor; + } + + @Override + public void removeSensor(String name) { + if 
(sensors.contains(name)) { + metrics.removeSensor(name); + sensors.remove(name); + } else { + throw new IllegalArgumentException("Unknown sensor " + name); + } + } + + @Override + public void close() throws IOException { + for (String sensor : sensors) { + metrics.removeSensor(sensor); + } + for (MetricName metricName : metricNames) { + metrics.removeMetric(metricName); + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java b/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java index 9fd8d50be744..8747e827e6d6 100644 --- a/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java +++ b/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java @@ -29,7 +29,8 @@ * A class that implements this interface is expected to have a constructor with no parameters. *

* Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. - * + * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the deserializer to register metrics. The following tags are automatically added to + * all metrics registered: config set to either key.deserializer or value.deserializer, and class set to the Deserializer class name. * @param Type to be deserialized into. */ public interface Deserializer extends Closeable { diff --git a/clients/src/main/java/org/apache/kafka/common/serialization/Serializer.java b/clients/src/main/java/org/apache/kafka/common/serialization/Serializer.java index 144b5ab945eb..03eab512aa71 100644 --- a/clients/src/main/java/org/apache/kafka/common/serialization/Serializer.java +++ b/clients/src/main/java/org/apache/kafka/common/serialization/Serializer.java @@ -27,7 +27,8 @@ * A class that implements this interface is expected to have a constructor with no parameter. *

* Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. - * + * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the serializer to register metrics. The following tags are automatically added to + * all metrics registered: config set to either key.serializer or value.serializer, and class set to the Serializer class name. * @param Type to be serialized from. */ public interface Serializer extends Closeable { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java index c260fa48c019..cd9980c082b2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java @@ -58,7 +58,10 @@ import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse; import org.apache.kafka.common.message.SyncGroupResponseData; import org.apache.kafka.common.metrics.JmxReporter; +import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Monitorable; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.network.Selectable; @@ -93,6 +96,7 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.test.MockConsumerInterceptor; +import org.apache.kafka.test.MockDeserializer; import org.apache.kafka.test.MockMetricsReporter; import org.apache.kafka.test.TestUtils; @@ -3536,4 +3540,82 @@ public void configure(Map configs) { CLIENT_IDS.add(configs.get(ConsumerConfig.CLIENT_ID_CONFIG).toString()); } } + + @ParameterizedTest + @EnumSource(value = GroupProtocol.class) + void testMonitorablePlugins(GroupProtocol groupProtocol) { + try { + String clientId = "testMonitorablePlugins"; + Map configs = new HashMap<>(); + configs.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId); + configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, MonitorableDeserializer.class.getName()); + configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, MonitorableDeserializer.class.getName()); + configs.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MonitorableInterceptor.class.getName()); + + KafkaConsumer consumer = new KafkaConsumer<>(configs); + Map metrics = consumer.metrics(); + + MetricName expectedKeyDeserializerMetric = expectedMetricName( + clientId, + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, + MonitorableDeserializer.class); + assertTrue(metrics.containsKey(expectedKeyDeserializerMetric)); + assertEquals(VALUE, metrics.get(expectedKeyDeserializerMetric).metricValue()); + + MetricName expectedValueDeserializerMetric = expectedMetricName( + clientId, + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, + MonitorableDeserializer.class); + assertTrue(metrics.containsKey(expectedValueDeserializerMetric)); + assertEquals(VALUE, metrics.get(expectedValueDeserializerMetric).metricValue()); + + MetricName expectedInterceptorMetric = expectedMetricName( + clientId, + ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, + MonitorableInterceptor.class); + 
assertTrue(metrics.containsKey(expectedInterceptorMetric)); + assertEquals(VALUE, metrics.get(expectedInterceptorMetric).metricValue()); + + consumer.close(Duration.ZERO); + metrics = consumer.metrics(); + assertFalse(metrics.containsKey(expectedKeyDeserializerMetric)); + assertFalse(metrics.containsKey(expectedValueDeserializerMetric)); + assertFalse(metrics.containsKey(expectedInterceptorMetric)); + } finally { + MockConsumerInterceptor.resetCounters(); + } + } + + private MetricName expectedMetricName(String clientId, String config, Class clazz) { + Map expectedTags = new LinkedHashMap<>(); + expectedTags.put("client-id", clientId); + expectedTags.put("config", config); + expectedTags.put("class", clazz.getSimpleName()); + expectedTags.putAll(TAGS); + return new MetricName(NAME, "plugins", DESCRIPTION, expectedTags); + } + + private static final String NAME = "name"; + private static final String DESCRIPTION = "description"; + private static final Map TAGS = Collections.singletonMap("k", "v"); + private static final double VALUE = 123.0; + + public static class MonitorableDeserializer extends MockDeserializer implements Monitorable { + + @Override + public void withPluginMetrics(PluginMetrics metrics) { + MetricName name = metrics.metricName(NAME, DESCRIPTION, TAGS); + metrics.addMetric(name, (Measurable) (config, now) -> VALUE); + } + } + + public static class MonitorableInterceptor extends MockConsumerInterceptor implements Monitorable { + + @Override + public void withPluginMetrics(PluginMetrics metrics) { + MetricName name = metrics.metricName(NAME, DESCRIPTION, TAGS); + metrics.addMetric(name, (Measurable) (config, now) -> VALUE); + } + } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java index 54a41587b06a..4c6c21ff1497 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java @@ -149,6 +149,7 @@ public class AsyncKafkaConsumerTest { private AsyncKafkaConsumer consumer = null; private Time time = new MockTime(0); + private final Metrics metrics = new Metrics(); private final FetchCollector fetchCollector = mock(FetchCollector.class); private final ApplicationEventHandler applicationEventHandler = mock(ApplicationEventHandler.class); private final ConsumerMetadata metadata = mock(ConsumerMetadata.class); @@ -229,7 +230,7 @@ private AsyncKafkaConsumer newConsumer( return new AsyncKafkaConsumer<>( new LogContext(), clientId, - new Deserializers<>(new StringDeserializer(), new StringDeserializer()), + new Deserializers<>(new StringDeserializer(), new StringDeserializer(), metrics), fetchBuffer, fetchCollector, interceptors, @@ -238,7 +239,7 @@ private AsyncKafkaConsumer newConsumer( backgroundEventQueue, backgroundEventReaper, rebalanceListenerInvoker, - new Metrics(), + metrics, subscriptions, metadata, retryBackoffMs, @@ -663,7 +664,7 @@ public void testFailedPartitionRevocationOnClose() { SubscriptionState subscriptions = mock(SubscriptionState.class); consumer = spy(newConsumer( mock(FetchBuffer.class), - new ConsumerInterceptors<>(Collections.emptyList()), + new ConsumerInterceptors<>(Collections.emptyList(), metrics), mock(ConsumerRebalanceListenerInvoker.class), subscriptions, "group-id", @@ -1517,7 +1518,7 @@ public void testEnsurePollEventSentOnConsumerPoll() { SubscriptionState 
subscriptions = new SubscriptionState(new LogContext(), OffsetResetStrategy.NONE); consumer = newConsumer( mock(FetchBuffer.class), - new ConsumerInterceptors<>(Collections.emptyList()), + new ConsumerInterceptors<>(Collections.emptyList(), metrics), mock(ConsumerRebalanceListenerInvoker.class), subscriptions, "group-id", diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java index f7be3a58ffd7..cbb5552cd870 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java @@ -233,11 +233,11 @@ private CompletedFetch newCompletedFetch(long fetchOffset, } private static Deserializers newUuidDeserializers() { - return new Deserializers<>(new UUIDDeserializer(), new UUIDDeserializer()); + return new Deserializers<>(new UUIDDeserializer(), new UUIDDeserializer(), null); } private static Deserializers newStringDeserializers() { - return new Deserializers<>(new StringDeserializer(), new StringDeserializer()); + return new Deserializers<>(new StringDeserializer(), new StringDeserializer(), null); } private static FetchConfig newFetchConfig(IsolationLevel isolationLevel, boolean checkCrcs) { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java index 5a7d85369ea0..e6b091e2fd20 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java @@ -116,7 +116,7 @@ public void testOnConsumeChain() { FilterConsumerInterceptor interceptor2 = new FilterConsumerInterceptor<>(filterPartition2); interceptorList.add(interceptor1); interceptorList.add(interceptor2); - ConsumerInterceptors interceptors = new ConsumerInterceptors<>(interceptorList); + ConsumerInterceptors interceptors = new ConsumerInterceptors<>(interceptorList, null); // verify that onConsumer modifies ConsumerRecords Map>> records = new HashMap<>(); @@ -177,7 +177,7 @@ public void testOnCommitChain() { FilterConsumerInterceptor interceptor2 = new FilterConsumerInterceptor<>(filterPartition2); interceptorList.add(interceptor1); interceptorList.add(interceptor2); - ConsumerInterceptors interceptors = new ConsumerInterceptors<>(interceptorList); + ConsumerInterceptors interceptors = new ConsumerInterceptors<>(interceptorList, null); // verify that onCommit is called for all interceptors in the chain Map offsets = new HashMap<>(); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java index 5960fd28fbf0..914eceebf5aa 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java @@ -712,7 +712,7 @@ private FetchCollector createFetchCollector(final SubscriptionSt mock(ConsumerMetadata.class), subscriptions, new FetchConfig(new ConsumerConfig(consumerProps)), - new Deserializers<>(new StringDeserializer(), new StringDeserializer()), + new Deserializers<>(new StringDeserializer(), new StringDeserializer(), null), 
mock(FetchMetricsManager.class), new MockTime() ); @@ -741,12 +741,11 @@ private void buildDependencies(int maxPollRecords, IsolationLevel isolationLevel Properties p = consumerProperties(maxPollRecords); ConsumerConfig config = new ConsumerConfig(p); - deserializers = new Deserializers<>(new StringDeserializer(), new StringDeserializer()); - subscriptions = createSubscriptionState(config, logContext); fetchConfig = createFetchConfig(config, isolationLevel); Metrics metrics = createMetrics(config, time); metricsManager = createFetchMetricsManager(metrics); + deserializers = new Deserializers<>(new StringDeserializer(), new StringDeserializer(), metrics); metadata = new ConsumerMetadata( 0, 1000, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java index 3e3f70a7443f..ec9e1db116d0 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java @@ -3635,7 +3635,7 @@ private void buildFetcher(MetricConfig metricConfig, SubscriptionState subscriptionState, LogContext logContext) { buildDependencies(metricConfig, metadataExpireMs, subscriptionState, logContext); - Deserializers deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); + Deserializers deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); FetchConfig fetchConfig = new FetchConfig( minBytes, maxBytes, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java index 6c29d3df82b8..8b1d581275e2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java @@ -2839,7 +2839,7 @@ public void testFetcherConcurrency() throws Exception { isolationLevel, apiVersions); - Deserializers deserializers = new Deserializers<>(new ByteArrayDeserializer(), new ByteArrayDeserializer()); + Deserializers deserializers = new Deserializers<>(new ByteArrayDeserializer(), new ByteArrayDeserializer(), metrics); FetchConfig fetchConfig = new FetchConfig( minBytes, maxBytes, @@ -3895,7 +3895,7 @@ private void buildFetcher(MetricConfig metricConfig, metadata, subscriptionState, fetchConfig, - new Deserializers<>(keyDeserializer, valueDeserializer), + new Deserializers<>(keyDeserializer, valueDeserializer, metrics), metricsManager, time, apiVersions)); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java index 4973624b0a0a..1974be12b99e 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java @@ -1264,7 +1264,7 @@ public void testOffsetValidationSkippedForOldBroker() { metadata, subscriptions, fetchConfig, - new Deserializers<>(new ByteArrayDeserializer(), new ByteArrayDeserializer()), + new Deserializers<>(new ByteArrayDeserializer(), new ByteArrayDeserializer(), metrics), new FetchMetricsManager(metrics, metricsRegistry), time, apiVersions); diff --git 
a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java index b117af177b17..3b488d3d002b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java @@ -374,11 +374,11 @@ private ShareCompletedFetch newShareCompletedFetch(ShareFetchResponseData.Partit } private static Deserializers newUuidDeserializers() { - return new Deserializers<>(new UUIDDeserializer(), new UUIDDeserializer()); + return new Deserializers<>(new UUIDDeserializer(), new UUIDDeserializer(), null); } private static Deserializers newStringDeserializers() { - return new Deserializers<>(new StringDeserializer(), new StringDeserializer()); + return new Deserializers<>(new StringDeserializer(), new StringDeserializer(), null); } private Records newRecords(long baseOffset, int count) { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java index 6af9509d04b9..af9c3850d637 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java @@ -1475,7 +1475,7 @@ private void buildRequestManager(MetricConfig metricConfig, SubscriptionState subscriptionState, LogContext logContext) { buildDependencies(metricConfig, subscriptionState, logContext); - Deserializers deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); + Deserializers deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); int maxWaitMs = 0; int maxBytes = Integer.MAX_VALUE; int fetchSize = 1000; diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java index fb6a57ac0399..ae64dcd94d1a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java @@ -236,8 +236,8 @@ private void buildDependencies() { ConsumerConfig config = new ConsumerConfig(p); - deserializers = new Deserializers<>(new StringDeserializer(), new StringDeserializer()); Metrics metrics = createMetrics(config, Time.SYSTEM); + deserializers = new Deserializers<>(new StringDeserializer(), new StringDeserializer(), metrics); ShareFetchMetricsManager shareFetchMetricsManager = createShareFetchMetricsManager(metrics); Set partitionSet = new HashSet<>(); partitionSet.add(topicAPartition0.topicPartition()); diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java index 6db988e3d222..f10df453f697 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java @@ -54,7 +54,10 @@ import org.apache.kafka.common.message.InitProducerIdResponseData; import org.apache.kafka.common.message.TxnOffsetCommitRequestData; import org.apache.kafka.common.metrics.JmxReporter; 
+import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Monitorable; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.network.Selectable; @@ -104,6 +107,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -2121,7 +2125,7 @@ public void testCallbackAndInterceptorHandleError() { String invalidTopicName = "topic abc"; // Invalid topic name due to space ProducerInterceptors producerInterceptors = - new ProducerInterceptors<>(Collections.singletonList(new MockProducerInterceptor())); + new ProducerInterceptors<>(Collections.singletonList(new MockProducerInterceptor()), null); try (Producer producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), producerMetadata, client, producerInterceptors, time)) { @@ -2507,7 +2511,7 @@ public KafkaProducer newKafkaProducer() { ProducerConfig producerConfig = new ProducerConfig( ProducerConfig.appendSerializerToConfig(configs, serializer, serializer)); - ProducerInterceptors interceptors = new ProducerInterceptors<>(this.interceptors); + ProducerInterceptors interceptors = new ProducerInterceptors<>(this.interceptors, metrics); return new KafkaProducer<>( producerConfig, @@ -2547,4 +2551,100 @@ void testDeliveryTimeoutAndLingerMsConfig() { assertDoesNotThrow(() -> new KafkaProducer<>(configs, new StringSerializer(), new StringSerializer()).close()); } + @Test + void testMonitorablePlugins() { + try { + String clientId = "testMonitorablePlugins"; + Map configs = new HashMap<>(); + configs.put(ProducerConfig.CLIENT_ID_CONFIG, clientId); + configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, MonitorableSerializer.class.getName()); + configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, MonitorableSerializer.class.getName()); + configs.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, MonitorablePartitioner.class.getName()); + configs.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MonitorableInterceptor.class.getName()); + configs.put(MockProducerInterceptor.APPEND_STRING_PROP, ""); + + KafkaProducer producer = new KafkaProducer<>(configs); + Map metrics = producer.metrics(); + + MetricName expectedKeySerializerMetric = expectedMetricName( + clientId, + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, + MonitorableSerializer.class); + assertTrue(metrics.containsKey(expectedKeySerializerMetric)); + assertEquals(VALUE, metrics.get(expectedKeySerializerMetric).metricValue()); + + MetricName expectedValueSerializerMetric = expectedMetricName( + clientId, + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, + MonitorableSerializer.class); + assertTrue(metrics.containsKey(expectedValueSerializerMetric)); + assertEquals(VALUE, metrics.get(expectedValueSerializerMetric).metricValue()); + + MetricName expectedPartitionerMetric = expectedMetricName( + clientId, + ProducerConfig.PARTITIONER_CLASS_CONFIG, + MonitorablePartitioner.class); + assertTrue(metrics.containsKey(expectedPartitionerMetric)); + assertEquals(VALUE, metrics.get(expectedPartitionerMetric).metricValue()); + + MetricName expectedInterceptorMetric = expectedMetricName( + clientId, + ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, + MonitorableInterceptor.class); + 
assertTrue(metrics.containsKey(expectedInterceptorMetric)); + assertEquals(VALUE, metrics.get(expectedInterceptorMetric).metricValue()); + + producer.close(); + metrics = producer.metrics(); + assertFalse(metrics.containsKey(expectedKeySerializerMetric)); + assertFalse(metrics.containsKey(expectedValueSerializerMetric)); + assertFalse(metrics.containsKey(expectedPartitionerMetric)); + assertFalse(metrics.containsKey(expectedInterceptorMetric)); + } finally { + MockProducerInterceptor.resetCounters(); + } + } + + private MetricName expectedMetricName(String clientId, String config, Class clazz) { + Map expectedTags = new LinkedHashMap<>(); + expectedTags.put("client-id", clientId); + expectedTags.put("config", config); + expectedTags.put("class", clazz.getSimpleName()); + expectedTags.putAll(TAGS); + return new MetricName(NAME, "plugins", DESCRIPTION, expectedTags); + } + + private static final String NAME = "name"; + private static final String DESCRIPTION = "description"; + private static final Map TAGS = Collections.singletonMap("k", "v"); + private static final double VALUE = 123.0; + + public static class MonitorableSerializer extends MockSerializer implements Monitorable { + + @Override + public void withPluginMetrics(PluginMetrics metrics) { + MetricName name = metrics.metricName(NAME, DESCRIPTION, TAGS); + metrics.addMetric(name, (Measurable) (config, now) -> VALUE); + } + } + + public static class MonitorablePartitioner extends MockPartitioner implements Monitorable { + + @Override + public void withPluginMetrics(PluginMetrics metrics) { + MetricName name = metrics.metricName(NAME, DESCRIPTION, TAGS); + metrics.addMetric(name, (Measurable) (config, now) -> VALUE); + } + } + + public static class MonitorableInterceptor extends MockProducerInterceptor implements Monitorable { + + @Override + public void withPluginMetrics(PluginMetrics metrics) { + MetricName name = metrics.metricName(NAME, DESCRIPTION, TAGS); + metrics.addMetric(name, (Measurable) (config, now) -> VALUE); + } + } + } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java index 13d4957a78e4..853b27b25511 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java @@ -104,7 +104,7 @@ public void testOnSendChain() { AppendProducerInterceptor interceptor2 = new AppendProducerInterceptor("Two"); interceptorList.add(interceptor1); interceptorList.add(interceptor2); - ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList); + ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList, null); // verify that onSend() mutates the record as expected ProducerRecord interceptedRecord = interceptors.onSend(producerRecord); @@ -142,7 +142,7 @@ public void testOnAcknowledgementChain() { AppendProducerInterceptor interceptor2 = new AppendProducerInterceptor("Two"); interceptorList.add(interceptor1); interceptorList.add(interceptor2); - ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList); + ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList, null); // verify onAck is called on all interceptors RecordMetadata meta = new RecordMetadata(tp, 0, 0, 0, 0, 0); @@ -166,7 +166,7 @@ public void testOnAcknowledgementWithErrorChain() { List> 
interceptorList = new ArrayList<>(); AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); interceptorList.add(interceptor1); - ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList); + ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList, null); // verify that metadata contains both topic and partition interceptors.onSendError(producerRecord, diff --git a/clients/src/test/java/org/apache/kafka/common/internals/PluginTest.java b/clients/src/test/java/org/apache/kafka/common/internals/PluginTest.java new file mode 100644 index 000000000000..7f30cbe3f95d --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/internals/PluginTest.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.internals; + +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Monitorable; +import org.apache.kafka.common.metrics.PluginMetrics; + +import org.junit.jupiter.api.Test; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class PluginTest { + + private static final String CONFIG = "some.config"; + private static final Metrics METRICS = new Metrics(); + + static class SomePlugin implements Closeable { + + PluginMetrics pluginMetrics; + boolean closed; + + @Override + public void close() throws IOException { + closed = true; + } + } + + static class SomeMonitorablePlugin extends SomePlugin implements Monitorable { + + @Override + public void withPluginMetrics(PluginMetrics metrics) { + pluginMetrics = metrics; + } + } + + @Test + void testWrapInstance() throws Exception { + SomeMonitorablePlugin someMonitorablePlugin = new SomeMonitorablePlugin(); + Plugin pluginMonitorable = Plugin.wrapInstance(someMonitorablePlugin, METRICS, CONFIG); + checkPlugin(pluginMonitorable, someMonitorablePlugin, true); + + someMonitorablePlugin = new SomeMonitorablePlugin(); + assertFalse(someMonitorablePlugin.closed); + pluginMonitorable = Plugin.wrapInstance(someMonitorablePlugin, null, CONFIG); + checkPlugin(pluginMonitorable, someMonitorablePlugin, false); + + SomePlugin somePlugin = new SomePlugin(); + assertFalse(somePlugin.closed); + Plugin plugin = Plugin.wrapInstance(somePlugin, null, CONFIG); + assertSame(somePlugin, plugin.get()); + assertNull(somePlugin.pluginMetrics); + 
plugin.close(); + assertTrue(somePlugin.closed); + } + + @Test + void testWrapInstances() throws Exception { + List someMonitorablePlugins = Arrays.asList(new SomeMonitorablePlugin(), new SomeMonitorablePlugin()); + List> pluginsMonitorable = Plugin.wrapInstances(someMonitorablePlugins, METRICS, CONFIG); + assertEquals(someMonitorablePlugins.size(), pluginsMonitorable.size()); + for (int i = 0; i < pluginsMonitorable.size(); i++) { + Plugin plugin = pluginsMonitorable.get(i); + SomeMonitorablePlugin somePlugin = someMonitorablePlugins.get(i); + checkPlugin(plugin, somePlugin, true); + } + + someMonitorablePlugins = Arrays.asList(new SomeMonitorablePlugin(), new SomeMonitorablePlugin()); + pluginsMonitorable = Plugin.wrapInstances(someMonitorablePlugins, null, CONFIG); + assertEquals(someMonitorablePlugins.size(), pluginsMonitorable.size()); + for (int i = 0; i < pluginsMonitorable.size(); i++) { + Plugin plugin = pluginsMonitorable.get(i); + SomeMonitorablePlugin somePlugin = someMonitorablePlugins.get(i); + checkPlugin(plugin, somePlugin, false); + } + + List somePlugins = Arrays.asList(new SomePlugin(), new SomePlugin()); + List> plugins = Plugin.wrapInstances(somePlugins, METRICS, CONFIG); + assertEquals(somePlugins.size(), plugins.size()); + for (int i = 0; i < plugins.size(); i++) { + Plugin plugin = plugins.get(i); + SomePlugin somePlugin = somePlugins.get(i); + assertSame(somePlugin, plugin.get()); + assertNull(somePlugin.pluginMetrics); + plugin.close(); + assertTrue(somePlugin.closed); + } + } + + private void checkPlugin(Plugin plugin, SomeMonitorablePlugin instance, boolean metricsSet) throws Exception { + assertSame(instance, plugin.get()); + if (metricsSet) { + assertNotNull(instance.pluginMetrics); + } else { + assertNull(instance.pluginMetrics); + } + plugin.close(); + assertTrue(instance.closed); + } +} diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/internals/PluginMetricsImplTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/internals/PluginMetricsImplTest.java new file mode 100644 index 000000000000..0ff349e361d6 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/metrics/internals/PluginMetricsImplTest.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.common.metrics.internals; + +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.stats.Max; +import org.apache.kafka.common.metrics.stats.Rate; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class PluginMetricsImplTest { + + private final Map extraTags = Collections.singletonMap("my-tag", "my-value"); + private Map tags; + private Metrics metrics; + private int initialMetrics; + + @BeforeEach + void setup() { + metrics = new Metrics(); + initialMetrics = metrics.metrics().size(); + tags = new LinkedHashMap<>(); + tags.put("k1", "v1"); + tags.put("k2", "v2"); + } + + @Test + void testMetricName() { + PluginMetricsImpl pmi = new PluginMetricsImpl(metrics, tags); + MetricName metricName = pmi.metricName("name", "description", extraTags); + assertEquals("name", metricName.name()); + assertEquals("plugins", metricName.group()); + assertEquals("description", metricName.description()); + Map expectedTags = new LinkedHashMap<>(tags); + expectedTags.putAll(extraTags); + assertEquals(expectedTags, metricName.tags()); + } + + @Test + void testDuplicateTagName() { + PluginMetricsImpl pmi = new PluginMetricsImpl(metrics, tags); + assertThrows(IllegalArgumentException.class, + () -> pmi.metricName("name", "description", Collections.singletonMap("k1", "value"))); + } + + @Test + void testAddRemoveMetrics() { + PluginMetricsImpl pmi = new PluginMetricsImpl(metrics, tags); + MetricName metricName = pmi.metricName("name", "description", extraTags); + pmi.addMetric(metricName, (Measurable) (config, now) -> 0.0); + assertEquals(initialMetrics + 1, metrics.metrics().size()); + + assertThrows(IllegalArgumentException.class, () -> pmi.addMetric(metricName, (Measurable) (config, now) -> 0.0)); + + pmi.removeMetric(metricName); + assertEquals(initialMetrics, metrics.metrics().size()); + + assertThrows(IllegalArgumentException.class, () -> pmi.removeMetric(metricName)); + } + + @Test + void testAddRemoveSensor() { + PluginMetricsImpl pmi = new PluginMetricsImpl(metrics, tags); + String sensorName = "my-sensor"; + MetricName metricName = pmi.metricName("name", "description", extraTags); + Sensor sensor = pmi.addSensor(sensorName); + assertEquals(initialMetrics, metrics.metrics().size()); + sensor.add(metricName, new Rate()); + sensor.add(metricName, new Max()); + assertEquals(initialMetrics + 1, metrics.metrics().size()); + + assertThrows(IllegalArgumentException.class, () -> pmi.addSensor(sensorName)); + + pmi.removeSensor(sensorName); + assertEquals(initialMetrics, metrics.metrics().size()); + + assertThrows(IllegalArgumentException.class, () -> pmi.removeSensor(sensorName)); + } + + @Test + void testClose() throws IOException { + PluginMetricsImpl pmi = new PluginMetricsImpl(metrics, tags); + String sensorName = "my-sensor"; + MetricName metricName1 = pmi.metricName("name1", "description", extraTags); + Sensor sensor = pmi.addSensor(sensorName); + sensor.add(metricName1, new Rate()); + MetricName metricName2 = pmi.metricName("name2", "description", extraTags); + pmi.addMetric(metricName2, (Measurable) (config, now) -> 1.0); 
+ + assertEquals(initialMetrics + 2, metrics.metrics().size()); + pmi.close(); + assertEquals(initialMetrics, metrics.metrics().size()); + } +} diff --git a/connect/api/src/main/java/org/apache/kafka/connect/connector/ConnectorContext.java b/connect/api/src/main/java/org/apache/kafka/connect/connector/ConnectorContext.java index 10151fa68d06..b648fe0684da 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/connector/ConnectorContext.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/connector/ConnectorContext.java @@ -16,6 +16,8 @@ */ package org.apache.kafka.connect.connector; +import org.apache.kafka.common.metrics.PluginMetrics; + /** * ConnectorContext allows {@link Connector}s to proactively interact with the Kafka Connect runtime. */ @@ -33,4 +35,26 @@ public interface ConnectorContext { * @param e Exception to be raised. */ void raiseError(Exception e); + + /** + * Get a {@link PluginMetrics} that can be used to define metrics + * + *

<p>This method was added in Apache Kafka 4.0. Connectors that use this method but want to + * maintain backward compatibility so they can also be deployed to older Connect runtimes + * should guard the call to this method with a try-catch block, since calling this method will result in a + * {@link NoSuchMethodError} or {@link NoClassDefFoundError} when the connector is deployed to + * Connect runtimes older than Kafka 4.0. For example: + * <pre>

+     *     PluginMetrics pluginMetrics;
+     *     try {
+     *         pluginMetrics = context.pluginMetrics();
+     *     } catch (NoSuchMethodError | NoClassDefFoundError e) {
+     *         pluginMetrics = null;
+     *     }
+     * </pre>
+ * + * @return the pluginMetrics instance + * @since 4.0 + */ + PluginMetrics pluginMetrics(); } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.java b/connect/api/src/main/java/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.java index d41c5cd5b3c6..57c3a371fb54 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.java @@ -30,6 +30,11 @@ *
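For illustration only, the guard shown in the javadoc above can be paired with metric registration inside a connector's start() method. This is a minimal sketch and not part of the patch: the class name, metric name and counter are hypothetical, it assumes the protected context field inherited from Connector, and the PluginMetrics calls mirror the metricName/addMetric usage exercised in PluginMetricsImplTest above.

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.PluginMetrics;
import org.apache.kafka.connect.source.SourceConnector;

public abstract class ExamplePluginMetricsConnector extends SourceConnector {

    private final AtomicLong startCount = new AtomicLong();

    @Override
    public void start(Map<String, String> props) {
        startCount.incrementAndGet();
        PluginMetrics pluginMetrics;
        try {
            pluginMetrics = context.pluginMetrics(); // only available on Kafka 4.0+ runtimes
        } catch (NoSuchMethodError | NoClassDefFoundError e) {
            pluginMetrics = null;
        }
        if (pluginMetrics != null) {
            // Register a simple gauge; the runtime scopes and cleans it up with the connector
            MetricName name = pluginMetrics.metricName("start-count",
                    "Number of times start() has been invoked", Collections.emptyMap());
            pluginMetrics.addMetric(name, (Measurable) (config, now) -> startCount.get());
        }
    }
}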

Kafka Connect discovers implementations of this interface using the Java {@link java.util.ServiceLoader} mechanism. * To support this, implementations of this interface should also contain a service provider configuration file in * {@code META-INF/services/org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy}. + *
+ * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the policy to register metrics. + * The following tags are automatically added to all metrics registered: config set to + * connector.client.config.override.policy, and class set to the + * ConnectorClientConfigOverridePolicy class name. */ public interface ConnectorClientConfigOverridePolicy extends Configurable, AutoCloseable { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java index 73f87dd04ee0..87cf16b7060c 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java @@ -43,6 +43,10 @@ * *
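As a sketch of the Monitorable hook this javadoc describes (illustrative only, not included in the patch): a custom policy can register a metric the same way the monitorable test plugins elsewhere in this patch do. The class name and metric are hypothetical, and extending the built-in AllConnectorClientConfigOverridePolicy is just an assumption to keep the example short.

import java.util.Collections;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.Monitorable;
import org.apache.kafka.common.metrics.PluginMetrics;
import org.apache.kafka.connect.connector.policy.AllConnectorClientConfigOverridePolicy;

public class MonitorableOverridePolicy extends AllConnectorClientConfigOverridePolicy implements Monitorable {

    @Override
    public void withPluginMetrics(PluginMetrics metrics) {
        // The runtime automatically adds the config/class tags described in the javadoc above
        MetricName name = metrics.metricName("policy-marker",
                "Constant gauge showing the policy is active", Collections.emptyMap());
        metrics.addMetric(name, (Measurable) (config, now) -> 1.0);
    }
}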

When the Connect worker shuts down, it will call the extension's {@link #close} method to allow the implementation to release all of * its resources. + *
+ * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the extension to register metrics. + * The following tags are automatically added to all metrics registered: config set to + * rest.extension.classes, and class set to the ConnectRestExtension class name. */ public interface ConnectRestExtension extends Configurable, Versioned, Closeable { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTaskContext.java b/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTaskContext.java index 35daae545304..1f9b02e4c432 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTaskContext.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTaskContext.java @@ -17,6 +17,7 @@ package org.apache.kafka.connect.sink; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.metrics.PluginMetrics; import java.util.Map; import java.util.Set; @@ -123,4 +124,26 @@ default ErrantRecordReporter errantRecordReporter() { return null; } + /** + * Get a {@link PluginMetrics} that can be used to define metrics + * + *

<p>This method was added in Apache Kafka 4.0. Tasks that use this method but want to + * maintain backward compatibility so they can also be deployed to older Connect runtimes + * should guard the call to this method with a try-catch block, since calling this method will result in a + * {@link NoSuchMethodError} or {@link NoClassDefFoundError} when the connector is deployed to + * Connect runtimes older than Kafka 4.0. For example: + * <pre>

+     *     PluginMetrics pluginMetrics;
+     *     try {
+     *         pluginMetrics = context.pluginMetrics();
+     *     } catch (NoSuchMethodError | NoClassDefFoundError e) {
+     *         pluginMetrics = null;
+     *     }
+     * </pre>
+ * + * @return the pluginMetrics instance + * @since 4.0 + */ + PluginMetrics pluginMetrics(); + } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTaskContext.java b/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTaskContext.java index e90b4b11e24d..b9b3a97adab5 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTaskContext.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTaskContext.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.connect.source; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.storage.OffsetStorageReader; import java.util.Map; @@ -63,4 +64,26 @@ public interface SourceTaskContext { default TransactionContext transactionContext() { return null; } + + /** + * Get a {@link PluginMetrics} that can be used to define metrics + * + *

<p>This method was added in Apache Kafka 4.0. Tasks that use this method but want to + * maintain backward compatibility so they can also be deployed to older Connect runtimes + * should guard the call to this method with a try-catch block, since calling this method will result in a + * {@link NoSuchMethodError} or {@link NoClassDefFoundError} when the connector is deployed to + * Connect runtimes older than Kafka 4.0. For example: + * <pre>

+     *     PluginMetrics pluginMetrics;
+     *     try {
+     *         pluginMetrics = context.pluginMetrics();
+     *     } catch (NoSuchMethodError | NoClassDefFoundError e) {
+     *         pluginMetrics = null;
+     *     }
+     * </pre>
+ * + * @return the pluginMetrics instance + * @since 4.0 + */ + PluginMetrics pluginMetrics(); } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/storage/Converter.java b/connect/api/src/main/java/org/apache/kafka/connect/storage/Converter.java index a2fb3ba0acb7..f9b05f85d758 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/storage/Converter.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/storage/Converter.java @@ -21,6 +21,8 @@ import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaAndValue; +import java.io.Closeable; +import java.io.IOException; import java.util.Map; /** @@ -30,8 +32,12 @@ *

Kafka Connect may discover implementations of this interface using the Java {@link java.util.ServiceLoader} mechanism. * To support this, implementations of this interface should also contain a service provider configuration file in * {@code META-INF/services/org.apache.kafka.connect.storage.Converter}. + *
+ * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the converter to register metrics. + * The following tags are automatically added to all metrics registered: connector set to connector name, + * task set to the task id and converter set to either key or value. */ -public interface Converter { +public interface Converter extends Closeable { /** * Configure this class. @@ -98,4 +104,9 @@ default SchemaAndValue toConnectData(String topic, Headers headers, byte[] value default ConfigDef config() { return new ConfigDef(); } + + @Override + default void close() throws IOException { + // no op + } } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/storage/HeaderConverter.java b/connect/api/src/main/java/org/apache/kafka/connect/storage/HeaderConverter.java index de89b1678cbd..36ad6221f9fa 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/storage/HeaderConverter.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/storage/HeaderConverter.java @@ -31,6 +31,10 @@ *
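To make the Converter javadoc above concrete, here is a minimal, hypothetical sketch (not part of the patch) of a pass-through byte-array converter that opts into metrics via Monitorable; the connector/task/converter tags described above are supplied by the runtime, not by the plugin.

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.Monitorable;
import org.apache.kafka.common.metrics.PluginMetrics;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.storage.Converter;

public class CountingBytesConverter implements Converter, Monitorable {

    private final AtomicLong records = new AtomicLong();

    @Override
    public void withPluginMetrics(PluginMetrics metrics) {
        MetricName name = metrics.metricName("record-count",
                "Records passed through this converter", Collections.emptyMap());
        metrics.addMetric(name, (Measurable) (config, now) -> records.get());
    }

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // nothing to configure in this sketch
    }

    @Override
    public byte[] fromConnectData(String topic, Schema schema, Object value) {
        records.incrementAndGet();
        return (byte[]) value;
    }

    @Override
    public SchemaAndValue toConnectData(String topic, byte[] value) {
        records.incrementAndGet();
        return new SchemaAndValue(Schema.OPTIONAL_BYTES_SCHEMA, value);
    }
}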

Kafka Connect may discover implementations of this interface using the Java {@link java.util.ServiceLoader} mechanism. * To support this, implementations of this interface should also contain a service provider configuration file in * {@code META-INF/services/org.apache.kafka.connect.storage.HeaderConverter}. + *
+ * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the converter to register metrics. + * The following tags are automatically added to all metrics registered: connector set to connector name, + * task set to the task id and converter set to header. */ public interface HeaderConverter extends Configurable, Closeable { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java b/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java index d5e42ebe8bc8..d90973732033 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java @@ -31,6 +31,10 @@ * {@code META-INF/services/org.apache.kafka.connect.transforms.Transformation}. * * @param The type of record (must be an implementation of {@link ConnectRecord}) + *
+ * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the transformation to register metrics. + * The following tags are automatically added to all metrics registered: connector set to connector name, + * task set to the task id and transformation set to the transformation alias. */ public interface Transformation> extends Configurable, Closeable { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/transforms/predicates/Predicate.java b/connect/api/src/main/java/org/apache/kafka/connect/transforms/predicates/Predicate.java index 1cd7abb75f59..82d4da92568f 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/transforms/predicates/Predicate.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/transforms/predicates/Predicate.java @@ -30,7 +30,10 @@ *
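A transformation can also use the sensor-based half of the PluginMetrics API (see PluginMetricsImplTest above). The following is a hypothetical pass-through SMT, sketched only to show the wiring; the connector/task/transformation tags described above are added by the runtime.

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.metrics.Monitorable;
import org.apache.kafka.common.metrics.PluginMetrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Rate;
import org.apache.kafka.connect.connector.ConnectRecord;
import org.apache.kafka.connect.transforms.Transformation;

public class RateTrackingTransformation<R extends ConnectRecord<R>> implements Transformation<R>, Monitorable {

    private Sensor recordSensor;

    @Override
    public void withPluginMetrics(PluginMetrics metrics) {
        // Sensor registration mirrors PluginMetricsImplTest#testAddRemoveSensor
        recordSensor = metrics.addSensor("records");
        recordSensor.add(metrics.metricName("record-rate", "Records seen per second",
                Collections.emptyMap()), new Rate());
    }

    @Override
    public R apply(R record) {
        if (recordSensor != null)
            recordSensor.record();
        return record; // pass-through; a real transformation would modify the record here
    }

    @Override
    public ConfigDef config() {
        return new ConfigDef();
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }

    @Override
    public void close() {
    }
}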

Kafka Connect may discover implementations of this interface using the Java {@link java.util.ServiceLoader} mechanism. * To support this, implementations of this interface should also contain a service provider configuration file in * {@code META-INF/services/org.apache.kafka.connect.transforms.predicates.Predicate}. - * + *
+ * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the predicate to register metrics. + * The following tags are automatically added to all metrics registered: connector set to connector name, + * task set to the task id and predicate set to the predicate alias. * @param The type of record. */ public interface Predicate> extends Configurable, AutoCloseable { diff --git a/connect/api/src/test/java/org/apache/kafka/connect/sink/SinkConnectorTest.java b/connect/api/src/test/java/org/apache/kafka/connect/sink/SinkConnectorTest.java index dc89ff59f293..176ebdf2cafc 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/sink/SinkConnectorTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/sink/SinkConnectorTest.java @@ -17,6 +17,7 @@ package org.apache.kafka.connect.sink; import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.connector.ConnectorContext; import org.apache.kafka.connect.connector.ConnectorTest; import org.apache.kafka.connect.connector.Task; @@ -53,6 +54,12 @@ public void raiseError(Exception e) { // Unexpected in these tests throw new UnsupportedOperationException(); } + + @Override + public PluginMetrics pluginMetrics() { + // Unexpected in these tests + throw new UnsupportedOperationException(); + } } protected static class TestSinkConnector extends SinkConnector implements ConnectorTest.AssertableConnector { diff --git a/connect/api/src/test/java/org/apache/kafka/connect/source/SourceConnectorTest.java b/connect/api/src/test/java/org/apache/kafka/connect/source/SourceConnectorTest.java index e1a6c54ebfd7..913c5e2019d7 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/source/SourceConnectorTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/source/SourceConnectorTest.java @@ -17,6 +17,7 @@ package org.apache.kafka.connect.source; import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.connector.ConnectorContext; import org.apache.kafka.connect.connector.ConnectorTest; import org.apache.kafka.connect.connector.Task; @@ -55,6 +56,12 @@ public void raiseError(Exception e) { throw new UnsupportedOperationException(); } + @Override + public PluginMetrics pluginMetrics() { + // Unexpected in these tests + throw new UnsupportedOperationException(); + } + @Override public OffsetStorageReader offsetStorageReader() { return null; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java index 3f72aefdb5fd..427166bd102e 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java @@ -26,6 +26,7 @@ import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.config.ConfigTransformer; import org.apache.kafka.common.config.ConfigValue; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.connector.Connector; @@ -133,7 +134,7 @@ public abstract class AbstractHerder implements Herder, TaskStatus.Listener, Con protected final StatusBackingStore statusBackingStore; protected final ConfigBackingStore configBackingStore; private volatile boolean ready = false; - private final 
ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy; + private final Plugin connectorClientConfigOverridePolicyPlugin; private final ExecutorService connectorExecutor; private final Time time; protected final Loggers loggers; @@ -153,7 +154,10 @@ public AbstractHerder(Worker worker, this.kafkaClusterId = kafkaClusterId; this.statusBackingStore = statusBackingStore; this.configBackingStore = configBackingStore; - this.connectorClientConfigOverridePolicy = connectorClientConfigOverridePolicy; + this.connectorClientConfigOverridePolicyPlugin = Plugin.wrapInstance( + connectorClientConfigOverridePolicy, + worker.metrics().metrics(), + WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG); this.connectorExecutor = Executors.newCachedThreadPool(); this.time = time; this.loggers = new Loggers(time); @@ -177,7 +181,7 @@ protected void stopServices() { this.configBackingStore.stop(); this.worker.stop(); this.connectorExecutor.shutdown(); - Utils.closeQuietly(this.connectorClientConfigOverridePolicy, "connector client config override policy"); + Utils.closeQuietly(this.connectorClientConfigOverridePolicyPlugin, "connector client config override policy"); } protected void ready() { @@ -394,6 +398,11 @@ public ConnectorStateInfo.TaskState taskStatus(ConnectorTaskId id) { status.workerId(), status.trace()); } + @Override + public Worker worker() { + return worker; + } + protected Map validateSinkConnectorConfig(SinkConnector connector, ConfigDef configDef, Map config) { Map result = configDef.validateAll(config); SinkConnectorConfig.validate(config, result); @@ -765,7 +774,7 @@ ConfigInfos validateConnectorConfig( connector.getClass(), connectorType, ConnectorClientConfigRequest.ClientType.PRODUCER, - connectorClientConfigOverridePolicy); + connectorClientConfigOverridePolicyPlugin.get()); } } if (connectorUsesAdmin(connectorType, connectorProps)) { @@ -779,7 +788,7 @@ ConfigInfos validateConnectorConfig( connector.getClass(), connectorType, ConnectorClientConfigRequest.ClientType.ADMIN, - connectorClientConfigOverridePolicy); + connectorClientConfigOverridePolicyPlugin.get()); } } if (connectorUsesConsumer(connectorType, connectorProps)) { @@ -793,7 +802,7 @@ ConfigInfos validateConnectorConfig( connector.getClass(), connectorType, ConnectorClientConfigRequest.ClientType.CONSUMER, - connectorClientConfigOverridePolicy); + connectorClientConfigOverridePolicyPlugin.get()); } } return mergeConfigInfos(connType, diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java index dfd1c0d06fdb..a738d00e177f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java @@ -24,6 +24,7 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.header.internals.RecordHeaders; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.CumulativeSum; @@ -46,6 +47,7 @@ import org.apache.kafka.connect.source.SourceTask; import org.apache.kafka.connect.source.SourceTaskContext; import org.apache.kafka.connect.storage.CloseableOffsetStorageReader; +import org.apache.kafka.connect.storage.ClusterConfigState; import 
org.apache.kafka.connect.storage.ConnectorOffsetBackingStore; import org.apache.kafka.connect.storage.Converter; import org.apache.kafka.connect.storage.HeaderConverter; @@ -184,15 +186,16 @@ protected abstract void producerSendFailed( protected final WorkerConfig workerConfig; - protected final WorkerSourceTaskContext sourceTaskContext; protected final ConnectorOffsetBackingStore offsetStore; protected final OffsetStorageWriter offsetWriter; protected final Producer producer; private final SourceTask task; - private final Converter keyConverter; - private final Converter valueConverter; - private final HeaderConverter headerConverter; + private final ClusterConfigState configState; + private final Plugin keyConverterPlugin; + private final Plugin valueConverterPlugin; + private final Plugin headerConverterPlugin; + private final WorkerTransactionContext workerTransactionContext; private final TopicAdmin admin; private final CloseableOffsetStorageReader offsetReader; private final SourceTaskMetricsGroup sourceTaskMetricsGroup; @@ -204,6 +207,7 @@ protected abstract void producerSendFailed( // Visible for testing List toSend; protected Map taskConfig; + protected WorkerSourceTaskContext context; protected boolean started = false; private volatile boolean producerClosed = false; @@ -211,11 +215,12 @@ protected AbstractWorkerSourceTask(ConnectorTaskId id, SourceTask task, TaskStatus.Listener statusListener, TargetState initialState, - Converter keyConverter, - Converter valueConverter, - HeaderConverter headerConverter, + ClusterConfigState configState, + Plugin keyConverterPlugin, + Plugin valueConverterPlugin, + Plugin headerConverterPlugin, TransformationChain transformationChain, - WorkerSourceTaskContext sourceTaskContext, + WorkerTransactionContext workerTransactionContext, Producer producer, TopicAdmin admin, Map topicGroups, @@ -238,20 +243,22 @@ protected AbstractWorkerSourceTask(ConnectorTaskId id, this.workerConfig = workerConfig; this.task = task; - this.keyConverter = keyConverter; - this.valueConverter = valueConverter; - this.headerConverter = headerConverter; + this.configState = configState; + this.keyConverterPlugin = keyConverterPlugin; + this.valueConverterPlugin = valueConverterPlugin; + this.headerConverterPlugin = headerConverterPlugin; + this.workerTransactionContext = workerTransactionContext; this.producer = producer; this.admin = admin; this.offsetReader = offsetReader; this.offsetWriter = offsetWriter; this.offsetStore = Objects.requireNonNull(offsetStore, "offset store cannot be null for source tasks"); this.closeExecutor = closeExecutor; - this.sourceTaskContext = sourceTaskContext; this.stopRequestedLatch = new CountDownLatch(1); this.sourceTaskMetricsGroup = new SourceTaskMetricsGroup(id, connectMetrics); this.topicTrackingEnabled = workerConfig.getBoolean(TOPIC_TRACKING_ENABLE_CONFIG); this.topicCreation = TopicCreation.newTopicCreation(workerConfig, topicGroups); + this.context = new WorkerSourceTaskContext(offsetReader, id, configState, workerTransactionContext, pluginMetrics); } @Override @@ -275,7 +282,7 @@ protected void initializeAndStart() { // the worst thing that happens is another exception gets logged for an already- // failed task started = true; - task.initialize(sourceTaskContext); + task.initialize(context); task.start(taskConfig); log.info("{} Source task finished initialization and start", this); } @@ -320,7 +327,10 @@ protected void close() { } Utils.closeQuietly(offsetReader, "offset reader"); Utils.closeQuietly(offsetStore::stop, "offset 
backing store"); - Utils.closeQuietly(headerConverter, "header converter"); + Utils.closeQuietly(headerConverterPlugin, "header converter"); + Utils.closeQuietly(keyConverterPlugin, "key converter"); + Utils.closeQuietly(valueConverterPlugin, "value converter"); + Utils.closeQuietly(pluginMetrics, "pluginMetrics"); } private void closeProducer(Duration duration) { @@ -483,13 +493,13 @@ protected ProducerRecord convertTransformedRecord(ProcessingCont return null; } - RecordHeaders headers = retryWithToleranceOperator.execute(context, () -> convertHeaderFor(record), Stage.HEADER_CONVERTER, headerConverter.getClass()); + RecordHeaders headers = retryWithToleranceOperator.execute(context, () -> convertHeaderFor(record), Stage.HEADER_CONVERTER, headerConverterPlugin.get().getClass()); - byte[] key = retryWithToleranceOperator.execute(context, () -> keyConverter.fromConnectData(record.topic(), headers, record.keySchema(), record.key()), - Stage.KEY_CONVERTER, keyConverter.getClass()); + byte[] key = retryWithToleranceOperator.execute(context, () -> keyConverterPlugin.get().fromConnectData(record.topic(), headers, record.keySchema(), record.key()), + Stage.KEY_CONVERTER, keyConverterPlugin.get().getClass()); - byte[] value = retryWithToleranceOperator.execute(context, () -> valueConverter.fromConnectData(record.topic(), headers, record.valueSchema(), record.value()), - Stage.VALUE_CONVERTER, valueConverter.getClass()); + byte[] value = retryWithToleranceOperator.execute(context, () -> valueConverterPlugin.get().fromConnectData(record.topic(), headers, record.valueSchema(), record.value()), + Stage.VALUE_CONVERTER, valueConverterPlugin.get().getClass()); if (context.failed()) { return null; @@ -545,7 +555,7 @@ protected RecordHeaders convertHeaderFor(SourceRecord record) { String topic = record.topic(); for (Header header : headers) { String key = header.key(); - byte[] rawHeader = headerConverter.fromConnectHeader(topic, key, header.schema(), header.value()); + byte[] rawHeader = headerConverterPlugin.get().fromConnectHeader(topic, key, header.schema(), header.value()); result.add(key, rawHeader); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java index ff62c25eee58..db0157a63c17 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java @@ -19,6 +19,7 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.MetricNameTemplate; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Gauge; import org.apache.kafka.common.metrics.KafkaMetricsContext; import org.apache.kafka.common.metrics.MetricConfig; @@ -27,9 +28,16 @@ import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.internals.MetricsUtils; +import org.apache.kafka.common.metrics.internals.PluginMetricsImpl; import org.apache.kafka.common.utils.AppInfoParser; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.connect.connector.ConnectRecord; import org.apache.kafka.connect.runtime.distributed.DistributedConfig; +import org.apache.kafka.connect.storage.Converter; +import org.apache.kafka.connect.storage.HeaderConverter; +import org.apache.kafka.connect.transforms.Transformation; 
+import org.apache.kafka.connect.transforms.predicates.Predicate; +import org.apache.kafka.connect.util.ConnectorTaskId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,6 +53,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; /** * The Connect metrics with configurable {@link MetricsReporter}s. @@ -167,6 +176,74 @@ public void stop() { AppInfoParser.unregisterAppInfo(JMX_PREFIX, workerId, metrics); } + PluginMetricsImpl connectorPluginMetrics(String connectorId) { + return new PluginMetricsImpl(metrics, connectorPluginTags(connectorId)); + } + + private static Map connectorPluginTags(String connectorId) { + Map tags = new LinkedHashMap<>(); + tags.put("connector", connectorId); + return tags; + } + + PluginMetricsImpl taskPluginMetrics(ConnectorTaskId connectorTaskId) { + return new PluginMetricsImpl(metrics, taskPluginTags(connectorTaskId)); + } + + private static Map taskPluginTags(ConnectorTaskId connectorTaskId) { + Map tags = connectorPluginTags(connectorTaskId.connector()); + tags.put("task", String.valueOf(connectorTaskId.task())); + return tags; + } + + private static Supplier> converterPluginTags(ConnectorTaskId connectorTaskId, boolean isKey) { + return () -> { + Map tags = taskPluginTags(connectorTaskId); + tags.put("converter", isKey ? "key" : "value"); + return tags; + }; + } + + private static Supplier> headerConverterPluginTags(ConnectorTaskId connectorTaskId) { + return () -> { + Map tags = taskPluginTags(connectorTaskId); + tags.put("converter", "header"); + return tags; + }; + } + + private static Supplier> transformationPluginTags(ConnectorTaskId connectorTaskId, String transformationAlias) { + return () -> { + Map tags = taskPluginTags(connectorTaskId); + tags.put("transformation", transformationAlias); + return tags; + }; + } + + private static Supplier> predicatePluginTags(ConnectorTaskId connectorTaskId, String predicateAlias) { + return () -> { + Map tags = taskPluginTags(connectorTaskId); + tags.put("predicate", predicateAlias); + return tags; + }; + } + + public Plugin wrap(HeaderConverter headerConverter, ConnectorTaskId connectorTaskId) { + return Plugin.wrapInstance(headerConverter, metrics, headerConverterPluginTags(connectorTaskId)); + } + + public Plugin wrap(Converter converter, ConnectorTaskId connectorTaskId, boolean isKey) { + return Plugin.wrapInstance(converter, metrics, converterPluginTags(connectorTaskId, isKey)); + } + + public > Plugin> wrap(Transformation transformation, ConnectorTaskId connectorTaskId, String alias) { + return Plugin.wrapInstance(transformation, metrics, transformationPluginTags(connectorTaskId, alias)); + } + + public > Plugin> wrap(Predicate predicate, ConnectorTaskId connectorTaskId, String alias) { + return Plugin.wrapInstance(predicate, metrics, predicatePluginTags(connectorTaskId, alias)); + } + public static class MetricGroupId { private final String groupName; private final Map tags; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java index cb604ad73eef..43d6ec521e2c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java @@ -22,6 +22,7 @@ import org.apache.kafka.common.config.ConfigDef.Type; import 
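For orientation (illustrative values, not taken from the patch): the tag helpers added to ConnectMetrics above nest, so for task 2 of a hypothetical connector named my-sink the wrapped plugins end up with tags roughly as follows.

// connectorPluginTags("my-sink")                      -> {connector=my-sink}
// taskPluginTags(my-sink-2)                           -> {connector=my-sink, task=2}
// converterPluginTags(my-sink-2, isKey = true)        -> {connector=my-sink, task=2, converter=key}
// headerConverterPluginTags(my-sink-2)                -> {connector=my-sink, task=2, converter=header}
// transformationPluginTags(my-sink-2, "myTransform")  -> {connector=my-sink, task=2, transformation=myTransform}
// predicatePluginTags(my-sink-2, "myPredicate")       -> {connector=my-sink, task=2, predicate=myPredicate}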
org.apache.kafka.common.config.ConfigDef.Width; import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.connector.ConnectRecord; import org.apache.kafka.connect.errors.ConnectException; @@ -33,6 +34,7 @@ import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; import org.apache.kafka.connect.util.ConcreteSubClassValidator; +import org.apache.kafka.connect.util.ConnectorTaskId; import org.apache.kafka.connect.util.InstantiableClassValidator; import org.slf4j.Logger; @@ -300,7 +302,7 @@ public boolean enforceTasksMax() { * {@link Transformation transformations} and {@link Predicate predicates} * as they are specified in the {@link #TRANSFORMS_CONFIG} and {@link #PREDICATES_CONFIG} */ - public > List> transformationStages() { + public > List> transformationStages(ConnectorTaskId connectorTaskId, ConnectMetrics metrics) { final List transformAliases = getList(TRANSFORMS_CONFIG); final List> transformations = new ArrayList<>(transformAliases.size()); @@ -314,14 +316,16 @@ public > List> transformationS Object predicateAlias = configs.remove(TransformationStage.PREDICATE_CONFIG); Object negate = configs.remove(TransformationStage.NEGATE_CONFIG); transformation.configure(configs); + Plugin> transformationPlugin = metrics.wrap(transformation, connectorTaskId, alias); if (predicateAlias != null) { String predicatePrefix = PREDICATES_PREFIX + predicateAlias + "."; @SuppressWarnings("unchecked") Predicate predicate = Utils.newInstance(getClass(predicatePrefix + "type"), Predicate.class); predicate.configure(originalsWithPrefix(predicatePrefix)); - transformations.add(new TransformationStage<>(predicate, negate != null && Boolean.parseBoolean(negate.toString()), transformation)); + Plugin> predicatePlugin = metrics.wrap(predicate, connectorTaskId, (String) predicateAlias); + transformations.add(new TransformationStage<>(predicatePlugin, negate != null && Boolean.parseBoolean(negate.toString()), transformationPlugin)); } else { - transformations.add(new TransformationStage<>(transformation)); + transformations.add(new TransformationStage<>(transformationPlugin)); } } catch (Exception e) { throw new ConnectException(e); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java index bcff615c4147..0330c656d9a4 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java @@ -20,6 +20,7 @@ import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.errors.InvalidProducerEpochException; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Max; @@ -78,9 +79,9 @@ public ExactlyOnceWorkerSourceTask(ConnectorTaskId id, SourceTask task, TaskStatus.Listener statusListener, TargetState initialState, - Converter keyConverter, - Converter valueConverter, - HeaderConverter headerConverter, + Plugin keyConverterPlugin, + Plugin valueConverterPlugin, + Plugin headerConverterPlugin, TransformationChain 
transformationChain, Producer producer, TopicAdmin admin, @@ -101,8 +102,8 @@ public ExactlyOnceWorkerSourceTask(ConnectorTaskId id, Runnable preProducerCheck, Runnable postProducerCheck, Supplier>> errorReportersSupplier) { - super(id, task, statusListener, initialState, keyConverter, valueConverter, headerConverter, transformationChain, - new WorkerSourceTaskContext(offsetReader, id, configState, buildTransactionContext(sourceConfig)), + super(id, task, statusListener, initialState, configState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, + buildTransactionContext(sourceConfig), producer, admin, topicGroups, offsetReader, offsetWriter, offsetStore, workerConfig, connectMetrics, errorMetrics, loader, time, retryWithToleranceOperator, statusBackingStore, closeExecutor, errorReportersSupplier); @@ -112,7 +113,7 @@ public ExactlyOnceWorkerSourceTask(ConnectorTaskId id, this.preProducerCheck = preProducerCheck; this.postProducerCheck = postProducerCheck; - this.transactionBoundaryManager = buildTransactionManager(workerConfig, sourceConfig, sourceTaskContext.transactionContext()); + this.transactionBoundaryManager = buildTransactionManager(workerConfig, sourceConfig, context.transactionContext()); this.transactionMetrics = new TransactionMetricsGroup(id, connectMetrics); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java index fbdfcab09318..c9aa0f3afecf 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java @@ -384,6 +384,12 @@ default void validateConnectorConfig(Map connectorConfig, Callba */ void setClusterLoggerLevel(String namespace, String level); + /** + * Get the worker for this herder + * @return the worker + */ + Worker worker(); + enum ConfigReloadAction { NONE, RESTART diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/HerderConnectorContext.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/HerderConnectorContext.java index d0a90064d5bd..4192dde58fa4 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/HerderConnectorContext.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/HerderConnectorContext.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.connect.runtime; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.errors.ConnectException; import org.slf4j.Logger; @@ -63,6 +64,11 @@ public void raiseError(Exception e) { herder.onFailure(connectorName, e); } + @Override + public PluginMetrics pluginMetrics() { + return null; + } + @Override public void close() { closed = true; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java index 3831730ad8f5..90b3559866a0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java @@ -17,6 +17,7 @@ package org.apache.kafka.connect.runtime; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.connector.ConnectRecord; import org.apache.kafka.connect.transforms.Transformation; @@ -32,44 +33,44 @@ public class 
TransformationStage> implements AutoClos static final String PREDICATE_CONFIG = "predicate"; static final String NEGATE_CONFIG = "negate"; - private final Predicate predicate; - private final Transformation transformation; + private final Plugin> predicatePlugin; + private final Plugin> transformationPlugin; private final boolean negate; - TransformationStage(Transformation transformation) { - this(null, false, transformation); + TransformationStage(Plugin> transformationPlugin) { + this(null, false, transformationPlugin); } - TransformationStage(Predicate predicate, boolean negate, Transformation transformation) { - this.predicate = predicate; + TransformationStage(Plugin> predicatePlugin, boolean negate, Plugin> transformationPlugin) { + this.predicatePlugin = predicatePlugin; this.negate = negate; - this.transformation = transformation; + this.transformationPlugin = transformationPlugin; } public Class> transformClass() { @SuppressWarnings("unchecked") - Class> transformClass = (Class>) transformation.getClass(); + Class> transformClass = (Class>) transformationPlugin.get().getClass(); return transformClass; } public R apply(R record) { - if (predicate == null || negate ^ predicate.test(record)) { - return transformation.apply(record); + if (predicatePlugin == null || predicatePlugin.get() == null || negate ^ predicatePlugin.get().test(record)) { + return transformationPlugin.get().apply(record); } return record; } @Override public void close() { - Utils.closeQuietly(transformation, "transformation"); - Utils.closeQuietly(predicate, "predicate"); + Utils.closeQuietly(transformationPlugin, "transformation"); + Utils.closeQuietly(predicatePlugin, "predicate"); } @Override public String toString() { return "TransformationStage{" + - "predicate=" + predicate + - ", transformation=" + transformation + + "predicate=" + predicatePlugin.get() + + ", transformation=" + transformationPlugin.get() + ", negate=" + negate + '}'; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java index 591e9816a7a5..e6bdf88c2b35 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java @@ -44,6 +44,7 @@ import org.apache.kafka.common.errors.GroupNotEmptyException; import org.apache.kafka.common.errors.GroupSubscribedToTopicException; import org.apache.kafka.common.errors.UnknownMemberIdException; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.ThreadUtils; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; @@ -276,6 +277,8 @@ public void stop() { workerConfigTransformer.close(); ThreadUtils.shutdownExecutorServiceQuietly(executor, EXECUTOR_SHUTDOWN_TERMINATION_TIMEOUT_MS, TimeUnit.MILLISECONDS); + Utils.closeQuietly(internalKeyConverter, "internal key converter"); + Utils.closeQuietly(internalValueConverter, "internal value converter"); } /** @@ -695,9 +698,9 @@ private boolean startTask( workerTask = taskBuilder .withTask(task) .withConnectorConfig(connConfig) - .withKeyConverter(keyConverter) - .withValueConverter(valueConverter) - .withHeaderConverter(headerConverter) + .withKeyConverterPlugin(metrics.wrap(keyConverter, id, true)) + .withValueConverterPlugin(metrics.wrap(valueConverter, id, false)) + .withHeaderConverterPlugin(metrics.wrap(headerConverter, id)) .withClassloader(connectorLoader) .build(); @@ -1730,9 
+1733,9 @@ abstract class TaskBuilder> { private Task task = null; private ConnectorConfig connectorConfig = null; - private Converter keyConverter = null; - private Converter valueConverter = null; - private HeaderConverter headerConverter = null; + private Plugin keyConverterPlugin = null; + private Plugin valueConverterPlugin = null; + private Plugin headerConverterPlugin = null; private ClassLoader classLoader = null; public TaskBuilder(ConnectorTaskId id, @@ -1755,18 +1758,18 @@ public TaskBuilder withConnectorConfig(ConnectorConfig connectorConfig) { return this; } - public TaskBuilder withKeyConverter(Converter keyConverter) { - this.keyConverter = keyConverter; + public TaskBuilder withKeyConverterPlugin(Plugin keyConverterPlugin) { + this.keyConverterPlugin = keyConverterPlugin; return this; } - public TaskBuilder withValueConverter(Converter valueConverter) { - this.valueConverter = valueConverter; + public TaskBuilder withValueConverterPlugin(Plugin valueConverterPlugin) { + this.valueConverterPlugin = valueConverterPlugin; return this; } - public TaskBuilder withHeaderConverter(HeaderConverter headerConverter) { - this.headerConverter = headerConverter; + public TaskBuilder withHeaderConverterPlugin(Plugin headerConverterPlugin) { + this.headerConverterPlugin = headerConverterPlugin; return this; } @@ -1778,9 +1781,9 @@ public TaskBuilder withClassloader(ClassLoader classLoader) { public WorkerTask build() { Objects.requireNonNull(task, "Task cannot be null"); Objects.requireNonNull(connectorConfig, "Connector config used by task cannot be null"); - Objects.requireNonNull(keyConverter, "Key converter used by task cannot be null"); - Objects.requireNonNull(valueConverter, "Value converter used by task cannot be null"); - Objects.requireNonNull(headerConverter, "Header converter used by task cannot be null"); + Objects.requireNonNull(keyConverterPlugin.get(), "Key converter used by task cannot be null"); + Objects.requireNonNull(valueConverterPlugin.get(), "Value converter used by task cannot be null"); + Objects.requireNonNull(headerConverterPlugin.get(), "Header converter used by task cannot be null"); Objects.requireNonNull(classLoader, "Classloader used by task cannot be null"); ErrorHandlingMetrics errorHandlingMetrics = errorHandlingMetrics(id); @@ -1790,11 +1793,11 @@ public WorkerTask build() { RetryWithToleranceOperator retryWithToleranceOperator = new RetryWithToleranceOperator<>(connectorConfig.errorRetryTimeout(), connectorConfig.errorMaxDelayInMillis(), connectorConfig.errorToleranceType(), Time.SYSTEM, errorHandlingMetrics); - TransformationChain transformationChain = new TransformationChain<>(connectorConfig.transformationStages(), retryWithToleranceOperator); + TransformationChain transformationChain = new TransformationChain<>(connectorConfig.transformationStages(id, metrics), retryWithToleranceOperator); log.info("Initializing: {}", transformationChain); return doBuild(task, id, configState, statusListener, initialState, - connectorConfig, keyConverter, valueConverter, headerConverter, classLoader, + connectorConfig, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, classLoader, retryWithToleranceOperator, transformationChain, errorHandlingMetrics, connectorClass); } @@ -1806,9 +1809,9 @@ abstract WorkerTask doBuild( TaskStatus.Listener statusListener, TargetState initialState, ConnectorConfig connectorConfig, - Converter keyConverter, - Converter valueConverter, - HeaderConverter headerConverter, + Plugin keyConverterPlugin, + Plugin 
valueConverterPlugin, + Plugin headerConverterPlugin, ClassLoader classLoader, RetryWithToleranceOperator retryWithToleranceOperator, TransformationChain transformationChain, @@ -1834,9 +1837,9 @@ public WorkerTask, SinkRecord> doBuild( TaskStatus.Listener statusListener, TargetState initialState, ConnectorConfig connectorConfig, - Converter keyConverter, - Converter valueConverter, - HeaderConverter headerConverter, + Plugin keyConverterPlugin, + Plugin valueConverterPlugin, + Plugin headerConverterPlugin, ClassLoader classLoader, RetryWithToleranceOperator> retryWithToleranceOperator, TransformationChain, SinkRecord> transformationChain, @@ -1845,15 +1848,15 @@ public WorkerTask, SinkRecord> doBuild( ) { SinkConnectorConfig sinkConfig = new SinkConnectorConfig(plugins, connectorConfig.originalsStrings()); WorkerErrantRecordReporter workerErrantRecordReporter = createWorkerErrantRecordReporter(sinkConfig, retryWithToleranceOperator, - keyConverter, valueConverter, headerConverter); + keyConverterPlugin.get(), valueConverterPlugin.get(), headerConverterPlugin.get()); Map consumerProps = baseConsumerConfigs( id.connector(), "connector-consumer-" + id, config, connectorConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId, ConnectorType.SINK); KafkaConsumer consumer = new KafkaConsumer<>(consumerProps); - return new WorkerSinkTask(id, (SinkTask) task, statusListener, initialState, config, configState, metrics, keyConverter, - valueConverter, errorHandlingMetrics, headerConverter, transformationChain, consumer, classLoader, time, + return new WorkerSinkTask(id, (SinkTask) task, statusListener, initialState, config, configState, metrics, keyConverterPlugin, + valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, transformationChain, consumer, classLoader, time, retryWithToleranceOperator, workerErrantRecordReporter, herder.statusBackingStore(), () -> sinkTaskReporters(id, sinkConfig, errorHandlingMetrics, connectorClass)); } @@ -1875,9 +1878,9 @@ public WorkerTask doBuild( TaskStatus.Listener statusListener, TargetState initialState, ConnectorConfig connectorConfig, - Converter keyConverter, - Converter valueConverter, - HeaderConverter headerConverter, + Plugin keyConverterPlugin, + Plugin valueConverterPlugin, + Plugin headerConverterPlugin, ClassLoader classLoader, RetryWithToleranceOperator retryWithToleranceOperator, TransformationChain transformationChain, @@ -1912,8 +1915,8 @@ public WorkerTask doBuild( OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetStore, id.connector(), internalKeyConverter, internalValueConverter); // Note we pass the configState as it performs dynamic transformations under the covers - return new WorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverter, valueConverter, errorHandlingMetrics, - headerConverter, transformationChain, producer, topicAdmin, topicCreationGroups, + return new WorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, + headerConverterPlugin, transformationChain, producer, topicAdmin, topicCreationGroups, offsetReader, offsetWriter, offsetStore, config, configState, metrics, classLoader, time, retryWithToleranceOperator, herder.statusBackingStore(), executor, () -> sourceTaskReporters(id, sourceConfig, errorHandlingMetrics)); } @@ -1942,9 +1945,9 @@ public WorkerTask doBuild( TaskStatus.Listener statusListener, TargetState initialState, ConnectorConfig connectorConfig, - Converter 
keyConverter, - Converter valueConverter, - HeaderConverter headerConverter, + Plugin keyConverterPlugin, + Plugin valueConverterPlugin, + Plugin headerConverterPlugin, ClassLoader classLoader, RetryWithToleranceOperator retryWithToleranceOperator, TransformationChain transformationChain, @@ -1976,8 +1979,8 @@ public WorkerTask doBuild( OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetStore, id.connector(), internalKeyConverter, internalValueConverter); // Note we pass the configState as it performs dynamic transformations under the covers - return new ExactlyOnceWorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverter, valueConverter, - headerConverter, transformationChain, producer, topicAdmin, topicCreationGroups, + return new ExactlyOnceWorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, + headerConverterPlugin, transformationChain, producer, topicAdmin, topicCreationGroups, offsetReader, offsetWriter, offsetStore, config, configState, metrics, errorHandlingMetrics, classLoader, time, retryWithToleranceOperator, herder.statusBackingStore(), sourceConfig, executor, preProducerCheck, postProducerCheck, () -> sourceTaskReporters(id, sourceConfig, errorHandlingMetrics)); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java index 8bb541c01851..c24b00331277 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java @@ -16,6 +16,8 @@ */ package org.apache.kafka.connect.runtime; +import org.apache.kafka.common.metrics.PluginMetrics; +import org.apache.kafka.common.metrics.internals.PluginMetricsImpl; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.connector.Connector; import org.apache.kafka.connect.connector.ConnectorContext; @@ -81,12 +83,13 @@ private enum State { private State state; private final CloseableOffsetStorageReader offsetStorageReader; private final ConnectorOffsetBackingStore offsetStore; + private final PluginMetricsImpl pluginMetrics; public WorkerConnector(String connName, Connector connector, ConnectorConfig connectorConfig, CloseableConnectorContext ctx, - ConnectMetrics metrics, + ConnectMetrics connectMetrics, ConnectorStatus.Listener statusListener, CloseableOffsetStorageReader offsetStorageReader, ConnectorOffsetBackingStore offsetStore, @@ -97,7 +100,7 @@ public WorkerConnector(String connName, this.ctx = ctx; this.connector = connector; this.state = State.INIT; - this.metrics = new ConnectorMetricsGroup(metrics, AbstractStatus.State.UNASSIGNED, statusListener); + this.metrics = new ConnectorMetricsGroup(connectMetrics, AbstractStatus.State.UNASSIGNED, statusListener); this.statusListener = this.metrics; this.offsetStorageReader = offsetStorageReader; this.offsetStore = offsetStore; @@ -107,6 +110,7 @@ public WorkerConnector(String connName, this.externalFailure = null; this.stopping = false; this.cancelled = false; + this.pluginMetrics = connectMetrics.connectorPluginMetrics(connName); } public ClassLoader loader() { @@ -191,12 +195,12 @@ void initialize() { log.debug("{} Initializing connector {}", this, connName); if (isSinkConnector()) { SinkConnectorConfig.validate(config); - connector.initialize(new WorkerSinkConnectorContext()); + connector.initialize(new 
WorkerSinkConnectorContext(pluginMetrics)); } else { Objects.requireNonNull(offsetStore, "Offset store cannot be null for source connectors"); Objects.requireNonNull(offsetStorageReader, "Offset reader cannot be null for source connectors"); offsetStore.start(); - connector.initialize(new WorkerSourceConnectorContext(offsetStorageReader)); + connector.initialize(new WorkerSourceConnectorContext(offsetStorageReader, pluginMetrics)); } } catch (Throwable t) { log.error("{} Error initializing connector", this, t); @@ -324,6 +328,7 @@ void doShutdown() { if (offsetStore != null) { Utils.closeQuietly(offsetStore::stop, "offset backing store for " + connName); } + Utils.closeQuietly(pluginMetrics, "plugin metrics"); } } @@ -585,19 +590,37 @@ public void raiseError(Exception e) { } private class WorkerSinkConnectorContext extends WorkerConnectorContext implements SinkConnectorContext { + + private final PluginMetrics pluginMetrics; + + WorkerSinkConnectorContext(PluginMetrics pluginMetrics) { + this.pluginMetrics = pluginMetrics; + } + + @Override + public PluginMetrics pluginMetrics() { + return pluginMetrics; + } } private class WorkerSourceConnectorContext extends WorkerConnectorContext implements SourceConnectorContext { private final OffsetStorageReader offsetStorageReader; + private final PluginMetrics pluginMetrics; - WorkerSourceConnectorContext(OffsetStorageReader offsetStorageReader) { + WorkerSourceConnectorContext(OffsetStorageReader offsetStorageReader, PluginMetrics pluginMetrics) { this.offsetStorageReader = offsetStorageReader; + this.pluginMetrics = pluginMetrics; } @Override public OffsetStorageReader offsetStorageReader() { return offsetStorageReader; } + + @Override + public PluginMetrics pluginMetrics() { + return pluginMetrics; + } } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java index 1f4e930ae5a4..4e3805242f3d 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java @@ -25,6 +25,7 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.WakeupException; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.CumulativeSum; @@ -82,9 +83,9 @@ class WorkerSinkTask extends WorkerTask, SinkReco private final SinkTask task; private final ClusterConfigState configState; private Map taskConfig; - private final Converter keyConverter; - private final Converter valueConverter; - private final HeaderConverter headerConverter; + private final Plugin keyConverterPlugin; + private final Plugin valueConverterPlugin; + private final Plugin headerConverterPlugin; private final SinkTaskMetricsGroup sinkTaskMetricsGroup; private final boolean isTopicTrackingEnabled; private final Consumer consumer; @@ -110,10 +111,10 @@ public WorkerSinkTask(ConnectorTaskId id, WorkerConfig workerConfig, ClusterConfigState configState, ConnectMetrics connectMetrics, - Converter keyConverter, - Converter valueConverter, + Plugin keyConverterPlugin, + Plugin valueConverterPlugin, ErrorHandlingMetrics errorMetrics, - HeaderConverter headerConverter, + Plugin headerConverterPlugin, TransformationChain, SinkRecord> transformationChain, Consumer 
consumer, ClassLoader loader, @@ -128,9 +129,9 @@ public WorkerSinkTask(ConnectorTaskId id, this.workerConfig = workerConfig; this.task = task; this.configState = configState; - this.keyConverter = keyConverter; - this.valueConverter = valueConverter; - this.headerConverter = headerConverter; + this.keyConverterPlugin = keyConverterPlugin; + this.valueConverterPlugin = valueConverterPlugin; + this.headerConverterPlugin = headerConverterPlugin; this.messageBatch = new ArrayList<>(); this.lastCommittedOffsets = new HashMap<>(); this.currentOffsets = new HashMap<>(); @@ -180,7 +181,10 @@ protected void close() { } taskStopped = true; Utils.closeQuietly(consumer, "consumer"); - Utils.closeQuietly(headerConverter, "header converter"); + Utils.closeQuietly(headerConverterPlugin, "header converter"); + Utils.closeQuietly(keyConverterPlugin, "key converter"); + Utils.closeQuietly(valueConverterPlugin, "value converter"); + Utils.closeQuietly(pluginMetrics, "plugin metrics"); /* Setting partition count explicitly to 0 to handle the case, when the task fails, which would cause its consumer to leave the group. @@ -535,13 +539,13 @@ private void convertMessages(ConsumerRecords msgs) { } private SinkRecord convertAndTransformRecord(ProcessingContext> context, final ConsumerRecord msg) { - SchemaAndValue keyAndSchema = retryWithToleranceOperator.execute(context, () -> keyConverter.toConnectData(msg.topic(), msg.headers(), msg.key()), - Stage.KEY_CONVERTER, keyConverter.getClass()); + SchemaAndValue keyAndSchema = retryWithToleranceOperator.execute(context, () -> keyConverterPlugin.get().toConnectData(msg.topic(), msg.headers(), msg.key()), + Stage.KEY_CONVERTER, keyConverterPlugin.get().getClass()); - SchemaAndValue valueAndSchema = retryWithToleranceOperator.execute(context, () -> valueConverter.toConnectData(msg.topic(), msg.headers(), msg.value()), - Stage.VALUE_CONVERTER, valueConverter.getClass()); + SchemaAndValue valueAndSchema = retryWithToleranceOperator.execute(context, () -> valueConverterPlugin.get().toConnectData(msg.topic(), msg.headers(), msg.value()), + Stage.VALUE_CONVERTER, valueConverterPlugin.get().getClass()); - Headers headers = retryWithToleranceOperator.execute(context, () -> convertHeadersFor(msg), Stage.HEADER_CONVERTER, headerConverter.getClass()); + Headers headers = retryWithToleranceOperator.execute(context, () -> convertHeadersFor(msg), Stage.HEADER_CONVERTER, headerConverterPlugin.get().getClass()); if (context.failed()) { return null; @@ -576,7 +580,7 @@ private Headers convertHeadersFor(ConsumerRecord record) { if (recordHeaders != null) { String topic = record.topic(); for (org.apache.kafka.common.header.Header recordHeader : recordHeaders) { - SchemaAndValue schemaAndValue = headerConverter.toConnectHeader(topic, recordHeader.key(), recordHeader.value()); + SchemaAndValue schemaAndValue = headerConverterPlugin.get().toConnectHeader(topic, recordHeader.key(), recordHeader.value()); result.add(recordHeader.key(), schemaAndValue); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java index e767c7640b47..11b8446b3d81 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java @@ -18,6 +18,7 @@ import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.common.TopicPartition; 
+import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.errors.IllegalWorkerStateException; import org.apache.kafka.connect.sink.ErrantRecordReporter; import org.apache.kafka.connect.sink.SinkTaskContext; @@ -165,6 +166,11 @@ public ErrantRecordReporter errantRecordReporter() { return sinkTask.workerErrantRecordReporter(); } + @Override + public PluginMetrics pluginMetrics() { + return sinkTask.pluginMetrics(); + } + @Override public String toString() { return "WorkerSinkTaskContext{" + diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java index 0d0eba32d86c..a95dddcd570e 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java @@ -19,6 +19,7 @@ import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; @@ -71,10 +72,10 @@ public WorkerSourceTask(ConnectorTaskId id, SourceTask task, TaskStatus.Listener statusListener, TargetState initialState, - Converter keyConverter, - Converter valueConverter, + Plugin keyConverterPlugin, + Plugin valueConverterPlugin, ErrorHandlingMetrics errorMetrics, - HeaderConverter headerConverter, + Plugin headerConverterPlugin, TransformationChain transformationChain, Producer producer, TopicAdmin admin, @@ -92,8 +93,8 @@ public WorkerSourceTask(ConnectorTaskId id, Executor closeExecutor, Supplier>> errorReportersSupplier) { - super(id, task, statusListener, initialState, keyConverter, valueConverter, headerConverter, transformationChain, - new WorkerSourceTaskContext(offsetReader, id, configState, null), producer, + super(id, task, statusListener, initialState, configState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, + null, producer, admin, topicGroups, offsetReader, offsetWriter, offsetStore, workerConfig, connectMetrics, errorMetrics, loader, time, retryWithToleranceOperator, statusBackingStore, closeExecutor, errorReportersSupplier); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTaskContext.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTaskContext.java index cb5af463ce77..e1061d70a93a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTaskContext.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTaskContext.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.connect.runtime; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.source.SourceTaskContext; import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.OffsetStorageReader; @@ -29,15 +30,18 @@ public class WorkerSourceTaskContext implements SourceTaskContext { private final ConnectorTaskId id; private final ClusterConfigState configState; private final WorkerTransactionContext transactionContext; + private final PluginMetrics pluginMetrics; public WorkerSourceTaskContext(OffsetStorageReader reader, ConnectorTaskId id, 
ClusterConfigState configState, - WorkerTransactionContext transactionContext) { + WorkerTransactionContext transactionContext, + PluginMetrics pluginMetrics) { this.reader = reader; this.id = id; this.configState = configState; this.transactionContext = transactionContext; + this.pluginMetrics = pluginMetrics; } @Override @@ -54,4 +58,9 @@ public OffsetStorageReader offsetStorageReader() { public WorkerTransactionContext transactionContext() { return transactionContext; } + + @Override + public PluginMetrics pluginMetrics() { + return pluginMetrics; + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java index 98171fe47b6a..f19566c8573a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java @@ -19,7 +19,9 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.MetricNameTemplate; import org.apache.kafka.common.metrics.Gauge; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.internals.PluginMetricsImpl; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Frequencies; import org.apache.kafka.common.metrics.stats.Max; @@ -76,6 +78,7 @@ abstract class WorkerTask> implements Runnable { protected final RetryWithToleranceOperator retryWithToleranceOperator; protected final TransformationChain transformationChain; private final Supplier>> errorReportersSupplier; + protected final PluginMetricsImpl pluginMetrics; public WorkerTask(ConnectorTaskId id, TaskStatus.Listener statusListener, @@ -103,6 +106,7 @@ public WorkerTask(ConnectorTaskId id, this.errorReportersSupplier = errorReportersSupplier; this.time = time; this.statusBackingStore = statusBackingStore; + this.pluginMetrics = connectMetrics.taskPluginMetrics(id); } public ConnectorTaskId id() { @@ -170,6 +174,11 @@ public void removeMetrics() { Utils.closeQuietly(errorMetrics, "Error handling metrics"); } + // Exposes the task-scoped plugin metrics so the task contexts can surface them to connector plugins + public PluginMetrics pluginMetrics() { + return pluginMetrics; + } + // Visible for testing void doStart() { retryWithToleranceOperator.reporters(errorReportersSupplier.get()); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java index 9468166763ce..78795a74f062 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java @@ -17,6 +17,7 @@ package org.apache.kafka.connect.runtime.rest; import org.apache.kafka.common.config.ConfigException; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.health.ConnectClusterDetails; @@ -95,7 +96,7 @@ public abstract class RestServer { private final Server jettyServer; private final RequestTimeout requestTimeout; - private List connectRestExtensions = Collections.emptyList(); + private List> connectRestExtensionPlugins = Collections.emptyList(); /** * Create a REST server for this herder using the specified configs.
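
(Illustrative sketch, not part of this patch: with the task-scoped PluginMetrics that WorkerTask now creates and that the task contexts expose through pluginMetrics(), a sink task could register its own metric at startup. The class and metric names below are hypothetical, and the PluginMetrics metricName/addMetric signatures are assumed from the KIP-877 API this change builds on; the "connector" and "task" tags are added automatically by ConnectMetrics.taskPluginMetrics, and the metrics are cleaned up when the worker closes the task's pluginMetrics.)

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.PluginMetrics;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

import java.util.Collection;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class ExamplePutCountingSinkTask extends SinkTask {

    private final AtomicLong recordsPut = new AtomicLong();

    @Override
    public void start(Map<String, String> props) {
        // context.pluginMetrics() is the task-scoped PluginMetrics wired up above;
        // the connector and task tags are appended automatically by the runtime.
        PluginMetrics pluginMetrics = context.pluginMetrics();
        MetricName name = pluginMetrics.metricName(
                "records-put", "Total number of records handed to put()", Map.of());
        pluginMetrics.addMetric(name, (Measurable) (config, now) -> (double) recordsPut.get());
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        recordsPut.addAndGet(records.size());
    }

    @Override
    public void stop() {
        // Metrics registered via PluginMetrics are removed when the runtime closes it.
    }

    @Override
    public String version() {
        return "1.0";
    }
}
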
@@ -288,6 +289,7 @@ protected final void initializeResources() { try { context.start(); } catch (Exception e) { + e.printStackTrace(); throw new ConnectException("Unable to initialize REST resources", e); } @@ -365,11 +367,11 @@ public void stop() { } } } - for (ConnectRestExtension connectRestExtension : connectRestExtensions) { + for (Plugin connectRestExtensionPlugin : connectRestExtensionPlugins) { try { - connectRestExtension.close(); + connectRestExtensionPlugin.close(); } catch (IOException e) { - log.warn("Error while invoking close on " + connectRestExtension.getClass(), e); + log.warn("Error while invoking close on " + connectRestExtensionPlugin.get().getClass(), e); } } jettyServer.stop(); @@ -499,9 +501,14 @@ ServerConnector findConnector(String protocol) { } protected final void registerRestExtensions(Herder herder, ResourceConfig resourceConfig) { - connectRestExtensions = herder.plugins().newPlugins( - config.restExtensions(), - config, ConnectRestExtension.class); + connectRestExtensionPlugins = Plugin.wrapInstances( + herder.plugins().newPlugins( + config.restExtensions(), + config, + ConnectRestExtension.class + ), + herder.worker().metrics().metrics(), + RestServerConfig.REST_EXTENSION_CLASSES_CONFIG); long herderRequestTimeoutMs = DEFAULT_REST_REQUEST_TIMEOUT_MS; @@ -520,8 +527,8 @@ protected final void registerRestExtensions(Herder herder, ResourceConfig resour new ConnectRestConfigurable(resourceConfig), new ConnectClusterStateImpl(herderRequestTimeoutMs, connectClusterDetails, herder) ); - for (ConnectRestExtension connectRestExtension : connectRestExtensions) { - connectRestExtension.register(connectRestExtensionContext); + for (Plugin connectRestExtensionPlugin : connectRestExtensionPlugins) { + connectRestExtensionPlugin.get().register(connectRestExtensionContext); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java index 90a0e96a78af..351b1555a362 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java @@ -325,9 +325,9 @@ private void createNormalConnector() { normalConnectorHandle.expectedCommits(NUM_RECORDS_PRODUCED); Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, "1"); - props.put(MonitorableSourceConnector.TOPIC_CONFIG, TEST_TOPIC); + props.put(TestableSourceConnector.TOPIC_CONFIG, TEST_TOPIC); log.info("Creating normal connector"); try { connect.configureConnector(NORMAL_CONNECTOR_NAME, props); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java index 1a78643950d1..c32e80591162 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java @@ -84,7 +84,7 @@ import static org.apache.kafka.common.config.TopicConfig.DELETE_RETENTION_MS_CONFIG; import static org.apache.kafka.common.config.TopicConfig.SEGMENT_MS_CONFIG; import static 
org.apache.kafka.connect.integration.BlockingConnectorTest.TASK_STOP; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX; import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG; @@ -333,7 +333,7 @@ public void testTaskStatuses() throws Exception { // base connector props Map props = defaultSourceConnectorProps(TOPIC_NAME); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); // start the connector with only one task int initialNumTasks = 1; @@ -828,7 +828,7 @@ public void testPatchConnectorConfig() throws Exception { private Map defaultSinkConnectorProps(String topics) { // setup props for the sink connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPICS_CONFIG, topics); @@ -1025,7 +1025,7 @@ public void testTasksMaxEnforcement() throws Exception { int maxTasks = 1; connectorProps.put(TASKS_MAX_CONFIG, Integer.toString(maxTasks)); int numTasks = 2; - connectorProps.put(MonitorableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); + connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); connect.configureConnector(CONNECTOR_NAME, connectorProps); // A connector that generates excessive tasks will be failed with an expected error message @@ -1057,7 +1057,7 @@ public void testTasksMaxEnforcement() throws Exception { ); for (int i = 0; i < numTasks; i++) { - Map taskConfig = MonitorableSourceConnector.taskConfig( + Map taskConfig = TestableSourceConnector.taskConfig( connectorProps, CONNECTOR_NAME, i @@ -1108,7 +1108,7 @@ public void testTasksMaxEnforcement() throws Exception { ); numTasks++; - connectorProps.put(MonitorableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); + connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); connect.configureConnector(CONNECTOR_NAME, connectorProps); // A connector will be allowed to generate excessive tasks when tasks.max.enforce is set to false @@ -1119,7 +1119,7 @@ public void testTasksMaxEnforcement() throws Exception { ); numTasks = maxTasks; - connectorProps.put(MonitorableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); + connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); connectorProps.put(TASKS_MAX_ENFORCE_CONFIG, "true"); connect.configureConnector(CONNECTOR_NAME, connectorProps); @@ -1130,7 +1130,7 @@ public void testTasksMaxEnforcement() throws Exception { ); numTasks = maxTasks + 1; - connectorProps.put(MonitorableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); + connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); connect.configureConnector(CONNECTOR_NAME, connectorProps); // A connector that generates excessive tasks after being reconfigured will be failed, but its existing tasks will continue running @@ -1424,7 +1424,7 @@ public void testPluginAliases() throws Exception { final String sourceConnectorName = 
"plugins-alias-test-source"; Map sourceConnectorConfig = new HashMap<>(baseConnectorConfig); // Aliased source connector class - sourceConnectorConfig.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + sourceConnectorConfig.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); // Connector-specific properties sourceConnectorConfig.put(TOPIC_CONFIG, topic); sourceConnectorConfig.put("throughput", "10"); @@ -1438,7 +1438,7 @@ public void testPluginAliases() throws Exception { final String sinkConnectorName = "plugins-alias-test-sink"; Map sinkConnectorConfig = new HashMap<>(baseConnectorConfig); // Aliased sink connector class - sinkConnectorConfig.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + sinkConnectorConfig.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); // Connector-specific properties sinkConnectorConfig.put(TOPICS_CONFIG, topic); // Create the connector and ensure it and its tasks can start @@ -1450,7 +1450,7 @@ public void testPluginAliases() throws Exception { private Map defaultSourceConnectorProps(String topic) { // setup props for the source connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put("throughput", "10"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java index a127f85b12de..7e023236a5c8 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java @@ -153,7 +153,7 @@ private void assertPassCreateConnector(String policy, Map props) public Map basicConnectorConfig() { Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPICS_CONFIG, "test-topic"); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java index 074c6eb91fb2..aa6c89d63a2b 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java @@ -45,7 +45,7 @@ public class ConnectorHandle { private final StartAndStopCounter startAndStopCounter = new StartAndStopCounter(); private CountDownLatch recordsRemainingLatch; - private CountDownLatch recordsToCommitLatch; + public CountDownLatch recordsToCommitLatch; private int expectedRecords = -1; private int expectedCommits = -1; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java index 1c398a223967..54dab62a924c 100644 --- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java @@ -42,7 +42,7 @@ import javax.ws.rs.core.Response; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -415,7 +415,7 @@ private String taskId(int i) { private Map defaultSourceConnectorProps(String topic) { // setup up props for the source connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put("throughput", "10"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java index fb4bbcdf408e..c5fcfbca0a7d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java @@ -46,7 +46,7 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -296,7 +296,7 @@ public void assertNoTopicStatusInStatusTopic() { private Map defaultSourceConnectorProps(String topic) { // setup up props for the source connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put("throughput", String.valueOf(10)); @@ -311,7 +311,7 @@ private Map defaultSourceConnectorProps(String topic) { private Map defaultSinkConnectorProps(String... 
topics) { // setup up props for the sink connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPICS_CONFIG, String.join(",", topics)); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorValidationIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorValidationIntegrationTest.java index 2805504e360d..eb8b59de015d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorValidationIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorValidationIntegrationTest.java @@ -39,7 +39,7 @@ import java.util.Map; import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX; import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG; @@ -228,7 +228,7 @@ public void testConnectorHasInvalidTransformClass() throws InterruptedException Map config = defaultSinkConnectorProps(); String transformName = "t"; config.put(TRANSFORMS_CONFIG, transformName); - config.put(TRANSFORMS_CONFIG + "." + transformName + ".type", MonitorableSinkConnector.class.getName()); + config.put(TRANSFORMS_CONFIG + "." + transformName + ".type", TestableSinkConnector.class.getName()); connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation( config.get(CONNECTOR_CLASS_CONFIG), config, @@ -289,7 +289,7 @@ public void testConnectorHasInvalidPredicateClass() throws InterruptedException Map config = defaultSinkConnectorProps(); String predicateName = "p"; config.put(PREDICATES_CONFIG, predicateName); - config.put(PREDICATES_CONFIG + "." + predicateName + ".type", MonitorableSinkConnector.class.getName()); + config.put(PREDICATES_CONFIG + "." 
+ predicateName + ".type", TestableSinkConnector.class.getName()); connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation( config.get(CONNECTOR_CLASS_CONFIG), config, @@ -315,7 +315,7 @@ public void testConnectorHasMissingConverterClass() throws InterruptedException @Test public void testConnectorHasInvalidConverterClassType() throws InterruptedException { Map config = defaultSinkConnectorProps(); - config.put(KEY_CONVERTER_CLASS_CONFIG, MonitorableSinkConnector.class.getName()); + config.put(KEY_CONVERTER_CLASS_CONFIG, TestableSinkConnector.class.getName()); connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation( config.get(CONNECTOR_CLASS_CONFIG), config, @@ -413,7 +413,7 @@ public void testConnectorHasMissingHeaderConverterClass() throws InterruptedExce @Test public void testConnectorHasInvalidHeaderConverterClassType() throws InterruptedException { Map config = defaultSinkConnectorProps(); - config.put(HEADER_CONVERTER_CLASS_CONFIG, MonitorableSinkConnector.class.getName()); + config.put(HEADER_CONVERTER_CLASS_CONFIG, TestableSinkConnector.class.getName()); connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation( config.get(CONNECTOR_CLASS_CONFIG), config, @@ -560,7 +560,7 @@ private Map defaultSourceConnectorProps() { // setup up props for the source connector Map props = new HashMap<>(); props.put(NAME_CONFIG, "source-connector"); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, "1"); props.put(TOPIC_CONFIG, "t1"); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -572,7 +572,7 @@ private Map defaultSinkConnectorProps() { // setup up props for the sink connector Map props = new HashMap<>(); props.put(NAME_CONFIG, "sink-connector"); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, "1"); props.put(TOPICS_CONFIG, "t1"); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrantRecordSinkConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrantRecordSinkConnector.java index bcac3505f5de..c87b854d4bbb 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrantRecordSinkConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrantRecordSinkConnector.java @@ -30,14 +30,14 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -public class ErrantRecordSinkConnector extends MonitorableSinkConnector { +public class ErrantRecordSinkConnector extends TestableSinkConnector { @Override public Class taskClass() { return ErrantRecordSinkTask.class; } - public static class ErrantRecordSinkTask extends MonitorableSinkTask { + public static class ErrantRecordSinkTask extends TestableSinkTask { private ErrantRecordReporter reporter; private ExecutorService executorService; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java index 256629f4b11b..49021db20d4e 100644 --- 
a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java @@ -111,7 +111,7 @@ public void testSkipRetryAndDLQWithHeaders() throws Exception { // setup connector config Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPICS_CONFIG, "test-topic"); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java index 6bb12e8f1784..52d5c634bb58 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java @@ -84,11 +84,11 @@ import static org.apache.kafka.clients.producer.ProducerConfig.CLIENT_ID_CONFIG; import static org.apache.kafka.clients.producer.ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG; import static org.apache.kafka.clients.producer.ProducerConfig.TRANSACTIONAL_ID_CONFIG; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.CUSTOM_TRANSACTION_BOUNDARIES_CONFIG; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.MAX_MESSAGES_PER_SECOND_CONFIG; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.MESSAGES_PER_POLL_CONFIG; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.CUSTOM_TRANSACTION_BOUNDARIES_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.MAX_MESSAGES_PER_SECOND_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.MESSAGES_PER_POLL_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX; @@ -183,7 +183,7 @@ public void testPreflightValidation() { startConnect(); Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, "1"); props.put(TOPIC_CONFIG, "topic"); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -194,8 +194,8 @@ public void testPreflightValidation() { props.put(EXACTLY_ONCE_SUPPORT_CONFIG, "required"); // Connector will return null from SourceConnector::exactlyOnceSupport - props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, MonitorableSourceConnector.EXACTLY_ONCE_NULL); - ConfigInfos validation = 
connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, TestableSourceConnector.EXACTLY_ONCE_NULL); + ConfigInfos validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); ConfigInfo propertyValidation = findConfigInfo(EXACTLY_ONCE_SUPPORT_CONFIG, validation); @@ -203,56 +203,56 @@ public void testPreflightValidation() { "Preflight validation for exactly-once support property should have at least one error message"); // Connector will return UNSUPPORTED from SourceConnector::exactlyOnceSupport - props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, MonitorableSourceConnector.EXACTLY_ONCE_UNSUPPORTED); - validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, TestableSourceConnector.EXACTLY_ONCE_UNSUPPORTED); + validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); propertyValidation = findConfigInfo(EXACTLY_ONCE_SUPPORT_CONFIG, validation); assertFalse(propertyValidation.configValue().errors().isEmpty(), "Preflight validation for exactly-once support property should have at least one error message"); // Connector will throw an exception from SourceConnector::exactlyOnceSupport - props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, MonitorableSourceConnector.EXACTLY_ONCE_FAIL); - validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, TestableSourceConnector.EXACTLY_ONCE_FAIL); + validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); propertyValidation = findConfigInfo(EXACTLY_ONCE_SUPPORT_CONFIG, validation); assertFalse(propertyValidation.configValue().errors().isEmpty(), "Preflight validation for exactly-once support property should have at least one error message"); // Connector will return SUPPORTED from SourceConnector::exactlyOnceSupport - props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, MonitorableSourceConnector.EXACTLY_ONCE_SUPPORTED); - validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, TestableSourceConnector.EXACTLY_ONCE_SUPPORTED); + validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); assertEquals(0, validation.errorCount(), "Preflight validation should have zero errors"); // Test out the transaction boundary definition property props.put(TRANSACTION_BOUNDARY_CONFIG, CONNECTOR.toString()); // Connector will return null from SourceConnector::canDefineTransactionBoundaries - props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, MonitorableSourceConnector.TRANSACTION_BOUNDARIES_NULL); - validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TestableSourceConnector.TRANSACTION_BOUNDARIES_NULL); + validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one 
error"); propertyValidation = findConfigInfo(TRANSACTION_BOUNDARY_CONFIG, validation); assertFalse(propertyValidation.configValue().errors().isEmpty(), "Preflight validation for transaction boundary property should have at least one error message"); // Connector will return UNSUPPORTED from SourceConnector::canDefineTransactionBoundaries - props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, MonitorableSourceConnector.TRANSACTION_BOUNDARIES_UNSUPPORTED); - validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TestableSourceConnector.TRANSACTION_BOUNDARIES_UNSUPPORTED); + validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); propertyValidation = findConfigInfo(TRANSACTION_BOUNDARY_CONFIG, validation); assertFalse(propertyValidation.configValue().errors().isEmpty(), "Preflight validation for transaction boundary property should have at least one error message"); // Connector will throw an exception from SourceConnector::canDefineTransactionBoundaries - props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, MonitorableSourceConnector.TRANSACTION_BOUNDARIES_FAIL); - validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TestableSourceConnector.TRANSACTION_BOUNDARIES_FAIL); + validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); propertyValidation = findConfigInfo(TRANSACTION_BOUNDARY_CONFIG, validation); assertFalse(propertyValidation.configValue().errors().isEmpty(), "Preflight validation for transaction boundary property should have at least one error message"); // Connector will return SUPPORTED from SourceConnector::canDefineTransactionBoundaries - props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, MonitorableSourceConnector.TRANSACTION_BOUNDARIES_SUPPORTED); - validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TestableSourceConnector.TRANSACTION_BOUNDARIES_SUPPORTED); + validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); assertEquals(0, validation.errorCount(), "Preflight validation should have zero errors"); } @@ -274,7 +274,7 @@ public void testPollBoundary() throws Exception { int numTasks = 1; Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, Integer.toString(numTasks)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -333,7 +333,7 @@ public void testIntervalBoundary() throws Exception { int numTasks = 1; Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, Integer.toString(numTasks)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -393,14 +393,14 @@ public void testConnectorBoundary() throws Exception { connect.kafka().createTopic(topic, 
3); Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, "1"); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(NAME_CONFIG, CONNECTOR_NAME); props.put(TRANSACTION_BOUNDARY_CONFIG, CONNECTOR.toString()); - props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, MonitorableSourceConnector.TRANSACTION_BOUNDARIES_SUPPORTED); + props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TestableSourceConnector.TRANSACTION_BOUNDARIES_SUPPORTED); props.put(MESSAGES_PER_POLL_CONFIG, MESSAGES_PER_POLL); props.put(MAX_MESSAGES_PER_SECOND_CONFIG, MESSAGES_PER_SECOND); @@ -495,7 +495,7 @@ public void testFencedLeaderRecovery() throws Exception { int numTasks = 1; Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, Integer.toString(numTasks)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -562,7 +562,7 @@ public void testConnectorReconfiguration() throws Exception { connect.kafka().createTopic(topic, 3); Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -669,7 +669,7 @@ public void testTasksFailOnInabilityToFence() throws Exception { Map props = new HashMap<>(); int tasksMax = 2; // Use two tasks since single-task connectors don't require zombie fencing - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -824,7 +824,7 @@ public void testSeparateOffsetsTopic() throws Exception { int numTasks = 1; Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, Integer.toString(numTasks)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -1123,7 +1123,7 @@ private Map> parseOffsetForTasks(ConsumerRecords partition = assertAndCast(key.get(1), Map.class, "Key[1]"); Object taskIdObject = partition.get("task.id"); - assertNotNull(taskIdObject, "Serialized source partition should contain 'task.id' field from MonitorableSourceConnector"); + assertNotNull(taskIdObject, "Serialized source partition should contain 'task.id' field from TestableSourceConnector"); String taskId = assertAndCast(taskIdObject, String.class, "task ID"); assertTrue(taskId.startsWith(CONNECTOR_NAME + "-"), "task ID should match pattern '-"); String taskIdRemainder = taskId.substring(CONNECTOR_NAME.length() + 1); @@ -1138,7 +1138,7 @@ private Map> parseOffsetForTasks(ConsumerRecords value = assertAndCast(valueObject, Map.class, "Value"); 
Object seqnoObject = value.get("saved"); - assertNotNull(seqnoObject, "Serialized source offset should contain 'seqno' field from MonitorableSourceConnector"); + assertNotNull(seqnoObject, "Serialized source offset should contain 'seqno' field from TestableSourceConnector"); long seqno = assertAndCast(seqnoObject, Long.class, "Seqno offset field"); result.computeIfAbsent(taskNum, t -> new ArrayList<>()).add(seqno); @@ -1163,7 +1163,7 @@ private static T assertAndCast(Object o, Class klass, String objectDescri private StartAndStopLatch connectorAndTaskStart(int numTasks) { connectorHandle.clearTasks(); IntStream.range(0, numTasks) - .mapToObj(i -> MonitorableSourceConnector.taskId(CONNECTOR_NAME, i)) + .mapToObj(i -> TestableSourceConnector.taskId(CONNECTOR_NAME, i)) .forEach(connectorHandle::taskHandle); return connectorHandle.expectedStarts(1, true); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExampleConnectIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExampleConnectIntegrationTest.java index d131fd4efc63..6263c8ab96cc 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExampleConnectIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExampleConnectIntegrationTest.java @@ -63,8 +63,8 @@ public class ExampleConnectIntegrationTest { private static final int NUM_TASKS = 3; private static final int NUM_WORKERS = 3; private static final String CONNECTOR_NAME = "simple-conn"; - private static final String SINK_CONNECTOR_CLASS_NAME = MonitorableSinkConnector.class.getSimpleName(); - private static final String SOURCE_CONNECTOR_CLASS_NAME = MonitorableSourceConnector.class.getSimpleName(); + private static final String SINK_CONNECTOR_CLASS_NAME = TestableSinkConnector.class.getSimpleName(); + private static final String SOURCE_CONNECTOR_CLASS_NAME = TestableSourceConnector.class.getSimpleName(); private EmbeddedConnectCluster connect; private ConnectorHandle connectorHandle; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java index 1084ddf6732c..c181c6aaa3fc 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java @@ -16,48 +16,28 @@ */ package org.apache.kafka.connect.integration; -import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.metrics.Gauge; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.connector.Task; -import org.apache.kafka.connect.runtime.SampleSinkConnector; import org.apache.kafka.connect.sink.SinkRecord; -import org.apache.kafka.connect.sink.SinkTask; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; -import java.util.List; +import java.util.Collections; import java.util.Map; -/** - * A sink connector that is used in Apache Kafka integration tests to verify the behavior of the - * Connect framework, but that can be used in other integration tests as a simple 
connector that - * consumes and counts records. This class provides methods to find task instances - * which are initiated by the embedded connector, and wait for them to consume a desired number of - * messages. - */ -public class MonitorableSinkConnector extends SampleSinkConnector { - - private static final Logger log = LoggerFactory.getLogger(MonitorableSinkConnector.class); +public class MonitorableSinkConnector extends TestableSinkConnector { - // Boolean valued configuration that determines whether MonitorableSinkConnector::alterOffsets should return true or false - public static final String ALTER_OFFSETS_RESULT = "alter.offsets.result"; - - private String connectorName; - private Map commonConfigs; - private ConnectorHandle connectorHandle; + public static MetricName metricsName = null; + public static final String VALUE = "started"; @Override public void start(Map props) { - connectorHandle = RuntimeHandles.get().connectorHandle(props.get("name")); - connectorName = props.get("name"); - commonConfigs = props; - log.info("Starting connector {}", props.get("name")); - connectorHandle.recordConnectorStart(); + super.start(props); + PluginMetrics pluginMetrics = context.pluginMetrics(); + metricsName = pluginMetrics.metricName("start", "description", Collections.emptyMap()); + pluginMetrics.addMetric(metricsName, (Gauge) (config, now) -> VALUE); } @Override @@ -65,103 +45,26 @@ public Class taskClass() { return MonitorableSinkTask.class; } - @Override - public List> taskConfigs(int maxTasks) { - List> configs = new ArrayList<>(); - for (int i = 0; i < maxTasks; i++) { - Map config = new HashMap<>(commonConfigs); - config.put("connector.name", connectorName); - config.put("task.id", connectorName + "-" + i); - configs.add(config); - } - return configs; - } - - @Override - public void stop() { - log.info("Stopped {} connector {}", this.getClass().getSimpleName(), connectorName); - connectorHandle.recordConnectorStop(); - } - - @Override - public ConfigDef config() { - return new ConfigDef(); - } - - @Override - public boolean alterOffsets(Map connectorConfig, Map offsets) { - return Boolean.parseBoolean(connectorConfig.get(ALTER_OFFSETS_RESULT)); - } - - public static class MonitorableSinkTask extends SinkTask { + public static class MonitorableSinkTask extends TestableSinkTask { - private String taskId; - TaskHandle taskHandle; - Map committedOffsets; - Map> cachedTopicPartitions; - - public MonitorableSinkTask() { - this.committedOffsets = new HashMap<>(); - this.cachedTopicPartitions = new HashMap<>(); - } - - @Override - public String version() { - return "unknown"; - } + public static MetricName metricsName = null; + private int count = 0; @Override public void start(Map props) { - taskId = props.get("task.id"); - String connectorName = props.get("connector.name"); - taskHandle = RuntimeHandles.get().connectorHandle(connectorName).taskHandle(taskId); - log.debug("Starting task {}", taskId); - taskHandle.recordTaskStart(); - } - - @Override - public void open(Collection partitions) { - log.debug("Opening partitions {}", partitions); - taskHandle.partitionsAssigned(partitions); - } - - @Override - public void close(Collection partitions) { - log.debug("Closing partitions {}", partitions); - taskHandle.partitionsRevoked(partitions); - partitions.forEach(committedOffsets::remove); + super.start(props); + PluginMetrics pluginMetrics = context.pluginMetrics(); + metricsName = pluginMetrics.metricName("put", "description", Collections.emptyMap()); + pluginMetrics.addMetric(metricsName, 
(Measurable) (config, now) -> count); } @Override public void put(Collection records) { for (SinkRecord rec : records) { - taskHandle.record(rec); - TopicPartition tp = cachedTopicPartitions - .computeIfAbsent(rec.topic(), v -> new HashMap<>()) - .computeIfAbsent(rec.kafkaPartition(), v -> new TopicPartition(rec.topic(), rec.kafkaPartition())); - committedOffsets.put(tp, committedOffsets.getOrDefault(tp, 0) + 1); - log.trace("Task {} obtained record (key='{}' value='{}')", taskId, rec.key(), rec.value()); + taskHandle.record(); + count++; } } - @Override - public Map preCommit(Map offsets) { - taskHandle.partitionsCommitted(offsets.keySet()); - offsets.forEach((tp, offset) -> { - int recordsSinceLastCommit = committedOffsets.getOrDefault(tp, 0); - if (recordsSinceLastCommit != 0) { - taskHandle.commit(recordsSinceLastCommit); - log.debug("Forwarding to framework request to commit {} records for {}", recordsSinceLastCommit, tp); - committedOffsets.put(tp, 0); - } - }); - return offsets; - } - - @Override - public void stop() { - log.info("Stopped {} task {}", this.getClass().getSimpleName(), taskId); - taskHandle.recordTaskStop(); - } } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkIntegrationTest.java new file mode 100644 index 000000000000..082359dea393 --- /dev/null +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkIntegrationTest.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.connect.integration; + +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo; +import org.apache.kafka.connect.storage.StringConverter; +import org.apache.kafka.connect.util.clusters.EmbeddedConnectStandalone; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.SinkConnectorConfig.TOPICS_CONFIG; +import static org.apache.kafka.test.TestUtils.waitForCondition; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Integration test for a sink connector defining metrics via the PluginMetrics API + */ +@Tag("integration") +@Timeout(value = 600) +public class MonitorableSinkIntegrationTest { + + private static final Logger log = LoggerFactory.getLogger(MonitorableSinkIntegrationTest.class); + + private static final String CONNECTOR_NAME = "monitorable-sink"; + private static final String TASK_ID = CONNECTOR_NAME + "-0"; + private static final int NUM_RECORDS_PRODUCED = 1000; + private static final int NUM_TASKS = 1; + private static final long CONNECTOR_SETUP_DURATION_MS = TimeUnit.SECONDS.toMillis(60); + private static final long CONSUME_MAX_DURATION_MS = TimeUnit.SECONDS.toMillis(30); + + private EmbeddedConnectStandalone connect; + private ConnectorHandle connectorHandle; + + @BeforeEach + public void setup() throws InterruptedException { + // setup Connect cluster with defaults + connect = new EmbeddedConnectStandalone.Builder().build(); + + // start Connect cluster + connect.start(); + + // get connector handles before starting test. 
+ connectorHandle = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME); + } + + @AfterEach + public void close() { + connect.stop(); + } + + @Test + public void testMonitorableSinkConnectorAndTask() throws Exception { + // create test topic + connect.kafka().createTopic("test-topic"); + + Map props = new HashMap<>(); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + props.put(TASKS_MAX_CONFIG, "1"); + props.put(TOPICS_CONFIG, "test-topic"); + props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); + props.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); + + // set expected records to successfully reach the task + connectorHandle.taskHandle(TASK_ID).expectedRecords(NUM_RECORDS_PRODUCED); + + connect.configureConnector(CONNECTOR_NAME, props); + connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, NUM_TASKS, + "Connector tasks did not start in time."); + + waitForCondition(this::checkForPartitionAssignment, + CONNECTOR_SETUP_DURATION_MS, + "Connector task was not assigned a partition."); + + // check connector metric + Map metrics = connect.herder().worker().metrics().metrics().metrics(); + MetricName connectorMetric = MonitorableSinkConnector.metricsName; + assertTrue(metrics.containsKey(connectorMetric)); + assertEquals(CONNECTOR_NAME, connectorMetric.tags().get("connector")); + KafkaMetric kafkaMetric = metrics.get(connectorMetric); + assertEquals(MonitorableSinkConnector.VALUE, kafkaMetric.metricValue()); + + // produce some strings into test topic + for (int i = 0; i < NUM_RECORDS_PRODUCED; i++) { + connect.kafka().produce("test-topic", "key-" + i, "value-" + i); + } + + // wait for records to reach the task + connectorHandle.taskHandle(TASK_ID).awaitRecords(CONSUME_MAX_DURATION_MS); + + // check tasks metric + metrics = connect.herder().worker().metrics().metrics().metrics(); + MetricName taskMetric = MonitorableSinkConnector.MonitorableSinkTask.metricsName; + assertTrue(metrics.containsKey(taskMetric)); + assertEquals(CONNECTOR_NAME, taskMetric.tags().get("connector")); + assertEquals("0", taskMetric.tags().get("task")); + assertEquals((double) NUM_RECORDS_PRODUCED, metrics.get(taskMetric).metricValue()); + + connect.deleteConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorDoesNotExist(CONNECTOR_NAME, + "Connector wasn't deleted in time."); + + // verify metrics have been deleted + metrics = connect.herder().worker().metrics().metrics().metrics(); + assertFalse(metrics.containsKey(connectorMetric)); + assertFalse(metrics.containsKey(taskMetric)); + } + + /** + * Check if a partition was assigned to each task. This method swallows exceptions since it is invoked from a + * {@link org.apache.kafka.test.TestUtils#waitForCondition} that will throw an error if this method continued + * to return false after the specified duration has elapsed. + * + * @return true if each task was assigned a partition each, false if this was not true or an error occurred when + * executing this operation. 
+ */ + private boolean checkForPartitionAssignment() { + try { + ConnectorStateInfo info = connect.connectorStatus(CONNECTOR_NAME); + return info != null && info.tasks().size() == NUM_TASKS + && connectorHandle.taskHandle(TASK_ID).numPartitionsAssigned() == 1; + } catch (Exception e) { + // Log the exception and return that the partitions were not assigned + log.error("Could not check connector state info.", e); + return false; + } + } +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java index 7387c81c599b..1119e7984ddb 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java @@ -16,76 +16,34 @@ */ package org.apache.kafka.connect.integration; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.metrics.Gauge; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.connector.Task; -import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.errors.ConnectException; -import org.apache.kafka.connect.header.ConnectHeaders; -import org.apache.kafka.connect.runtime.SampleSourceConnector; -import org.apache.kafka.connect.source.ConnectorTransactionBoundaries; -import org.apache.kafka.connect.source.ExactlyOnceSupport; import org.apache.kafka.connect.source.SourceRecord; -import org.apache.kafka.connect.source.SourceTask; -import org.apache.kafka.server.util.ThroughputThrottler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; -import java.util.Optional; -import java.util.stream.Collectors; -import java.util.stream.LongStream; -/** - * A source connector that is used in Apache Kafka integration tests to verify the behavior of - * the Connect framework, but that can be used in other integration tests as a simple connector - * that generates records of a fixed structure. 
The rate of record production can be adjusted - * through the configs 'throughput' and 'messages.per.poll' - */ -public class MonitorableSourceConnector extends SampleSourceConnector { - private static final Logger log = LoggerFactory.getLogger(MonitorableSourceConnector.class); - - public static final String TOPIC_CONFIG = "topic"; - public static final String NUM_TASKS = "num.tasks"; - public static final String MESSAGES_PER_POLL_CONFIG = "messages.per.poll"; - public static final String MAX_MESSAGES_PER_SECOND_CONFIG = "throughput"; - public static final String MAX_MESSAGES_PRODUCED_CONFIG = "max.messages"; - - public static final String CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG = "custom.exactly.once.support"; - public static final String EXACTLY_ONCE_SUPPORTED = "supported"; - public static final String EXACTLY_ONCE_UNSUPPORTED = "unsupported"; - public static final String EXACTLY_ONCE_NULL = "null"; - public static final String EXACTLY_ONCE_FAIL = "fail"; +public class MonitorableSourceConnector extends TestableSourceConnector { - public static final String CUSTOM_TRANSACTION_BOUNDARIES_CONFIG = "custom.transaction.boundaries"; - public static final String TRANSACTION_BOUNDARIES_SUPPORTED = "supported"; - public static final String TRANSACTION_BOUNDARIES_UNSUPPORTED = "unsupported"; - public static final String TRANSACTION_BOUNDARIES_NULL = "null"; - public static final String TRANSACTION_BOUNDARIES_FAIL = "fail"; - - // Boolean valued configuration that determines whether MonitorableSourceConnector::alterOffsets should return true or false - public static final String ALTER_OFFSETS_RESULT = "alter.offsets.result"; + private static final Logger log = LoggerFactory.getLogger(MonitorableSourceConnector.class); - private String connectorName; - private ConnectorHandle connectorHandle; - private Map commonConfigs; + public static MetricName metricsName = null; + public static final String VALUE = "started"; @Override public void start(Map props) { - connectorHandle = RuntimeHandles.get().connectorHandle(props.get("name")); - connectorName = connectorHandle.name(); - commonConfigs = props; - log.info("Started {} connector {}", this.getClass().getSimpleName(), connectorName); - connectorHandle.recordConnectorStart(); - if (Boolean.parseBoolean(props.getOrDefault("connector.start.inject.error", "false"))) { - throw new RuntimeException("Injecting errors during connector start"); - } + super.start(props); + log.info("starting MonitorableSourceConnector"); + PluginMetrics pluginMetrics = context.pluginMetrics(); + metricsName = pluginMetrics.metricName("start", "description", Collections.emptyMap()); + pluginMetrics.addMetric(metricsName, (Gauge) (config, now) -> VALUE); } @Override @@ -93,222 +51,34 @@ public Class taskClass() { return MonitorableSourceTask.class; } - @Override - public List> taskConfigs(int maxTasks) { - String numTasksProp = commonConfigs.get(NUM_TASKS); - int numTasks = numTasksProp != null ? 
Integer.parseInt(numTasksProp) : maxTasks; - List> configs = new ArrayList<>(); - for (int i = 0; i < numTasks; i++) { - Map config = taskConfig(commonConfigs, connectorName, i); - configs.add(config); - } - return configs; - } - - public static Map taskConfig( - Map connectorProps, - String connectorName, - int taskNum - ) { - Map result = new HashMap<>(connectorProps); - result.put("connector.name", connectorName); - result.put("task.id", taskId(connectorName, taskNum)); - return result; - } - - @Override - public void stop() { - log.info("Stopped {} connector {}", this.getClass().getSimpleName(), connectorName); - connectorHandle.recordConnectorStop(); - if (Boolean.parseBoolean(commonConfigs.getOrDefault("connector.stop.inject.error", "false"))) { - throw new RuntimeException("Injecting errors during connector stop"); - } - } - - @Override - public ConfigDef config() { - log.info("Configured {} connector {}", this.getClass().getSimpleName(), connectorName); - return new ConfigDef(); - } - - @Override - public ExactlyOnceSupport exactlyOnceSupport(Map connectorConfig) { - String supportLevel = connectorConfig.getOrDefault(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, "null").toLowerCase(Locale.ROOT); - switch (supportLevel) { - case EXACTLY_ONCE_SUPPORTED: - return ExactlyOnceSupport.SUPPORTED; - case EXACTLY_ONCE_UNSUPPORTED: - return ExactlyOnceSupport.UNSUPPORTED; - case EXACTLY_ONCE_FAIL: - throw new ConnectException("oops"); - default: - case EXACTLY_ONCE_NULL: - return null; - } - } - - @Override - public ConnectorTransactionBoundaries canDefineTransactionBoundaries(Map connectorConfig) { - String supportLevel = connectorConfig.getOrDefault(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TRANSACTION_BOUNDARIES_UNSUPPORTED).toLowerCase(Locale.ROOT); - switch (supportLevel) { - case TRANSACTION_BOUNDARIES_SUPPORTED: - return ConnectorTransactionBoundaries.SUPPORTED; - case TRANSACTION_BOUNDARIES_FAIL: - throw new ConnectException("oh no :("); - case TRANSACTION_BOUNDARIES_NULL: - return null; - default: - case TRANSACTION_BOUNDARIES_UNSUPPORTED: - return ConnectorTransactionBoundaries.UNSUPPORTED; - } - } - - @Override - public boolean alterOffsets(Map connectorConfig, Map, Map> offsets) { - return Boolean.parseBoolean(connectorConfig.get(ALTER_OFFSETS_RESULT)); - } - - public static String taskId(String connectorName, int taskId) { - return connectorName + "-" + taskId; - } - - public static class MonitorableSourceTask extends SourceTask { - private String taskId; - private String topicName; - private TaskHandle taskHandle; - private volatile boolean stopped; - private long startingSeqno; - private long seqno; - private int batchSize; - private ThroughputThrottler throttler; - private long maxMessages; + public static class MonitorableSourceTask extends TestableSourceTask { - private long priorTransactionBoundary; - private long nextTransactionBoundary; - - @Override - public String version() { - return "unknown"; - } + public static MetricName metricsName = null; + private int count = 0; @Override public void start(Map props) { - taskId = props.get("task.id"); - String connectorName = props.get("connector.name"); - topicName = props.getOrDefault(TOPIC_CONFIG, "sequential-topic"); - batchSize = Integer.parseInt(props.getOrDefault(MESSAGES_PER_POLL_CONFIG, "1")); - taskHandle = RuntimeHandles.get().connectorHandle(connectorName).taskHandle(taskId); - Map offset = Optional.ofNullable( - context.offsetStorageReader().offset(sourcePartition(taskId))) - .orElse(Collections.emptyMap()); - startingSeqno = 
Optional.ofNullable((Long) offset.get("saved")).orElse(0L); - seqno = startingSeqno; - log.info("Started {} task {} with properties {}", this.getClass().getSimpleName(), taskId, props); - throttler = new ThroughputThrottler(Long.parseLong(props.getOrDefault(MAX_MESSAGES_PER_SECOND_CONFIG, "-1")), System.currentTimeMillis()); - maxMessages = Long.parseLong(props.getOrDefault(MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(Long.MAX_VALUE))); - taskHandle.recordTaskStart(); - priorTransactionBoundary = 0; - nextTransactionBoundary = 1; - if (Boolean.parseBoolean(props.getOrDefault("task-" + taskId + ".start.inject.error", "false"))) { - throw new RuntimeException("Injecting errors during task start"); - } - calculateNextBoundary(); + super.start(props); + log.info("starting MonitorableSourceTask"); + PluginMetrics pluginMetrics = context.pluginMetrics(); + log.info("task pluginMetric {}", pluginMetrics.hashCode()); + metricsName = pluginMetrics.metricName("poll", "description", Collections.emptyMap()); + pluginMetrics.addMetric(metricsName, (Measurable) (config, now) -> count); } @Override public List poll() { - if (!stopped) { - // Don't return any more records since we've already produced the configured maximum number. - if (seqno >= maxMessages) { - return null; + List records = super.poll(); + if (records != null) { + log.info("poll with {} records", records.size()); + for (SourceRecord record : records) { + count++; } - if (throttler.shouldThrottle(seqno - startingSeqno, System.currentTimeMillis())) { - throttler.throttle(); - } - int currentBatchSize = (int) Math.min(maxMessages - seqno, batchSize); - taskHandle.record(currentBatchSize); - log.trace("Returning batch of {} records", currentBatchSize); - return LongStream.range(0, currentBatchSize) - .mapToObj(i -> { - seqno++; - SourceRecord record = new SourceRecord( - sourcePartition(taskId), - sourceOffset(seqno), - topicName, - null, - Schema.STRING_SCHEMA, - "key-" + taskId + "-" + seqno, - Schema.STRING_SCHEMA, - "value-" + taskId + "-" + seqno, - null, - new ConnectHeaders().addLong("header-" + seqno, seqno)); - maybeDefineTransactionBoundary(record); - return record; - }) - .collect(Collectors.toList()); - } - return null; - } - - @Override - public void commit() { - log.info("Task {} committing offsets", taskId); - //TODO: save progress outside the offset topic, potentially in the task handle - } - - @Override - public void commitRecord(SourceRecord record, RecordMetadata metadata) { - log.trace("Committing record: {}", record); - taskHandle.commit(); - } - - @Override - public void stop() { - log.info("Stopped {} task {}", this.getClass().getSimpleName(), taskId); - stopped = true; - taskHandle.recordTaskStop(); - } - - /** - * Calculate the next transaction boundary, i.e., the seqno whose corresponding source record should be used to - * either {@link org.apache.kafka.connect.source.TransactionContext#commitTransaction(SourceRecord) commit} - * or {@link org.apache.kafka.connect.source.TransactionContext#abortTransaction(SourceRecord) abort} the next transaction. - *
- * This connector defines transactions whose size correspond to successive elements of the Fibonacci sequence, - * where transactions with an even number of records are aborted, and those with an odd number of records are committed. - */ - private void calculateNextBoundary() { - while (nextTransactionBoundary <= seqno) { - nextTransactionBoundary += priorTransactionBoundary; - priorTransactionBoundary = nextTransactionBoundary - priorTransactionBoundary; - } - } - - private void maybeDefineTransactionBoundary(SourceRecord record) { - if (context.transactionContext() == null || seqno != nextTransactionBoundary) { - return; - } - long transactionSize = nextTransactionBoundary - priorTransactionBoundary; - - // If the transaction boundary ends on an even-numbered offset, abort it - // Otherwise, commit - boolean abort = nextTransactionBoundary % 2 == 0; - calculateNextBoundary(); - if (abort) { - log.info("Aborting transaction of {} records", transactionSize); - context.transactionContext().abortTransaction(record); } else { - log.info("Committing transaction of {} records", transactionSize); - context.transactionContext().commitTransaction(record); + log.info("null records"); } + return records; } - } - - public static Map sourcePartition(String taskId) { - return Collections.singletonMap("task.id", taskId); - } - public static Map sourceOffset(long seqno) { - return Collections.singletonMap("saved", seqno); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceIntegrationTest.java new file mode 100644 index 000000000000..646b29a64d78 --- /dev/null +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceIntegrationTest.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.connect.integration; + +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.connect.storage.StringConverter; +import org.apache.kafka.connect.util.clusters.EmbeddedConnectStandalone; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.apache.kafka.connect.integration.TestableSourceConnector.MAX_MESSAGES_PER_SECOND_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.MESSAGES_PER_POLL_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; +import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Integration test for a source connector defining metrics via the PluginMetrics API + */ +@Tag("integration") +@Timeout(value = 600) +public class MonitorableSourceIntegrationTest { + + private static final String CONNECTOR_NAME = "monitorable-source"; + private static final String TASK_ID = CONNECTOR_NAME + "-0"; + + private static final int NUM_TASKS = 1; + private static final long SOURCE_TASK_PRODUCE_TIMEOUT_MS = TimeUnit.SECONDS.toMillis(30); + + // Tests require that a minimum but not unreasonably large number of records are sourced. + // Throttle the poll such that a reasonable amount of records are produced while the test runs. + private static final int MINIMUM_MESSAGES = 100; + private static final String MESSAGES_PER_POLL = Integer.toString(MINIMUM_MESSAGES); + private static final String MESSAGES_PER_SECOND = Long.toString(MINIMUM_MESSAGES / 2); + + private EmbeddedConnectStandalone connect; + private ConnectorHandle connectorHandle; + + @BeforeEach + public void setup() throws InterruptedException { + // setup Connect cluster with defaults + connect = new EmbeddedConnectStandalone.Builder().build(); + + // start Connect cluster + connect.start(); + + // get connector handles before starting test. 
+ connectorHandle = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME); + } + + @AfterEach + public void close() { + connect.stop(); + } + + @Test + public void testMonitorableSourceConnectorAndTask() throws Exception { + // create test topic + connect.kafka().createTopic("test-topic"); + + Map props = new HashMap<>(); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(TASKS_MAX_CONFIG, "1"); + props.put(TOPIC_CONFIG, "test-topic"); + props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); + props.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); + props.put(MESSAGES_PER_POLL_CONFIG, MESSAGES_PER_POLL); + props.put(MAX_MESSAGES_PER_SECOND_CONFIG, MESSAGES_PER_SECOND); + + // set expected records to successfully reach the task + // expect all records to be consumed and committed by the task + connectorHandle.taskHandle(TASK_ID).expectedRecords(MINIMUM_MESSAGES); + connectorHandle.taskHandle(TASK_ID).expectedCommits(MINIMUM_MESSAGES); + + connect.configureConnector(CONNECTOR_NAME, props); + connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, NUM_TASKS, + "Connector tasks did not start in time."); + + // wait for the connector tasks to produce enough records + connectorHandle.taskHandle(TASK_ID).awaitRecords(SOURCE_TASK_PRODUCE_TIMEOUT_MS); + connectorHandle.taskHandle(TASK_ID).awaitCommits(TimeUnit.MINUTES.toMillis(1)); + + // check connector metric + Map metrics = connect.herder().worker().metrics().metrics().metrics(); + MetricName connectorMetric = MonitorableSourceConnector.metricsName; + assertTrue(metrics.containsKey(connectorMetric)); + assertEquals(CONNECTOR_NAME, connectorMetric.tags().get("connector")); + KafkaMetric kafkaMetric = metrics.get(connectorMetric); + assertEquals(MonitorableSourceConnector.VALUE, kafkaMetric.metricValue()); + + // check tasks metric + metrics = connect.herder().worker().metrics().metrics().metrics(); + MetricName taskMetric = MonitorableSourceConnector.MonitorableSourceTask.metricsName; + assertTrue(metrics.containsKey(taskMetric)); + assertEquals(CONNECTOR_NAME, taskMetric.tags().get("connector")); + assertEquals("0", taskMetric.tags().get("task")); + assertTrue(MINIMUM_MESSAGES <= (double) metrics.get(taskMetric).metricValue()); + + connect.deleteConnector(CONNECTOR_NAME); + connect.assertions().assertConnectorDoesNotExist(CONNECTOR_NAME, + "Connector wasn't deleted in time."); + + // verify metrics have been deleted + metrics = connect.herder().worker().metrics().metrics().metrics(); + assertFalse(metrics.containsKey(connectorMetric)); + assertFalse(metrics.containsKey(taskMetric)); + } +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java index 78c9a6140655..5793192a5d37 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java @@ -58,7 +58,7 @@ import javax.ws.rs.core.Response; import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static 
org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; import static org.apache.kafka.connect.runtime.SinkConnectorConfig.TOPICS_CONFIG; @@ -277,7 +277,7 @@ private void getAndVerifySourceConnectorOffsets(Map connectorCon "Source connector offsets should reflect the expected number of records produced"); // Each task should produce more records - connectorConfigs.put(MonitorableSourceConnector.MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(2 * NUM_RECORDS_PER_PARTITION)); + connectorConfigs.put(TestableSourceConnector.MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(2 * NUM_RECORDS_PER_PARTITION)); connect.configureConnector(connectorName, connectorConfigs); verifyExpectedSourceConnectorOffsets(connectorName, NUM_TASKS, 2 * NUM_RECORDS_PER_PARTITION, @@ -300,7 +300,7 @@ public void testAlterOffsetsNonStoppedConnector() throws Exception { "Connector tasks did not start in time."); List offsets = new ArrayList<>(); - // The MonitorableSourceConnector has a source partition per task + // The TestableSourceConnector has a source partition per task for (int i = 0; i < NUM_TASKS; i++) { offsets.add( new ConnectorOffset(Collections.singletonMap("task.id", connectorName + "-" + i), @@ -415,7 +415,7 @@ private void alterAndVerifySinkConnectorOffsets(Map connectorCon "Sink connector consumer group offsets should reflect the altered offsets"); // Update the connector's configs; this time expect SinkConnector::alterOffsets to return true - connectorConfigs.put(MonitorableSinkConnector.ALTER_OFFSETS_RESULT, "true"); + connectorConfigs.put(TestableSinkConnector.ALTER_OFFSETS_RESULT, "true"); connect.configureConnector(connectorName, connectorConfigs); // Alter offsets again while the connector is still in a stopped state @@ -598,7 +598,7 @@ public void alterAndVerifySourceConnectorOffsets(Map connectorCo ); List offsetsToAlter = new ArrayList<>(); - // The MonitorableSourceConnector has a source partition per task + // The TestableSourceConnector has a source partition per task for (int i = 0; i < NUM_TASKS; i++) { offsetsToAlter.add( new ConnectorOffset(Collections.singletonMap("task.id", connectorName + "-" + i), @@ -614,12 +614,12 @@ public void alterAndVerifySourceConnectorOffsets(Map connectorCo "Source connector offsets should reflect the altered offsets"); // Update the connector's configs; this time expect SourceConnector::alterOffsets to return true - connectorConfigs.put(MonitorableSourceConnector.ALTER_OFFSETS_RESULT, "true"); + connectorConfigs.put(TestableSourceConnector.ALTER_OFFSETS_RESULT, "true"); connect.configureConnector(connectorName, connectorConfigs); // Alter offsets again while connector is in stopped state offsetsToAlter = new ArrayList<>(); - // The MonitorableSourceConnector has a source partition per task + // The TestableSourceConnector has a source partition per task for (int i = 0; i < NUM_TASKS; i++) { offsetsToAlter.add( new ConnectorOffset(Collections.singletonMap("task.id", connectorName + "-" + i), @@ -908,7 +908,7 @@ public void resetAndVerifySourceConnectorOffsets(Map connectorCo private Map baseSinkConnectorConfigs() { Map configs = new HashMap<>(); - configs.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + configs.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); configs.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); configs.put(TOPICS_CONFIG, topic); configs.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -918,11 +918,11 @@ private Map baseSinkConnectorConfigs() { private Map 
baseSourceConnectorConfigs() { Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); - props.put(MonitorableSourceConnector.MESSAGES_PER_POLL_CONFIG, "3"); - props.put(MonitorableSourceConnector.MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(NUM_RECORDS_PER_PARTITION)); + props.put(TestableSourceConnector.MESSAGES_PER_POLL_CONFIG, "3"); + props.put(TestableSourceConnector.MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(NUM_RECORDS_PER_PARTITION)); props.put(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(DEFAULT_TOPIC_CREATION_PREFIX + REPLICATION_FACTOR_CONFIG, "1"); @@ -1044,7 +1044,7 @@ private void verifyExpectedSinkConnectorOffsets(String connectorName, String exp * Verify whether the actual offsets for a source connector match the expected offsets. The verification is done using the * GET /connectors/{connector}/offsets REST API which is repeatedly queried until the offsets match * or the {@link #OFFSET_READ_TIMEOUT_MS timeout} is reached. Note that this assumes that the source connector is a - * {@link MonitorableSourceConnector} + * {@link TestableSourceConnector} * * @param connectorName the name of the source connector whose offsets are to be verified * @param numTasks the number of tasks for the source connector @@ -1057,7 +1057,7 @@ private void verifyExpectedSourceConnectorOffsets(String connectorName, int numT int expectedOffset, String conditionDetails) throws InterruptedException { waitForCondition(() -> { ConnectorOffsets offsets = connect.connectorOffsets(connectorName); - // The MonitorableSourceConnector has a source partition per task + // The TestableSourceConnector has a source partition per task if (offsets.offsets().size() != numTasks) { return false; } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java index ff028928c257..b1e6db89b01e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java @@ -38,7 +38,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -298,7 +298,7 @@ public void testMultipleWorkersRejoining() throws Exception { private Map defaultSourceConnectorProps(String topic) { // setup up props for the source connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); 
props.put(TOPIC_CONFIG, topic); props.put("throughput", String.valueOf(10)); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java index 8ccc31baa86c..e43eb5558c68 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java @@ -92,7 +92,7 @@ public void testRestExtensionApi() throws InterruptedException { try { // setup up props for the connector Map connectorProps = new HashMap<>(); - connectorProps.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + connectorProps.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); connectorProps.put(TASKS_MAX_CONFIG, String.valueOf(1)); connectorProps.put(TOPICS_CONFIG, "test-topic"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java index 81b18d03442f..325ace726c0d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java @@ -21,6 +21,8 @@ import org.apache.kafka.common.network.ConnectionMode; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.runtime.Herder; +import org.apache.kafka.connect.runtime.MockConnectMetrics; +import org.apache.kafka.connect.runtime.Worker; import org.apache.kafka.connect.runtime.WorkerConfig; import org.apache.kafka.connect.runtime.distributed.DistributedConfig; import org.apache.kafka.connect.runtime.distributed.NotLeaderException; @@ -75,6 +77,7 @@ import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.when; @ExtendWith(MockitoExtension.class) @@ -92,6 +95,8 @@ public class RestForwardingIntegrationTest { private ConnectRestServer leaderServer; @Mock private Herder leaderHerder; + @Mock + private Worker worker; private SslContextFactory.Client factory; private CloseableHttpClient httpClient; @@ -167,6 +172,8 @@ public void testRestForwardToLeader(boolean dualListener, boolean followerSsl, b followerServer = new ConnectRestServer(null, followerClient, followerConfig.originals()); followerServer.initializeServer(); when(followerHerder.plugins()).thenReturn(plugins); + doReturn(new MockConnectMetrics()).when(worker).metrics(); + doReturn(worker).when(followerHerder).worker(); followerServer.initializeResources(followerHerder); // Leader worker setup @@ -174,6 +181,8 @@ public void testRestForwardToLeader(boolean dualListener, boolean followerSsl, b leaderServer = new ConnectRestServer(null, leaderClient, leaderConfig.originals()); leaderServer.initializeServer(); when(leaderHerder.plugins()).thenReturn(plugins); + doReturn(new MockConnectMetrics()).when(worker).metrics(); + doReturn(worker).when(leaderHerder).worker(); leaderServer.initializeResources(leaderHerder); // External client setup diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java 
b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java index 7969471918e1..febf7dcdfea4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java @@ -122,7 +122,7 @@ public void ensureInternalEndpointIsSecured() throws Throwable { // Create the connector now // setup up props for the sink connector Map connectorProps = new HashMap<>(); - connectorProps.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + connectorProps.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); connectorProps.put(TASKS_MAX_CONFIG, String.valueOf(1)); connectorProps.put(TOPICS_CONFIG, "test-topic"); connectorProps.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java index 961eeb70f995..8c538ee3fc50 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java @@ -309,7 +309,7 @@ public void testCooperativeConsumerPartitionAssignment() throws Exception { private Map baseSinkConnectorProps(String topics) { Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPICS_CONFIG, topics); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java index aa1dc6bcf94f..549394fd0396 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java @@ -31,7 +31,7 @@ import java.util.Properties; import java.util.stream.IntStream; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.NAME_CONFIG; @@ -208,7 +208,7 @@ public void testSwitchingToTopicCreationEnabled() throws InterruptedException { private Map defaultSourceConnectorProps(String topic) { // setup up props for the source connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put("throughput", String.valueOf(10)); diff --git 
a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java index 69a65ba7bfbd..76d6dab200b2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java @@ -45,7 +45,7 @@ import static org.apache.kafka.connect.integration.BlockingConnectorTest.Block.BLOCK_CONFIG; import static org.apache.kafka.connect.integration.BlockingConnectorTest.CONNECTOR_START; import static org.apache.kafka.connect.integration.BlockingConnectorTest.CONNECTOR_TASK_CONFIGS; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.NAME_CONFIG; @@ -371,7 +371,7 @@ private Map defaultSourceConnectorProps(String topic) { // setup props for the source connector Map props = new HashMap<>(); props.put(NAME_CONFIG, CONNECTOR_NAME); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TestableSinkConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TestableSinkConnector.java new file mode 100644 index 000000000000..fa0d5da93132 --- /dev/null +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TestableSinkConnector.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.connect.integration; + +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.connect.connector.Task; +import org.apache.kafka.connect.runtime.SampleSinkConnector; +import org.apache.kafka.connect.sink.SinkRecord; +import org.apache.kafka.connect.sink.SinkTask; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * A sink connector that is used in Apache Kafka integration tests to verify the behavior of the + * Connect framework, but that can be used in other integration tests as a simple connector that + * consumes and counts records. This class provides methods to find task instances + * which are initiated by the embedded connector, and wait for them to consume a desired number of + * messages. + */ +public class TestableSinkConnector extends SampleSinkConnector { + + private static final Logger log = LoggerFactory.getLogger(TestableSinkConnector.class); + + // Boolean valued configuration that determines whether TestableSinkConnector::alterOffsets should return true or false + public static final String ALTER_OFFSETS_RESULT = "alter.offsets.result"; + + private String connectorName; + private Map commonConfigs; + private ConnectorHandle connectorHandle; + + @Override + public void start(Map props) { + connectorHandle = RuntimeHandles.get().connectorHandle(props.get("name")); + connectorName = props.get("name"); + commonConfigs = props; + log.info("Starting connector {}", props.get("name")); + connectorHandle.recordConnectorStart(); + } + + @Override + public Class taskClass() { + return TestableSinkTask.class; + } + + @Override + public List> taskConfigs(int maxTasks) { + List> configs = new ArrayList<>(); + for (int i = 0; i < maxTasks; i++) { + Map config = new HashMap<>(commonConfigs); + config.put("connector.name", connectorName); + config.put("task.id", connectorName + "-" + i); + configs.add(config); + } + return configs; + } + + @Override + public void stop() { + log.info("Stopped {} connector {}", this.getClass().getSimpleName(), connectorName); + connectorHandle.recordConnectorStop(); + } + + @Override + public ConfigDef config() { + return new ConfigDef(); + } + + @Override + public boolean alterOffsets(Map connectorConfig, Map offsets) { + return Boolean.parseBoolean(connectorConfig.get(ALTER_OFFSETS_RESULT)); + } + + public static class TestableSinkTask extends SinkTask { + + private String taskId; + TaskHandle taskHandle; + Map committedOffsets; + Map> cachedTopicPartitions; + + public TestableSinkTask() { + this.committedOffsets = new HashMap<>(); + this.cachedTopicPartitions = new HashMap<>(); + } + + @Override + public String version() { + return "unknown"; + } + + @Override + public void start(Map props) { + taskId = props.get("task.id"); + String connectorName = props.get("connector.name"); + taskHandle = RuntimeHandles.get().connectorHandle(connectorName).taskHandle(taskId); + log.debug("Starting task {}", taskId); + taskHandle.recordTaskStart(); + } + + @Override + public void open(Collection partitions) { + log.debug("Opening partitions {}", partitions); + taskHandle.partitionsAssigned(partitions); + } + + @Override + public void close(Collection partitions) { + log.debug("Closing partitions {}", partitions); + taskHandle.partitionsRevoked(partitions); + 
partitions.forEach(committedOffsets::remove); + } + + @Override + public void put(Collection records) { + for (SinkRecord rec : records) { + taskHandle.record(rec); + TopicPartition tp = cachedTopicPartitions + .computeIfAbsent(rec.topic(), v -> new HashMap<>()) + .computeIfAbsent(rec.kafkaPartition(), v -> new TopicPartition(rec.topic(), rec.kafkaPartition())); + committedOffsets.put(tp, committedOffsets.getOrDefault(tp, 0) + 1); + log.trace("Task {} obtained record (key='{}' value='{}')", taskId, rec.key(), rec.value()); + } + } + + @Override + public Map preCommit(Map offsets) { + taskHandle.partitionsCommitted(offsets.keySet()); + offsets.forEach((tp, offset) -> { + int recordsSinceLastCommit = committedOffsets.getOrDefault(tp, 0); + if (recordsSinceLastCommit != 0) { + taskHandle.commit(recordsSinceLastCommit); + log.debug("Forwarding to framework request to commit {} records for {}", recordsSinceLastCommit, tp); + committedOffsets.put(tp, 0); + } + }); + return offsets; + } + + @Override + public void stop() { + log.info("Stopped {} task {}", this.getClass().getSimpleName(), taskId); + taskHandle.recordTaskStop(); + } + } +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TestableSourceConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TestableSourceConnector.java new file mode 100644 index 000000000000..f80f036071f5 --- /dev/null +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TestableSourceConnector.java @@ -0,0 +1,314 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.connect.integration; + +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.connect.connector.Task; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.header.ConnectHeaders; +import org.apache.kafka.connect.runtime.SampleSourceConnector; +import org.apache.kafka.connect.source.ConnectorTransactionBoundaries; +import org.apache.kafka.connect.source.ExactlyOnceSupport; +import org.apache.kafka.connect.source.SourceRecord; +import org.apache.kafka.connect.source.SourceTask; +import org.apache.kafka.server.util.ThroughputThrottler; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +/** + * A source connector that is used in Apache Kafka integration tests to verify the behavior of + * the Connect framework, but that can be used in other integration tests as a simple connector + * that generates records of a fixed structure. The rate of record production can be adjusted + * through the configs 'throughput' and 'messages.per.poll' + */ +public class TestableSourceConnector extends SampleSourceConnector { + private static final Logger log = LoggerFactory.getLogger(TestableSourceConnector.class); + + public static final String TOPIC_CONFIG = "topic"; + public static final String NUM_TASKS = "num.tasks"; + public static final String MESSAGES_PER_POLL_CONFIG = "messages.per.poll"; + public static final String MAX_MESSAGES_PER_SECOND_CONFIG = "throughput"; + public static final String MAX_MESSAGES_PRODUCED_CONFIG = "max.messages"; + + public static final String CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG = "custom.exactly.once.support"; + public static final String EXACTLY_ONCE_SUPPORTED = "supported"; + public static final String EXACTLY_ONCE_UNSUPPORTED = "unsupported"; + public static final String EXACTLY_ONCE_NULL = "null"; + public static final String EXACTLY_ONCE_FAIL = "fail"; + + public static final String CUSTOM_TRANSACTION_BOUNDARIES_CONFIG = "custom.transaction.boundaries"; + public static final String TRANSACTION_BOUNDARIES_SUPPORTED = "supported"; + public static final String TRANSACTION_BOUNDARIES_UNSUPPORTED = "unsupported"; + public static final String TRANSACTION_BOUNDARIES_NULL = "null"; + public static final String TRANSACTION_BOUNDARIES_FAIL = "fail"; + + // Boolean valued configuration that determines whether TestableSourceConnector::alterOffsets should return true or false + public static final String ALTER_OFFSETS_RESULT = "alter.offsets.result"; + + private String connectorName; + private ConnectorHandle connectorHandle; + private Map commonConfigs; + + @Override + public void start(Map props) { + connectorHandle = RuntimeHandles.get().connectorHandle(props.get("name")); + connectorName = connectorHandle.name(); + commonConfigs = props; + log.info("Started {} connector {}", this.getClass().getSimpleName(), connectorName); + connectorHandle.recordConnectorStart(); + if (Boolean.parseBoolean(props.getOrDefault("connector.start.inject.error", "false"))) { + throw new RuntimeException("Injecting errors during connector start"); + } + } + + @Override + public Class taskClass() { + return TestableSourceTask.class; + } + + 
@Override + public List> taskConfigs(int maxTasks) { + String numTasksProp = commonConfigs.get(NUM_TASKS); + int numTasks = numTasksProp != null ? Integer.parseInt(numTasksProp) : maxTasks; + List> configs = new ArrayList<>(); + for (int i = 0; i < numTasks; i++) { + Map config = taskConfig(commonConfigs, connectorName, i); + configs.add(config); + } + return configs; + } + + public static Map taskConfig( + Map connectorProps, + String connectorName, + int taskNum + ) { + Map result = new HashMap<>(connectorProps); + result.put("connector.name", connectorName); + result.put("task.id", taskId(connectorName, taskNum)); + return result; + } + + @Override + public void stop() { + log.info("Stopped {} connector {}", this.getClass().getSimpleName(), connectorName); + connectorHandle.recordConnectorStop(); + if (Boolean.parseBoolean(commonConfigs.getOrDefault("connector.stop.inject.error", "false"))) { + throw new RuntimeException("Injecting errors during connector stop"); + } + } + + @Override + public ConfigDef config() { + log.info("Configured {} connector {}", this.getClass().getSimpleName(), connectorName); + return new ConfigDef(); + } + + @Override + public ExactlyOnceSupport exactlyOnceSupport(Map connectorConfig) { + String supportLevel = connectorConfig.getOrDefault(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, "null").toLowerCase(Locale.ROOT); + switch (supportLevel) { + case EXACTLY_ONCE_SUPPORTED: + return ExactlyOnceSupport.SUPPORTED; + case EXACTLY_ONCE_UNSUPPORTED: + return ExactlyOnceSupport.UNSUPPORTED; + case EXACTLY_ONCE_FAIL: + throw new ConnectException("oops"); + default: + case EXACTLY_ONCE_NULL: + return null; + } + } + + @Override + public ConnectorTransactionBoundaries canDefineTransactionBoundaries(Map connectorConfig) { + String supportLevel = connectorConfig.getOrDefault(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TRANSACTION_BOUNDARIES_UNSUPPORTED).toLowerCase(Locale.ROOT); + switch (supportLevel) { + case TRANSACTION_BOUNDARIES_SUPPORTED: + return ConnectorTransactionBoundaries.SUPPORTED; + case TRANSACTION_BOUNDARIES_FAIL: + throw new ConnectException("oh no :("); + case TRANSACTION_BOUNDARIES_NULL: + return null; + default: + case TRANSACTION_BOUNDARIES_UNSUPPORTED: + return ConnectorTransactionBoundaries.UNSUPPORTED; + } + } + + @Override + public boolean alterOffsets(Map connectorConfig, Map, Map> offsets) { + return Boolean.parseBoolean(connectorConfig.get(ALTER_OFFSETS_RESULT)); + } + + public static String taskId(String connectorName, int taskId) { + return connectorName + "-" + taskId; + } + + public static class TestableSourceTask extends SourceTask { + private String taskId; + private String topicName; + private TaskHandle taskHandle; + private volatile boolean stopped; + private long startingSeqno; + private long seqno; + private int batchSize; + private ThroughputThrottler throttler; + private long maxMessages; + + private long priorTransactionBoundary; + private long nextTransactionBoundary; + + @Override + public String version() { + return "unknown"; + } + + @Override + public void start(Map props) { + taskId = props.get("task.id"); + String connectorName = props.get("connector.name"); + topicName = props.getOrDefault(TOPIC_CONFIG, "sequential-topic"); + batchSize = Integer.parseInt(props.getOrDefault(MESSAGES_PER_POLL_CONFIG, "1")); + taskHandle = RuntimeHandles.get().connectorHandle(connectorName).taskHandle(taskId); + Map offset = Optional.ofNullable( + context.offsetStorageReader().offset(sourcePartition(taskId))) + .orElse(Collections.emptyMap()); + 
startingSeqno = Optional.ofNullable((Long) offset.get("saved")).orElse(0L); + seqno = startingSeqno; + log.info("Started {} task {} with properties {}", this.getClass().getSimpleName(), taskId, props); + throttler = new ThroughputThrottler(Long.parseLong(props.getOrDefault(MAX_MESSAGES_PER_SECOND_CONFIG, "-1")), System.currentTimeMillis()); + maxMessages = Long.parseLong(props.getOrDefault(MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(Long.MAX_VALUE))); + taskHandle.recordTaskStart(); + priorTransactionBoundary = 0; + nextTransactionBoundary = 1; + if (Boolean.parseBoolean(props.getOrDefault("task-" + taskId + ".start.inject.error", "false"))) { + throw new RuntimeException("Injecting errors during task start"); + } + calculateNextBoundary(); + } + + @Override + public List poll() { + if (!stopped) { + // Don't return any more records since we've already produced the configured maximum number. + if (seqno >= maxMessages) { + return null; + } + if (throttler.shouldThrottle(seqno - startingSeqno, System.currentTimeMillis())) { + throttler.throttle(); + } + int currentBatchSize = (int) Math.min(maxMessages - seqno, batchSize); + taskHandle.record(currentBatchSize); + log.trace("Returning batch of {} records", currentBatchSize); + return LongStream.range(0, currentBatchSize) + .mapToObj(i -> { + seqno++; + SourceRecord record = new SourceRecord( + sourcePartition(taskId), + sourceOffset(seqno), + topicName, + null, + Schema.STRING_SCHEMA, + "key-" + taskId + "-" + seqno, + Schema.STRING_SCHEMA, + "value-" + taskId + "-" + seqno, + null, + new ConnectHeaders().addLong("header-" + seqno, seqno)); + maybeDefineTransactionBoundary(record); + return record; + }) + .collect(Collectors.toList()); + } + return null; + } + + @Override + public void commit() { + log.info("Task {} committing offsets", taskId); + //TODO: save progress outside the offset topic, potentially in the task handle + } + + @Override + public void commitRecord(SourceRecord record, RecordMetadata metadata) { + log.trace("Committing record: {}", record); + taskHandle.commit(); + } + + @Override + public void stop() { + log.info("Stopped {} task {}", this.getClass().getSimpleName(), taskId); + stopped = true; + taskHandle.recordTaskStop(); + } + + /** + * Calculate the next transaction boundary, i.e., the seqno whose corresponding source record should be used to + * either {@link org.apache.kafka.connect.source.TransactionContext#commitTransaction(SourceRecord) commit} + * or {@link org.apache.kafka.connect.source.TransactionContext#abortTransaction(SourceRecord) abort} the next transaction. + *
* <p>
+ * This connector defines transactions whose size correspond to successive elements of the Fibonacci sequence, + * where transactions with an even number of records are aborted, and those with an odd number of records are committed. + */ + private void calculateNextBoundary() { + while (nextTransactionBoundary <= seqno) { + nextTransactionBoundary += priorTransactionBoundary; + priorTransactionBoundary = nextTransactionBoundary - priorTransactionBoundary; + } + } + + private void maybeDefineTransactionBoundary(SourceRecord record) { + if (context.transactionContext() == null || seqno != nextTransactionBoundary) { + return; + } + long transactionSize = nextTransactionBoundary - priorTransactionBoundary; + + // If the transaction boundary ends on an even-numbered offset, abort it + // Otherwise, commit + boolean abort = nextTransactionBoundary % 2 == 0; + calculateNextBoundary(); + if (abort) { + log.info("Aborting transaction of {} records", transactionSize); + context.transactionContext().abortTransaction(record); + } else { + log.info("Committing transaction of {} records", transactionSize); + context.transactionContext().commitTransaction(record); + } + } + } + + public static Map sourcePartition(String taskId) { + return Collections.singletonMap("task.id", taskId); + } + + public static Map sourceOffset(long seqno) { + return Collections.singletonMap("saved", seqno); + } +} diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java index 105d238d56f2..33d7315cb4c5 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java @@ -67,8 +67,8 @@ public class TransformationIntegrationTest { private static final int NUM_TASKS = 1; private static final int NUM_WORKERS = 3; private static final String CONNECTOR_NAME = "simple-conn"; - private static final String SINK_CONNECTOR_CLASS_NAME = MonitorableSinkConnector.class.getSimpleName(); - private static final String SOURCE_CONNECTOR_CLASS_NAME = MonitorableSourceConnector.class.getSimpleName(); + private static final String SINK_CONNECTOR_CLASS_NAME = TestableSinkConnector.class.getSimpleName(); + private static final String SOURCE_CONNECTOR_CLASS_NAME = TestableSourceConnector.class.getSimpleName(); private EmbeddedConnectCluster connect; private ConnectorHandle connectorHandle; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java index 420cf5e745ce..71f75147f9ab 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java @@ -18,11 +18,16 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.common.MetricName; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigTransformer; import org.apache.kafka.common.config.ConfigValue; import org.apache.kafka.common.config.SaslConfigs; import org.apache.kafka.common.config.provider.DirectoryConfigProvider; +import org.apache.kafka.common.metrics.KafkaMetric; +import 
org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.Monitorable; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerUnsecuredLoginCallbackHandler; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.connector.Connector; @@ -770,6 +775,54 @@ public void testConfigValidationAllOverride() { verifyValidationIsolation(); } + static final class TestClientConfigOverridePolicy extends AllConnectorClientConfigOverridePolicy implements Monitorable { + + static MetricName metricName = null; + private int count = 0; + + @Override + protected boolean isAllowed(ConfigValue configValue) { + count++; + return super.isAllowed(configValue); + } + + @Override + public void withPluginMetrics(PluginMetrics metrics) { + metricName = metrics.metricName("", "description", Collections.emptyMap()); + metrics.addMetric(metricName, (Measurable) (config, now) -> count); + } + } + + @Test + public void testClientConfigOverridePolicyWithMetrics() { + final Class connectorClass = SampleSourceConnector.class; + AbstractHerder herder = createConfigValidationHerder(connectorClass, new TestClientConfigOverridePolicy()); + + Map config = new HashMap<>(); + config.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connectorClass.getName()); + config.put(ConnectorConfig.NAME_CONFIG, "connector-name"); + config.put("required", "value"); // connector required config + + Map overrides = new HashMap<>(); + String maxRequestSizeConfigKey = producerOverrideKey(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); + overrides.put(maxRequestSizeConfigKey, "420"); + String maxBlockConfigKey = producerOverrideKey(ProducerConfig.MAX_BLOCK_MS_CONFIG); + overrides.put(maxBlockConfigKey, "28980"); + String idempotenceConfigKey = producerOverrideKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG); + overrides.put(idempotenceConfigKey, "true"); + config.putAll(overrides); + + herder.validateConnectorConfig(config, s -> null, false); + + Map metrics = herder.worker.metrics().metrics().metrics(); + assertTrue(metrics.containsKey(TestClientConfigOverridePolicy.metricName)); + assertEquals((double) overrides.size(), metrics.get(TestClientConfigOverridePolicy.metricName).metricValue()); + + herder.stopServices(); + metrics = herder.worker.metrics().metrics().metrics(); + assertFalse(metrics.containsKey(TestClientConfigOverridePolicy.metricName)); + } + @Test public void testReverseTransformConfigs() { // Construct a task config with constant values for TEST_KEY and TEST_KEY2 @@ -1270,6 +1323,8 @@ private AbstractHerder testHerder() { } private AbstractHerder testHerder(ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy) { + ConnectMetrics connectMetrics = new MockConnectMetrics(); + when(worker.metrics()).thenReturn(connectMetrics); return mock(AbstractHerder.class, withSettings() .useConstructor(worker, workerId, kafkaClusterId, statusStore, configStore, connectorClientConfigOverridePolicy, Time.SYSTEM) .defaultAnswer(CALLS_REAL_METHODS)); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java index 0cb4db706472..4a7c7ae7c90d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java @@ -29,13 +29,14 @@ import 
org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaAndValue; import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.errors.RetriableException; import org.apache.kafka.connect.header.ConnectHeaders; -import org.apache.kafka.connect.integration.MonitorableSourceConnector; +import org.apache.kafka.connect.integration.TestableSourceConnector; import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; import org.apache.kafka.connect.runtime.errors.ErrorReporter; import org.apache.kafka.connect.runtime.errors.ProcessingContext; @@ -46,6 +47,7 @@ import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceTask; import org.apache.kafka.connect.storage.CloseableOffsetStorageReader; +import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.ConnectorOffsetBackingStore; import org.apache.kafka.connect.storage.Converter; import org.apache.kafka.connect.storage.HeaderConverter; @@ -81,7 +83,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -130,8 +132,7 @@ public class AbstractWorkerSourceTaskTest { private static final byte[] SERIALIZED_KEY = "converted-key".getBytes(); private static final byte[] SERIALIZED_RECORD = "converted-record".getBytes(); - @Mock - private SourceTask sourceTask; + @Mock private SourceTask sourceTask; @Mock private TopicAdmin admin; @Mock private KafkaProducer producer; @Mock private Converter keyConverter; @@ -142,8 +143,9 @@ public class AbstractWorkerSourceTaskTest { @Mock private OffsetStorageWriter offsetWriter; @Mock private ConnectorOffsetBackingStore offsetStore; @Mock private StatusBackingStore statusBackingStore; - @Mock private WorkerSourceTaskContext sourceTaskContext; + @Mock private WorkerTransactionContext workerTransactionContext; @Mock private TaskStatus.Listener statusListener; + @Mock private ClusterConfigState configState; private final ConnectorTaskId taskId = new ConnectorTaskId("job", 0); private final ConnectorTaskId taskId1 = new ConnectorTaskId("job", 1); @@ -178,7 +180,7 @@ private Map sourceConnectorPropsWithGroups() { // setup up props for the source connector Map props = new HashMap<>(); props.put("name", "foo-connector"); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(1)); props.put(TOPIC_CONFIG, TOPIC); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -823,9 +825,12 @@ private void createWorkerTask() { private void createWorkerTask(Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter, RetryWithToleranceOperator retryWithToleranceOperator, Supplier>> 
errorReportersSupplier) { + Plugin keyConverterPlugin = metrics.wrap(keyConverter, taskId, true); + Plugin valueConverterPlugin = metrics.wrap(valueConverter, taskId, false); + Plugin headerConverterPlugin = metrics.wrap(headerConverter, taskId); workerTask = new AbstractWorkerSourceTask( - taskId, sourceTask, statusListener, TargetState.STARTED, keyConverter, valueConverter, headerConverter, transformationChain, - sourceTaskContext, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, + taskId, sourceTask, statusListener, TargetState.STARTED, configState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, + workerTransactionContext, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, config, metrics, errorHandlingMetrics, plugins.delegatingLoader(), Time.SYSTEM, retryWithToleranceOperator, statusBackingStore, Runnable::run, errorReportersSupplier) { @Override diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java index 58924d79ecf6..6c703339ceff 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java @@ -18,19 +18,37 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.common.metrics.Measurable; +import org.apache.kafka.common.metrics.Monitorable; +import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.internals.PluginMetricsImpl; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.data.SchemaAndValue; import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup; import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroupId; +import org.apache.kafka.connect.source.SourceRecord; +import org.apache.kafka.connect.storage.Converter; +import org.apache.kafka.connect.storage.HeaderConverter; +import org.apache.kafka.connect.transforms.Transformation; +import org.apache.kafka.connect.transforms.predicates.Predicate; +import org.apache.kafka.connect.util.ConnectorTaskId; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import java.io.IOException; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -44,6 +62,8 @@ public class ConnectMetricsTest { private static final Map DEFAULT_WORKER_CONFIG = new HashMap<>(); + private static final ConnectorTaskId CONNECTOR_TASK_ID = new ConnectorTaskId("connector", 0); + private static final Map TAGS = Collections.singletonMap("t1", "v1"); static { DEFAULT_WORKER_CONFIG.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); @@ -171,6 +191,218 @@ public void 
testExplicitlyEnableJmxReporter() { cm.stop(); } + @Test + public void testConnectorPluginMetrics() throws Exception { + try (PluginMetricsImpl pluginMetrics = metrics.connectorPluginMetrics(CONNECTOR_TASK_ID.connector())) { + MetricName metricName = pluginMetrics.metricName("name", "description", TAGS); + Map expectedTags = new LinkedHashMap<>(); + expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); + expectedTags.putAll(TAGS); + assertEquals(expectedTags, metricName.tags()); + } + } + + @Test + public void testTaskPluginMetrics() throws Exception { + try (PluginMetricsImpl pluginMetrics = metrics.taskPluginMetrics(CONNECTOR_TASK_ID)) { + MetricName metricName = pluginMetrics.metricName("name", "description", TAGS); + Map expectedTags = new LinkedHashMap<>(); + expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); + expectedTags.put("task", String.valueOf(CONNECTOR_TASK_ID.task())); + expectedTags.putAll(TAGS); + assertEquals(expectedTags, metricName.tags()); + } + } + + static final class MonitorableConverter implements Converter, HeaderConverter, Monitorable { + + PluginMetrics pluginMetrics = null; + MetricName metricName = null; + int calls = 0; + + @Override + public void withPluginMetrics(PluginMetrics pluginMetrics) { + this.pluginMetrics = pluginMetrics; + metricName = pluginMetrics.metricName("name", "description", TAGS); + pluginMetrics.addMetric(metricName, (Measurable) (config, now) -> calls); + } + + @Override + public void configure(Map configs, boolean isKey) { } + + @Override + public byte[] fromConnectData(String topic, Schema schema, Object value) { + calls++; + return new byte[0]; + } + + @Override + public SchemaAndValue toConnectData(String topic, byte[] value) { + calls++; + return null; + } + + @Override + public ConfigDef config() { + return Converter.super.config(); + } + + @Override + public void configure(Map configs) { } + + @Override + public SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value) { + calls++; + return null; + } + + @Override + public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) { + calls++; + return new byte[0]; + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testWrapConverter(boolean isKey) throws IOException { + try (MonitorableConverter converter = new MonitorableConverter()) { + metrics.wrap(converter, CONNECTOR_TASK_ID, isKey); + assertNotNull(converter.pluginMetrics); + MetricName metricName = converter.metricName; + Map expectedTags = new LinkedHashMap<>(); + expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); + expectedTags.put("task", String.valueOf(CONNECTOR_TASK_ID.task())); + expectedTags.put("converter", isKey ? 
"key" : "value"); + expectedTags.putAll(TAGS); + assertEquals(expectedTags, metricName.tags()); + KafkaMetric metric = metrics.metrics().metrics().get(metricName); + assertEquals(0.0, (double) metric.metricValue()); + converter.toConnectData("topic", new byte[]{}); + converter.fromConnectData("topic", null, null); + assertEquals(2.0, (double) metric.metricValue()); + } + } + + @Test + public void testWrapHeaderConverter() throws IOException { + try (MonitorableConverter converter = new MonitorableConverter()) { + metrics.wrap(converter, CONNECTOR_TASK_ID); + assertNotNull(converter.pluginMetrics); + MetricName metricName = converter.metricName; + Map expectedTags = new LinkedHashMap<>(); + expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); + expectedTags.put("task", String.valueOf(CONNECTOR_TASK_ID.task())); + expectedTags.put("converter", "header"); + expectedTags.putAll(TAGS); + assertEquals(expectedTags, metricName.tags()); + KafkaMetric metric = metrics.metrics().metrics().get(metricName); + assertEquals(0.0, (double) metric.metricValue()); + converter.toConnectHeader("topic", "header", new byte[]{}); + converter.fromConnectHeader("topic", "header", null, null); + assertEquals(2.0, (double) metric.metricValue()); + } + } + + static final class MonitorableTransformation implements Transformation, Monitorable { + + PluginMetrics pluginMetrics = null; + MetricName metricName = null; + int calls = 0; + + @Override + public void withPluginMetrics(PluginMetrics pluginMetrics) { + this.pluginMetrics = pluginMetrics; + metricName = pluginMetrics.metricName("name", "description", TAGS); + pluginMetrics.addMetric(metricName, (Measurable) (config, now) -> calls); + } + + @Override + public void configure(Map configs) { } + + @Override + public SourceRecord apply(SourceRecord record) { + calls++; + return null; + } + + @Override + public ConfigDef config() { + return null; + } + + @Override + public void close() { } + } + + @Test + public void testWrapTransformation() { + try (MonitorableTransformation transformation = new MonitorableTransformation()) { + metrics.wrap(transformation, CONNECTOR_TASK_ID, "alias"); + assertNotNull(transformation.pluginMetrics); + MetricName metricName = transformation.metricName; + Map expectedTags = new LinkedHashMap<>(); + expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); + expectedTags.put("task", String.valueOf(CONNECTOR_TASK_ID.task())); + expectedTags.put("transformation", "alias"); + expectedTags.putAll(TAGS); + assertEquals(expectedTags, metricName.tags()); + KafkaMetric metric = metrics.metrics().metrics().get(metricName); + assertEquals(0.0, (double) metric.metricValue()); + transformation.apply(null); + assertEquals(1.0, (double) metric.metricValue()); + } + } + + static final class MonitorablePredicate implements Predicate, Monitorable { + PluginMetrics pluginMetrics = null; + MetricName metricName = null; + int calls = 0; + + @Override + public void withPluginMetrics(PluginMetrics pluginMetrics) { + this.pluginMetrics = pluginMetrics; + metricName = pluginMetrics.metricName("name", "description", TAGS); + pluginMetrics.addMetric(metricName, (Measurable) (config, now) -> calls); + } + + @Override + public void configure(Map configs) { } + + @Override + public ConfigDef config() { + return null; + } + + @Override + public boolean test(SourceRecord record) { + calls++; + return false; + } + + @Override + public void close() { } + } + + @Test + public void testWrapPredicate() { + try (MonitorablePredicate predicate = new 
MonitorablePredicate()) { + metrics.wrap(predicate, CONNECTOR_TASK_ID, "alias"); + assertNotNull(predicate.pluginMetrics); + MetricName metricName = predicate.metricName; + Map expectedTags = new LinkedHashMap<>(); + expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); + expectedTags.put("task", String.valueOf(CONNECTOR_TASK_ID.task())); + expectedTags.put("predicate", "alias"); + expectedTags.putAll(TAGS); + assertEquals(expectedTags, metricName.tags()); + KafkaMetric metric = metrics.metrics().metrics().get(metricName); + assertEquals(0.0, (double) metric.metricValue()); + predicate.test(null); + assertEquals(1.0, (double) metric.metricValue()); + } + } + private Sensor addToGroup(ConnectMetrics connectMetrics, boolean shouldClose) { ConnectMetricsRegistry registry = connectMetrics.registry(); ConnectMetrics.MetricGroup metricGroup = connectMetrics.group(registry.taskGroupName(), diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java index 6092f8ca7bdc..ee1a24b7aa5e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java @@ -26,6 +26,7 @@ import org.apache.kafka.connect.sink.SinkRecord; import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; +import org.apache.kafka.connect.util.ConnectorTaskId; import org.junit.jupiter.api.Test; @@ -44,6 +45,9 @@ public class ConnectorConfigTest> { + private static final ConnectMetrics METRICS = new MockConnectMetrics(); + private static final ConnectorTaskId CONNECTOR_TASK_ID = new ConnectorTaskId("test", 0); + public static final Plugins MOCK_PLUGINS = new Plugins(new HashMap<>()) { @Override public Set>> transformations() { @@ -157,7 +161,7 @@ public void singleTransform() { props.put("transforms.a.type", SimpleTransformation.class.getName()); props.put("transforms.a.magic.number", "42"); final ConnectorConfig config = new ConnectorConfig(MOCK_PLUGINS, props); - final List> transformationStages = config.transformationStages(); + final List> transformationStages = config.transformationStages(CONNECTOR_TASK_ID, METRICS); assertEquals(1, transformationStages.size()); final TransformationStage stage = transformationStages.get(0); assertEquals(SimpleTransformation.class, stage.transformClass()); @@ -186,7 +190,7 @@ public void multipleTransforms() { props.put("transforms.b.type", SimpleTransformation.class.getName()); props.put("transforms.b.magic.number", "84"); final ConnectorConfig config = new ConnectorConfig(MOCK_PLUGINS, props); - final List> transformationStages = config.transformationStages(); + final List> transformationStages = config.transformationStages(CONNECTOR_TASK_ID, METRICS); assertEquals(2, transformationStages.size()); assertEquals(42, transformationStages.get(0).apply(DUMMY_RECORD).kafkaPartition().intValue()); assertEquals(84, transformationStages.get(1).apply(DUMMY_RECORD).kafkaPartition().intValue()); @@ -287,7 +291,7 @@ public void abstractPredicate() { private void assertTransformationStageWithPredicate(Map props, boolean expectedNegated) { final ConnectorConfig config = new ConnectorConfig(MOCK_PLUGINS, props); - final List> transformationStages = config.transformationStages(); + final List> transformationStages = config.transformationStages(CONNECTOR_TASK_ID, METRICS); 
assertEquals(1, transformationStages.size()); TransformationStage stage = transformationStages.get(0); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java index 0974f35d16c7..d98fefcdff5b 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java @@ -25,6 +25,7 @@ import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.components.Versioned; @@ -33,7 +34,7 @@ import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.errors.RetriableException; -import org.apache.kafka.connect.integration.MonitorableSourceConnector; +import org.apache.kafka.connect.integration.TestableSourceConnector; import org.apache.kafka.connect.json.JsonConverter; import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; import org.apache.kafka.connect.runtime.errors.ErrorReporter; @@ -85,7 +86,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static org.apache.kafka.common.utils.Time.SYSTEM; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -197,7 +198,7 @@ private Map sourceConnectorProps(String topic) { // setup up props for the source connector Map props = new HashMap<>(); props.put("name", "foo-connector"); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(1)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -435,13 +436,17 @@ private void createSinkTask(TargetState initialState, RetryWithToleranceOperator oo.put("schemas.enable", "false"); converter.configure(oo); + Plugin> transformationPlugin = metrics.wrap(new FaultyPassthrough(), taskId, ""); TransformationChain, SinkRecord> sinkTransforms = - new TransformationChain<>(singletonList(new TransformationStage<>(new FaultyPassthrough())), retryWithToleranceOperator); + new TransformationChain<>(singletonList(new TransformationStage<>(transformationPlugin)), retryWithToleranceOperator); + Plugin keyConverterPlugin = metrics.wrap(converter, taskId, true); + Plugin valueConverterPlugin = metrics.wrap(converter, taskId, false); + Plugin headerConverterPlugin = metrics.wrap(headerConverter, taskId); workerSinkTask = new WorkerSinkTask( taskId, sinkTask, statusListener, initialState, workerConfig, - ClusterConfigState.EMPTY, metrics, converter, converter, errorHandlingMetrics, - headerConverter, sinkTransforms, consumer, pluginLoader, time, + ClusterConfigState.EMPTY, metrics, 
keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, + headerConverterPlugin, sinkTransforms, consumer, pluginLoader, time, retryWithToleranceOperator, workerErrantRecordReporter, statusBackingStore, () -> errorReporters); } @@ -467,12 +472,16 @@ private Converter badConverter() { private void createSourceTask(TargetState initialState, RetryWithToleranceOperator retryWithToleranceOperator, List> errorReporters, Converter converter) { + Plugin> transformationPlugin = metrics.wrap(new FaultyPassthrough(), taskId, ""); TransformationChain sourceTransforms = new TransformationChain<>(singletonList( - new TransformationStage<>(new FaultyPassthrough())), retryWithToleranceOperator); + new TransformationStage<>(transformationPlugin)), retryWithToleranceOperator); + Plugin keyConverterPlugin = metrics.wrap(converter, taskId, true); + Plugin valueConverterPlugin = metrics.wrap(converter, taskId, false); + Plugin headerConverterPlugin = metrics.wrap(headerConverter, taskId); workerSourceTask = spy(new WorkerSourceTask( - taskId, sourceTask, statusListener, initialState, converter, - converter, errorHandlingMetrics, headerConverter, + taskId, sourceTask, statusListener, initialState, keyConverterPlugin, + valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, sourceTransforms, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, workerConfig, diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java index be3dc2401ad6..acb5017a6749 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java @@ -26,11 +26,12 @@ import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.errors.ConnectException; -import org.apache.kafka.connect.integration.MonitorableSourceConnector; +import org.apache.kafka.connect.integration.TestableSourceConnector; import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup; import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest; @@ -83,7 +84,7 @@ import java.util.stream.Collectors; import static java.util.Collections.emptySet; -import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -248,7 +249,7 @@ private Map sourceConnectorProps(SourceTask.TransactionBoundary // setup up props for the source connector Map props = new HashMap<>(); props.put("name", "foo-connector"); - props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, 
TestableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(1)); props.put(TOPIC_CONFIG, TOPIC); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -276,7 +277,10 @@ private void createWorkerTask(TargetState initialState) { } private void createWorkerTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter) { - workerTask = new ExactlyOnceWorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverter, valueConverter, headerConverter, + Plugin keyConverterPlugin = metrics.wrap(keyConverter, taskId, true); + Plugin valueConverterPlugin = metrics.wrap(valueConverter, taskId, false); + Plugin headerConverterPlugin = metrics.wrap(headerConverter, taskId); + workerTask = new ExactlyOnceWorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, config, clusterConfigState, metrics, errorHandlingMetrics, plugins.delegatingLoader(), time, RetryWithToleranceOperatorTest.noopOperator(), statusBackingStore, sourceConfig, Runnable::run, preProducerCheck, postProducerCheck, Collections::emptyList); @@ -686,7 +690,7 @@ private void testConnectorBasedCommit(Consumer requestCommit expectSuccessfulSends(); expectSuccessfulFlushes(); - TransactionContext transactionContext = workerTask.sourceTaskContext.transactionContext(); + TransactionContext transactionContext = workerTask.context.transactionContext(); startTaskThread(); @@ -735,7 +739,7 @@ public void testConnectorAbortsEmptyTransaction(boolean enableTopicCreation) thr expectTaskGetTopic(); expectApplyTransformationChain(); expectConvertHeadersAndKeyValue(); - TransactionContext transactionContext = workerTask.sourceTaskContext.transactionContext(); + TransactionContext transactionContext = workerTask.context.transactionContext(); startTaskThread(); @@ -792,7 +796,7 @@ public void testMixedConnectorTransactionBoundaryCommitLastRecordAbortBatch(bool workerTask.initialize(TASK_CONFIG); - TransactionContext transactionContext = workerTask.sourceTaskContext.transactionContext(); + TransactionContext transactionContext = workerTask.context.transactionContext(); // Request that the batch be aborted transactionContext.abortTransaction(); @@ -832,7 +836,7 @@ public void testMixedConnectorTransactionBoundaryAbortLastRecordCommitBatch(bool workerTask.initialize(TASK_CONFIG); - TransactionContext transactionContext = workerTask.sourceTaskContext.transactionContext(); + TransactionContext transactionContext = workerTask.context.transactionContext(); // Request that the last record in the batch be aborted transactionContext.abortTransaction(RECORDS.get(RECORDS.size() - 1)); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java index c3208515ac3a..0d6b360f1ee7 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.connect.runtime; +import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.transforms.Transformation; import 
org.apache.kafka.connect.transforms.predicates.Predicate;
@@ -38,33 +39,39 @@ public class TransformationStageTest {
     private final SourceRecord transformed = new SourceRecord(singletonMap("transformed", 2), null, null, null, null);
 
     @Test
-    public void apply() {
+    public void apply() throws Exception {
         applyAndAssert(true, false, transformed);
         applyAndAssert(true, true, initial);
         applyAndAssert(false, false, initial);
         applyAndAssert(false, true, transformed);
     }
 
+    @SuppressWarnings("unchecked")
     private void applyAndAssert(boolean predicateResult, boolean negate,
-                                SourceRecord expectedResult) {
+                                SourceRecord expectedResult) throws Exception {
-        @SuppressWarnings("unchecked")
+
+        Plugin<Predicate<SourceRecord>> predicatePlugin = mock(Plugin.class);
         Predicate<SourceRecord> predicate = mock(Predicate.class);
         when(predicate.test(any())).thenReturn(predicateResult);
-        @SuppressWarnings("unchecked")
+        when(predicatePlugin.get()).thenReturn(predicate);
+        Plugin<Transformation<SourceRecord>> transformationPlugin = mock(Plugin.class);
         Transformation<SourceRecord> transformation = mock(Transformation.class);
+        if ((predicateResult && !negate) || (!predicateResult && negate)) {
+            when(transformationPlugin.get()).thenReturn(transformation);
+        }
         if (expectedResult == transformed) {
             when(transformation.apply(any())).thenReturn(transformed);
         }
 
         TransformationStage<SourceRecord> stage = new TransformationStage<>(
-                predicate,
+                predicatePlugin,
                 negate,
-                transformation);
+                transformationPlugin);
 
         assertEquals(expectedResult, stage.apply(initial));
         stage.close();
-        verify(predicate).close();
-        verify(transformation).close();
+        verify(predicatePlugin).close();
+        verify(transformationPlugin).close();
     }
 }
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java
index b3d197acc066..fc05acc2e3a3 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java
@@ -16,6 +16,7 @@
  */
 package org.apache.kafka.connect.runtime;
 
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
@@ -30,6 +31,7 @@
 import org.apache.kafka.common.header.Header;
 import org.apache.kafka.common.header.Headers;
 import org.apache.kafka.common.header.internals.RecordHeaders;
+import org.apache.kafka.common.internals.Plugin;
 import org.apache.kafka.common.record.RecordBatch;
 import org.apache.kafka.common.record.TimestampType;
 import org.apache.kafka.common.utils.MockTime;
@@ -199,10 +201,27 @@ private void createTask(TargetState initialState, Converter keyConverter, Conver
     private void createTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter,
                             RetryWithToleranceOperator<ConsumerRecord<byte[], byte[]>> retryWithToleranceOperator,
                             Supplier<List<ErrorReporter<ConsumerRecord<byte[], byte[]>>>> errorReportersSupplier) {
-        workerTask = new WorkerSinkTask(
-                taskId, sinkTask, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, metrics,
+        createTask(taskId, sinkTask, statusListener, initialState, workerConfig, metrics,
                 keyConverter, valueConverter, errorHandlingMetrics, headerConverter, transformationChain,
                 consumer, pluginLoader, time,
+                retryWithToleranceOperator, statusBackingStore, errorReportersSupplier);
+    }
+
+    private void createTask(ConnectorTaskId taskId, SinkTask task, TaskStatus.Listener statusListener, TargetState initialState,
+                            WorkerConfig workerConfig, ConnectMetrics connectMetrics, Converter keyConverter, Converter valueConverter,
+                            ErrorHandlingMetrics errorMetrics, HeaderConverter headerConverter,
+                            TransformationChain<ConsumerRecord<byte[], byte[]>, SinkRecord> transformationChain,
+                            Consumer<byte[], byte[]> consumer, ClassLoader loader, Time time,
+                            RetryWithToleranceOperator<ConsumerRecord<byte[], byte[]>> retryWithToleranceOperator,
+                            StatusBackingStore statusBackingStore,
+                            Supplier<List<ErrorReporter<ConsumerRecord<byte[], byte[]>>>> errorReportersSupplier) {
+        Plugin<Converter> keyConverterPlugin = metrics.wrap(keyConverter, taskId, true);
+        Plugin<Converter> valueConverterPlugin = metrics.wrap(valueConverter, taskId, false);
+        Plugin<HeaderConverter> headerConverterPlugin = metrics.wrap(headerConverter, taskId);
+        workerTask = new WorkerSinkTask(
+                taskId, task, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, connectMetrics,
+                keyConverterPlugin, valueConverterPlugin, errorMetrics, headerConverterPlugin,
+                transformationChain, consumer, loader, time,
                 retryWithToleranceOperator, null, statusBackingStore, errorReportersSupplier);
     }
@@ -1754,13 +1773,12 @@ public void testOriginalTopicWithTopicMutatingTransformations() {
     public void testPartitionCountInCaseOfPartitionRevocation() {
         MockConsumer<byte[], byte[]> mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
         // Setting up Worker Sink Task to check metrics
-        workerTask = new WorkerSinkTask(
-                taskId, sinkTask, statusListener, TargetState.PAUSED, workerConfig, ClusterConfigState.EMPTY, metrics,
+        createTask(taskId, sinkTask, statusListener, TargetState.PAUSED, workerConfig, metrics,
                 keyConverter, valueConverter, errorHandlingMetrics, headerConverter, transformationChain,
                 mockConsumer, pluginLoader, time,
-                RetryWithToleranceOperatorTest.noopOperator(), null, statusBackingStore, Collections::emptyList);
+                RetryWithToleranceOperatorTest.noopOperator(), statusBackingStore, Collections::emptyList);
         mockConsumer.updateBeginningOffsets(
-            new HashMap<TopicPartition, Long>() {{
+            new HashMap<>() {{
                 put(TOPIC_PARTITION, 0L);
                 put(TOPIC_PARTITION2, 0L);
             }}
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
index a41ce37c356d..224b6898777d 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java
@@ -24,6 +24,7 @@
 import org.apache.kafka.clients.consumer.OffsetCommitCallback;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.header.internals.RecordHeaders;
+import org.apache.kafka.common.internals.Plugin;
 import org.apache.kafka.common.record.TimestampType;
 import org.apache.kafka.common.utils.MockTime;
 import org.apache.kafka.common.utils.Time;
@@ -174,9 +175,12 @@ public void setup() {
         workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
         workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets");
         WorkerConfig workerConfig = new StandaloneConfig(workerProps);
+        Plugin<Converter> keyConverterPlugin = metrics.wrap(keyConverter, taskId, true);
+        Plugin<Converter> valueConverterPlugin = metrics.wrap(valueConverter, taskId, false);
+        Plugin<HeaderConverter> headerConverterPlugin = metrics.wrap(headerConverter, taskId);
         workerTask = new WorkerSinkTask(
-                taskId, sinkTask, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, metrics, keyConverter,
-                valueConverter, errorHandlingMetrics, headerConverter, transformationChain,
+                taskId, sinkTask, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, metrics, keyConverterPlugin,
+                valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, transformationChain,
                 consumer, pluginLoader, time,
                 RetryWithToleranceOperatorTest.noopOperator(), null, statusBackingStore, Collections::emptyList);
         recordsReturned = 0;
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java
index 77d56a207d76..0fbf9df6136f 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java
@@ -27,11 +27,12 @@
 import org.apache.kafka.common.errors.TopicAuthorizationException;
 import org.apache.kafka.common.header.Headers;
 import org.apache.kafka.common.header.internals.RecordHeaders;
+import org.apache.kafka.common.internals.Plugin;
 import org.apache.kafka.common.utils.LogCaptureAppender;
 import org.apache.kafka.common.utils.Time;
 import org.apache.kafka.connect.data.Schema;
 import org.apache.kafka.connect.errors.ConnectException;
-import org.apache.kafka.connect.integration.MonitorableSourceConnector;
+import org.apache.kafka.connect.integration.TestableSourceConnector;
 import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup;
 import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics;
 import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator;
@@ -86,7 +87,7 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Supplier;
 
-import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG;
+import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG;
 import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG;
 import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG;
 import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG;
@@ -210,7 +211,7 @@ private Map<String, String> sourceConnectorPropsWithGroups(String topic) {
         // setup up props for the source connector
         Map<String, String> props = new HashMap<>();
         props.put("name", "foo-connector");
-        props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName());
+        props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName());
         props.put(TASKS_MAX_CONFIG, String.valueOf(1));
         props.put(TOPIC_CONFIG, topic);
         props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName());
@@ -248,7 +249,10 @@ private void createWorkerTask(TargetState initialState, RetryWithToleranceOperat
     private void createWorkerTask(TargetState initialState, Converter keyConverter, Converter valueConverter,
                                   HeaderConverter headerConverter, RetryWithToleranceOperator retryWithToleranceOperator) {
-        workerTask = new WorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverter, valueConverter, errorHandlingMetrics, headerConverter,
+        Plugin<Converter> keyConverterPlugin = metrics.wrap(keyConverter, taskId, true);
+        Plugin<Converter> valueConverterPlugin = metrics.wrap(valueConverter, taskId, false);
+        Plugin<HeaderConverter> headerConverterPlugin = metrics.wrap(headerConverter, taskId);
+        workerTask = new WorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin,
                 transformationChain, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader,
                 offsetWriter, offsetStore, config, clusterConfigState, metrics, plugins.delegatingLoader(), Time.SYSTEM, retryWithToleranceOperator, statusBackingStore, Runnable::run, Collections::emptyList);
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java
index bbc074e97308..7b025003e60c 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java
@@ -314,6 +314,8 @@ public void setUp() throws Exception {
         metrics = new MockConnectMetrics(time);
         AutoCloseable uponShutdown = shutdownCalled::countDown;
 
+        when(worker.metrics()).thenReturn(new MockConnectMetrics());
+
         // Default to the old protocol unless specified otherwise
         connectProtocolVersion = CONNECT_PROTOCOL_V0;
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java
index 8b69b1c37b23..d718180c8131 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/ConnectRestServerTest.java
@@ -16,11 +16,21 @@
  */
 package org.apache.kafka.connect.runtime.rest;
 
+import org.apache.kafka.common.MetricName;
 import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.metrics.Gauge;
+import org.apache.kafka.common.metrics.KafkaMetric;
+import org.apache.kafka.common.metrics.Monitorable;
+import org.apache.kafka.common.metrics.PluginMetrics;
 import org.apache.kafka.common.utils.LogCaptureAppender;
+import org.apache.kafka.connect.errors.ConnectException;
 import org.apache.kafka.connect.rest.ConnectRestExtension;
+import org.apache.kafka.connect.rest.ConnectRestExtensionContext;
 import org.apache.kafka.connect.runtime.Herder;
+import org.apache.kafka.connect.runtime.MockConnectMetrics;
+import org.apache.kafka.connect.runtime.Worker;
 import org.apache.kafka.connect.runtime.isolation.Plugins;
+import org.apache.kafka.connect.runtime.isolation.PluginsTest;
 import org.apache.kafka.connect.runtime.rest.entities.LoggerLevel;
 
 import com.fasterxml.jackson.core.type.TypeReference;
@@ -62,12 +72,15 @@
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.reset;
 
 @ExtendWith(MockitoExtension.class)
 @MockitoSettings(strictness = Strictness.STRICT_STUBS)
@@ -76,6 +89,7 @@ public class ConnectRestServerTest {
     @Mock private RestClient restClient;
     @Mock private Herder herder;
     @Mock private Plugins plugins;
+    @Mock private Worker worker;
     private ConnectRestServer server;
     private CloseableHttpClient httpClient;
     private final Collection responses = new ArrayList<>();
@@ -85,6 +99,8 @@ public class ConnectRestServerTest {
     @BeforeEach
     public void setUp() {
         httpClient = HttpClients.createMinimal();
+        doReturn(new MockConnectMetrics()).when(worker).metrics();
+        doReturn(worker).when(herder).worker();
     }
 
     @AfterEach
@@ -118,6 +134,10 @@ public void testCORSDisabled() throws IOException {
 
     @Test
     public void testAdvertisedUri() {
+        // Clear stubs not needed by this test
+        reset(herder);
+        reset(worker);
+
         // Advertised URI from listeners without protocol
         Map<String, String> configMap = new HashMap<>(baseServerProps());
         configMap.put(RestServerConfig.LISTENERS_CONFIG, "http://localhost:8080,https://localhost:8443");
@@ -376,6 +396,44 @@ public void testDefaultCustomizedHttpResponseHeaders() throws IOException {
         checkCustomizedHttpResponseHeaders(headerConfig, expectedHeaders);
     }
 
+    static final class MonitorableConnectRestExtension extends PluginsTest.TestConnectRestExtension implements Monitorable {
+
+        private boolean called = false;
+        private static MetricName metricName;
+
+        @Override
+        public void register(ConnectRestExtensionContext restPluginContext) {
+            called = true;
+        }
+
+        @Override
+        public void withPluginMetrics(PluginMetrics metrics) {
+            metricName = metrics.metricName("name", "description", Collections.emptyMap());
+            metrics.addMetric(metricName, (Gauge<Boolean>) (config, now) -> called);
+        }
+    }
+
+    @Test
+    public void testMonitorableConnectRestExtension() {
+        Map<String, String> configMap = new HashMap<>(baseServerProps());
+        configMap.put(RestServerConfig.REST_EXTENSION_CLASSES_CONFIG, MonitorableConnectRestExtension.class.getName());
+
+        doReturn(plugins).when(herder).plugins();
+        doReturn(Collections.singletonList(new MonitorableConnectRestExtension())).when(plugins).newPlugins(any(), any(), any());
+        server = new ConnectRestServer(null, restClient, configMap);
+
+        // the call throws because of mocks but the ConnectRestExtension should have been initialized
+        assertThrows(ConnectException.class, () -> server.initializeResources(herder));
+
+        Map<MetricName, KafkaMetric> metrics = herder.worker().metrics().metrics().metrics();
+        assertTrue(metrics.containsKey(MonitorableConnectRestExtension.metricName));
+        assertTrue((boolean) metrics.get(MonitorableConnectRestExtension.metricName).metricValue());
+
+        server.stop();
+        metrics = herder.worker().metrics().metrics().metrics();
+        assertFalse(metrics.containsKey(MonitorableConnectRestExtension.metricName));
+    }
+
     private void checkCustomizedHttpResponseHeaders(String headerConfig, Map<String, String> expectedHeaders) throws IOException {
         Map<String, String> configMap = baseServerProps();
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java
index 857bbb77b638..2dab103ddfe6 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java
@@ -30,6 +30,7 @@
 import org.apache.kafka.connect.runtime.ConnectorStatus;
 import org.apache.kafka.connect.runtime.Herder;
 import org.apache.kafka.connect.runtime.HerderConnectorContext;
+import org.apache.kafka.connect.runtime.MockConnectMetrics;
 import org.apache.kafka.connect.runtime.RestartPlan;
 import org.apache.kafka.connect.runtime.RestartRequest;
 import org.apache.kafka.connect.runtime.SinkConnectorConfig;
@@ -144,6 +145,7 @@ private enum SourceSink {
         noneConnectorClientConfigOverridePolicy = new SampleConnectorClientConfigOverridePolicy();
 
     public void initialize(boolean mockTransform) {
+        when(worker.metrics()).thenReturn(new MockConnectMetrics());
         herder = mock(StandaloneHerder.class, withSettings()
                 .useConstructor(worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, new MemoryConfigBackingStore(transformer), noneConnectorClientConfigOverridePolicy, new MockTime())
                 .defaultAnswer(CALLS_REAL_METHODS));
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java
index 5d764d160310..29a9426e3373 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java
@@ -19,6 +19,8 @@
 import org.apache.kafka.clients.admin.NewTopic;
 import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.runtime.ConnectMetrics;
+import org.apache.kafka.connect.runtime.MockConnectMetrics;
 import org.apache.kafka.connect.runtime.SourceConnectorConfig;
 import org.apache.kafka.connect.runtime.TransformationStage;
 import org.apache.kafka.connect.runtime.WorkerConfig;
@@ -80,6 +82,8 @@ public class TopicCreationTest {
     private static final short DEFAULT_REPLICATION_FACTOR = -1;
     private static final int DEFAULT_PARTITIONS = -1;
 
+    private static final ConnectMetrics METRICS = new MockConnectMetrics();
+    private static final ConnectorTaskId CONNECTOR_TASK_ID = new ConnectorTaskId("test", 0);
 
     Map<String, String> workerProps;
     WorkerConfig workerConfig;
@@ -515,7 +519,7 @@ public void testTopicCreationWithSingleTransformation() {
         topicCreation.addTopic(FOO_TOPIC);
         assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC));
 
-        List<TransformationStage<SourceRecord>> transformationStages = sourceConfig.transformationStages();
+        List<TransformationStage<SourceRecord>> transformationStages = sourceConfig.transformationStages(CONNECTOR_TASK_ID, METRICS);
         assertEquals(1, transformationStages.size());
         TransformationStage<SourceRecord> xform = transformationStages.get(0);
         SourceRecord transformed = xform.apply(new SourceRecord(null, null, "topic", 0, null, null, Schema.INT8_SCHEMA, 42));
@@ -622,7 +626,7 @@ public void topicCreationWithTwoGroupsAndTwoTransformations() {
         assertEquals(barPartitions, barTopicSpec.numPartitions());
         assertEquals(barTopicProps, barTopicSpec.configs());
 
-        List<TransformationStage<SourceRecord>> transformationStages = sourceConfig.transformationStages();
+        List<TransformationStage<SourceRecord>> transformationStages = sourceConfig.transformationStages(CONNECTOR_TASK_ID, METRICS);
         assertEquals(2, transformationStages.size());
         TransformationStage<SourceRecord> castXForm = transformationStages.get(0);
diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java
index 66ce78d0d1ba..ebe11639ff4b 100644
--- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java
+++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java
@@ -62,6 +62,7 @@ public class EmbeddedConnectStandalone extends EmbeddedConnect {
     private final String offsetsFile;
 
     private volatile WorkerHandle connectWorker;
+    Connect connect;
 
     private EmbeddedConnectStandalone(
             int numBrokers,
@@ -92,7 +93,7 @@ public void startConnect() {
         workerProps.putIfAbsent(PLUGIN_DISCOVERY_CONFIG, "hybrid_fail");
 
         ConnectStandalone cli = new ConnectStandalone();
-        Connect connect = cli.startConnect(workerProps);
+        connect = cli.startConnect(workerProps);
         connectWorker = new WorkerHandle("standalone", connect);
         cli.processExtraArgs(connect, connectorConfigFiles());
     }
 
@@ -137,6 +138,10 @@ private String[] connectorConfigFiles() {
         return result;
     }
 
+    public StandaloneHerder herder() {
+        return connect.herder();
+    }
+
     public static class Builder extends EmbeddedConnectBuilder {
 
         private final List<Map<String, String>> connectorConfigs = new ArrayList<>();
diff --git a/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.sink.SinkConnector b/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.sink.SinkConnector
index 56e054ddbeb9..818d09e61871 100644
--- a/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.sink.SinkConnector
+++ b/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.sink.SinkConnector
@@ -17,5 +17,6 @@ org.apache.kafka.connect.integration.BlockingConnectorTest$BlockingSinkConnector
 org.apache.kafka.connect.integration.BlockingConnectorTest$TaskInitializeBlockingSinkConnector
 org.apache.kafka.connect.integration.ErrantRecordSinkConnector
 org.apache.kafka.connect.integration.MonitorableSinkConnector
+org.apache.kafka.connect.integration.TestableSinkConnector
 org.apache.kafka.connect.runtime.SampleSinkConnector
 org.apache.kafka.connect.integration.ConnectWorkerIntegrationTest$EmptyTaskConfigsConnector
\ No newline at end of file
diff --git a/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector b/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector
index 73033ca23c02..8ff259f8878e 100644
--- a/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector
+++ b/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector
@@ -21,5 +21,6 @@ org.apache.kafka.connect.integration.BlockingConnectorTest$BlockingSourceConnect
 org.apache.kafka.connect.integration.BlockingConnectorTest$TaskInitializeBlockingSourceConnector
 org.apache.kafka.connect.integration.ExactlyOnceSourceIntegrationTest$NaughtyConnector
 org.apache.kafka.connect.integration.MonitorableSourceConnector
+org.apache.kafka.connect.integration.TestableSourceConnector
 org.apache.kafka.connect.runtime.SampleSourceConnector
 org.apache.kafka.connect.runtime.rest.resources.ConnectorPluginsResourceTest$ConnectorPluginsResourceTestConnector
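
Note on the wrapping pattern exercised by the worker-task tests above: converters, header converters, predicates and transformations are no longer handed to the workers directly but wrapped in Plugin objects (obtained here via ConnectMetrics.wrap(...)), and the tests verify that close() is called on the wrapper rather than on the delegate. A minimal sketch of that life cycle, reusing the metrics, taskId and keyConverter fixtures that appear in the tests above (illustrative only, not part of the patch):

    // Wrap the converter so its metrics are scoped to this task; true marks it as the key converter.
    Plugin<Converter> keyConverterPlugin = metrics.wrap(keyConverter, taskId, true);
    Converter delegate = keyConverterPlugin.get();   // the task obtains the wrapped converter through get()
    // ... the task performs its conversions against the delegate ...
    keyConverterPlugin.close();                      // expected to close the delegate and release any metrics registered on its behalf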
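The conditional stubbing in TransformationStageTest (transformationPlugin.get() is only stubbed when the record is expected to be transformed) encodes the stage's pass-through behaviour. The helper below is a sketch of the decision the test implies, not the runtime source; the plugin and negate parameters mirror the mocks used in the test:

    // Illustrative only: apply the transformation when the predicate matches (or fails to match, if negated).
    static SourceRecord applyStage(Plugin<Predicate<SourceRecord>> predicatePlugin,
                                   Plugin<Transformation<SourceRecord>> transformationPlugin,
                                   boolean negate, SourceRecord record) {
        boolean matches = predicatePlugin.get().test(record);
        if (negate ? !matches : matches) {
            return transformationPlugin.get().apply(record);  // transformation plugin is consulted only when the stage fires
        }
        return record;                                        // otherwise the record passes through unchanged
    }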
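The MonitorableConnectRestExtension test class above shows the Monitorable contract end to end: withPluginMetrics(PluginMetrics) is invoked by the runtime when the plugin is set up, the metric registered there becomes visible through the worker's metrics registry, and it is gone again once the plugin is closed. The same contract should be usable from any Connect plugin; a hypothetical converter is sketched below (the class name and metric name are invented for illustration; only the Monitorable, PluginMetrics and Gauge APIs shown in the diff are assumed):

    import org.apache.kafka.common.MetricName;
    import org.apache.kafka.common.metrics.Gauge;
    import org.apache.kafka.common.metrics.Monitorable;
    import org.apache.kafka.common.metrics.PluginMetrics;
    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.storage.StringConverter;

    import java.util.Collections;

    // Hypothetical converter that counts conversions and exposes the count as a gauge.
    public class CountingStringConverter extends StringConverter implements Monitorable {

        private long records = 0;   // illustration only; a real implementation would need thread-safe counting

        @Override
        public void withPluginMetrics(PluginMetrics metrics) {
            MetricName name = metrics.metricName("records-converted", "Number of records this converter has produced", Collections.emptyMap());
            metrics.addMetric(name, (Gauge<Long>) (config, now) -> records);
        }

        @Override
        public byte[] fromConnectData(String topic, Schema schema, Object value) {
            records++;
            return super.fromConnectData(topic, schema, value);
        }
    }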