From e94c2f69a5ff345b29a324cc62a1edd4c7720ae3 Mon Sep 17 00:00:00 2001 From: ewelinagr Date: Wed, 9 Apr 2025 16:22:40 +0200 Subject: [PATCH 01/11] Change connector base image to work wit Strimzi operator. --- docker/kafka_connect_run.sh | 88 ++++++++++++++++++++++++++ docker/launch | 0 kafka-connect-fitbit-source/Dockerfile | 21 ++++-- 3 files changed, 102 insertions(+), 7 deletions(-) create mode 100644 docker/kafka_connect_run.sh mode change 100755 => 100644 docker/launch diff --git a/docker/kafka_connect_run.sh b/docker/kafka_connect_run.sh new file mode 100644 index 00000000..20c8695f --- /dev/null +++ b/docker/kafka_connect_run.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash +set -e +set +x + +if [ -z "$KAFKA_CONNECT_PLUGIN_PATH" ]; then + export KAFKA_CONNECT_PLUGIN_PATH="${KAFKA_HOME}/plugins" +fi + +# Get client rack if it's enabled from the file $KAFKA_HOME/init/rack.id (if it exists). This file is generated by the +# init-container used when rack awareness is enabled. +if [ -e "$KAFKA_HOME/init/rack.id" ]; then + STRIMZI_RACK_ID=$(cat "$KAFKA_HOME/init/rack.id") + export STRIMZI_RACK_ID +fi + +# Prepare hostname - for StrimziPodSets we use the Pod DNS name assigned through the headless service +ADVERTISED_HOSTNAME=$(hostname -f | cut -d "." -f1-4) +export ADVERTISED_HOSTNAME + +# Generate temporary keystore password +CERTS_STORE_PASSWORD=$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c32) +export CERTS_STORE_PASSWORD + +# Create dir where keystores and truststores will be stored +mkdir -p /tmp/kafka + +# Import certificates into keystore and truststore +./kafka_connect_tls_prepare_certificates.sh + +# Generate and print the config file +echo "Starting Kafka Connect with configuration:" +./kafka_connect_config_generator.sh | tee /tmp/strimzi-connect.properties | sed -e 's/sasl.jaas.config=.*/sasl.jaas.config=[hidden]/g' -e 's/password=.*/password=[hidden]/g' +echo "" + +# Disable Kafka's GC logging (which logs to a file)... 
+export GC_LOG_ENABLED="false" + +if [ -z "$KAFKA_LOG4J_OPTS" ]; then + export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$KAFKA_HOME/custom-config/log4j.properties" +fi + +# We don't need LOG_DIR because we write no log files, but setting it to a +# directory avoids trying to create it (and logging a permission denied error) +export LOG_DIR="$KAFKA_HOME" + +# enabling Prometheus JMX exporter as Java agent +if [ "$KAFKA_CONNECT_METRICS_ENABLED" = "true" ]; then + KAFKA_OPTS="${KAFKA_OPTS} -javaagent:$(ls "$KAFKA_HOME"/libs/jmx_prometheus_javaagent*.jar)=9404:$KAFKA_HOME/custom-config/metrics-config.json" + export KAFKA_OPTS +fi + +. ./set_kafka_jmx_options.sh "${STRIMZI_JMX_ENABLED}" "${STRIMZI_JMX_USERNAME}" "${STRIMZI_JMX_PASSWORD}" + +# enabling Tracing agent (initializes tracing) as Java agent +if [ "$STRIMZI_TRACING" = "jaeger" ] || [ "$STRIMZI_TRACING" = "opentelemetry" ]; then + KAFKA_OPTS="$KAFKA_OPTS -javaagent:$(ls "$KAFKA_HOME"/libs/tracing-agent*.jar)=$STRIMZI_TRACING" + export KAFKA_OPTS + if [ "$STRIMZI_TRACING" = "opentelemetry" ] && [ -z "$OTEL_TRACES_EXPORTER" ]; then + # auto-set OTLP exporter + export OTEL_TRACES_EXPORTER="otlp" + fi +fi + +if [ -n "$STRIMZI_JAVA_SYSTEM_PROPERTIES" ]; then + export KAFKA_OPTS="${KAFKA_OPTS} ${STRIMZI_JAVA_SYSTEM_PROPERTIES}" +fi + +# Disable FIPS if needed +if [ "$FIPS_MODE" = "disabled" ]; then + export KAFKA_OPTS="${KAFKA_OPTS} -Dcom.redhat.fips=false" +fi + +# Configure heap based on the available resources if needed +. ./dynamic_resources.sh + +# Configure Garbage Collection logging +. ./set_kafka_gc_options.sh + +set -x + +### BEGIN CUSTOM RADAR KAFKA CONNECT SCRIPT ### +# Call the ensure script to verify infrastructure, Kafka cluster, schema registry, and other components +echo "===> Running preflight checks ... 
" +"${KAFKA_HOME}/ensure" +### END CUSTOM RADAR KAFKA CONNECT SCRIPT ### + +# starting Kafka server with final configuration +exec /usr/bin/tini -w -e 143 -- "${KAFKA_HOME}/bin/connect-distributed.sh" /tmp/strimzi-connect.properties diff --git a/docker/launch b/docker/launch old mode 100755 new mode 100644 diff --git a/kafka-connect-fitbit-source/Dockerfile b/kafka-connect-fitbit-source/Dockerfile index 33873f6f..54c139ef 100644 --- a/kafka-connect-fitbit-source/Dockerfile +++ b/kafka-connect-fitbit-source/Dockerfile @@ -32,7 +32,7 @@ COPY ./kafka-connect-fitbit-source/src/ /code/kafka-connect-fitbit-source/src RUN gradle jar -FROM confluentinc/cp-kafka-connect-base:7.8.1 +FROM quay.io/strimzi/kafka:0.45.0-kafka-3.9.0 USER appuser @@ -51,14 +51,21 @@ COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_P COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ COPY --from=builder /code/kafka-connect-fitbit-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ -# Load topics validator -COPY --chown=appuser:appuser ./docker/ensure /etc/confluent/docker/ensure +## Load topics validator +COPY --chown=1001:1001 ./docker/ensure /opt/kafka/ensure +RUN chmod +x /opt/kafka/ensure -# Load modified launcher -COPY --chown=appuser:appuser ./docker/launch /etc/confluent/docker/launch +## Load modified launcher +#COPY --chown=appuser:appuser ./docker/launch /etc/confluent/docker/launch +## Load modified connect runner to trigger the validator +COPY --chown=1001:1001 ./docker/kafka_connect_run.sh /opt/kafka/kafka_connect_run.sh +RUN chmod +x /opt/kafka/kafka_connect_run.sh # Overwrite the log4j configuration to include Sentry monitoring. 
-COPY ./docker/log4j.properties.template /etc/confluent/docker/log4j.properties.template +#COPY ./docker/log4j.properties.template /etc/confluent/docker/log4j.properties.template +COPY ./docker/log4j.properties.template /opt/kafka/custom-config/log4j.properties # Copy Sentry monitoring jars. -COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /etc/kafka-connect/jars +#COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /etc/kafka-connect/jars +COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /opt/kafka/libs/ +USER 1001 From b2f6c3db1ba6d804fa1dbcdee832616db1097e81 Mon Sep 17 00:00:00 2001 From: Pim van Nierop Date: Thu, 26 Jun 2025 14:16:47 +0200 Subject: [PATCH 02/11] fix: build of fitbit docker image --- kafka-connect-fitbit-source/Dockerfile | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/kafka-connect-fitbit-source/Dockerfile b/kafka-connect-fitbit-source/Dockerfile index 54c139ef..5aa67045 100644 --- a/kafka-connect-fitbit-source/Dockerfile +++ b/kafka-connect-fitbit-source/Dockerfile @@ -51,16 +51,6 @@ COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_P COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ COPY --from=builder /code/kafka-connect-fitbit-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ -## Load topics validator -COPY --chown=1001:1001 ./docker/ensure /opt/kafka/ensure -RUN chmod +x /opt/kafka/ensure - -## Load modified launcher -#COPY --chown=appuser:appuser ./docker/launch /etc/confluent/docker/launch -## Load modified connect runner to trigger the validator -COPY --chown=1001:1001 ./docker/kafka_connect_run.sh /opt/kafka/kafka_connect_run.sh -RUN chmod +x /opt/kafka/kafka_connect_run.sh - # Overwrite the log4j configuration to include Sentry monitoring. 
#COPY ./docker/log4j.properties.template /etc/confluent/docker/log4j.properties.template COPY ./docker/log4j.properties.template /opt/kafka/custom-config/log4j.properties @@ -69,3 +59,7 @@ COPY ./docker/log4j.properties.template /opt/kafka/custom-config/log4j.propertie COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /opt/kafka/libs/ USER 1001 + +COPY --chown=1001:1001 ./docker/ensure /opt/kafka/ensure +COPY --chown=1001:1001 ./docker/kafka_connect_run.sh /opt/kafka/kafka_connect_run.sh +RUN chmod +x /opt/kafka/ensure /opt/kafka/kafka_connect_run.sh From 04415d64be5b4ba4140b03e9daea38e621410e75 Mon Sep 17 00:00:00 2001 From: ewelinagr Date: Thu, 26 Jun 2025 15:33:40 +0200 Subject: [PATCH 03/11] Cleanup docker ensure and launch scripts. --- docker/ensure | 62 +------------------------- docker/launch | 51 --------------------- kafka-connect-fitbit-source/Dockerfile | 5 +-- 3 files changed, 2 insertions(+), 116 deletions(-) delete mode 100644 docker/launch diff --git a/docker/ensure b/docker/ensure index 13e94dad..ed7fdbf7 100755 --- a/docker/ensure +++ b/docker/ensure @@ -1,62 +1,6 @@ #!/bin/bash -if [ "$WAIT_FOR_KAFKA" != "1" ]; then - echo "Starting without checking for Kafka availability" - exit 0 -fi - -max_timeout=32 - -IS_TEMP=0 - -echo "===> Wait for infrastructure ..." - -if [ -z "$COMMAND_CONFIG_FILE_PATH" ]; then - COMMAND_CONFIG_FILE_PATH="$(mktemp)" - IS_TEMP=1 -fi - -if [ ! -f "$COMMAND_CONFIG_FILE_PATH" ] || [ $IS_TEMP = 1 ]; then - while IFS='=' read -r -d '' n v; do - if [[ "$n" == "CONNECT_"* ]]; then - name="${n/CONNECT_/""}" # remove first "CONNECT_" - name="${name,,}" # lower case - name="${name//_/"."}" # replace all '_' with '.' 
- echo "$name=$v" >> ${COMMAND_CONFIG_FILE_PATH} - fi - done < <(env -0) -fi - -# Check if variables exist -if [ -z "$CONNECT_BOOTSTRAP_SERVERS" ]; then - echo "CONNECT_BOOTSTRAP_SERVERS is not defined" -else - KAFKA_BROKERS=${KAFKA_BROKERS:-3} - - tries=10 - timeout=1 - while true; do - KAFKA_CHECK=$(kafka-broker-api-versions --bootstrap-server "$CONNECT_BOOTSTRAP_SERVERS" --command-config "${COMMAND_CONFIG_FILE_PATH}" | grep "(id: " | wc -l) - - if [ "$KAFKA_CHECK" -ge "$KAFKA_BROKERS" ]; then - echo "Kafka brokers available." - break - fi - - tries=$((tries - 1)) - if [ ${tries} -eq 0 ]; then - echo "FAILED: KAFKA BROKERs NOT READY." - exit 5 - fi - echo "Expected $KAFKA_BROKERS brokers but found only $KAFKA_CHECK. Waiting $timeout second before retrying ..." - sleep ${timeout} - if [ ${timeout} -lt ${max_timeout} ]; then - timeout=$((timeout * 2)) - fi - done - - echo "Kafka is available." -fi +echo "===> Checking if Schema Registry is available ..." if [ -z "$CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL" ]; then echo "CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL is not defined" @@ -82,7 +26,3 @@ else echo "Schema registry is available." fi - -if [ $IS_TEMP = 1 ]; then - /bin/rm -f "$COMMAND_CONFIG_FILE_PATH" -fi diff --git a/docker/launch b/docker/launch deleted file mode 100644 index 7ae6b678..00000000 --- a/docker/launch +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright 2016 Confluent Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -# Override this section from the script to include the com.sun.management.jmxremote.rmi.port property. -if [ -z "$KAFKA_JMX_OPTS" ]; then - export KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " -fi - -# The JMX client needs to be able to connect to java.rmi.server.hostname. -# The default for bridged n/w is the bridged IP so you will only be able to connect from another docker container. -# For host n/w, this is the IP that the hostname on the host resolves to. - -# If you have more that one n/w configured, hostname -i gives you all the IPs, -# the default is to pick the first IP (or network). -export KAFKA_JMX_HOSTNAME=${KAFKA_JMX_HOSTNAME:-$(hostname -i | cut -d" " -f1)} - -if [ "$KAFKA_JMX_PORT" ]; then - # This ensures that the "if" section for JMX_PORT in kafka launch script does not trigger. - export JMX_PORT=$KAFKA_JMX_PORT - export KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Djava.rmi.server.hostname=$KAFKA_JMX_HOSTNAME -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT -Dcom.sun.management.jmxremote.port=$JMX_PORT" -fi - -echo "===> Launching ${COMPONENT} ..." -# Add our jar to the classpath so that the custom classes can be loaded first. -# And this also makes sure that the CLASSPATH does not start with ":/etc/..." 
-# other jars are loaded via the plugin path -if [ -z "$CLASSPATH" ]; then - export CLASSPATH="/etc/kafka-connect/jars/*" -fi - -if [ -z "$CONNECTOR_PROPERTY_FILE_PREFIX" ]; then - # execute connector in distributed mode - exec connect-distributed /etc/"${COMPONENT}"/"${COMPONENT}".properties -else - # execute connector in standalone mode - exec connect-standalone /etc/"${COMPONENT}"/"${COMPONENT}".properties /etc/"${COMPONENT}"/"${CONNECTOR_PROPERTY_FILE_PREFIX}"*.properties -fi diff --git a/kafka-connect-fitbit-source/Dockerfile b/kafka-connect-fitbit-source/Dockerfile index 5aa67045..5bba367e 100644 --- a/kafka-connect-fitbit-source/Dockerfile +++ b/kafka-connect-fitbit-source/Dockerfile @@ -40,8 +40,7 @@ LABEL org.opencontainers.image.authors="pim@thehyve.nl" LABEL description="Kafka REST API Source connector" -ENV CONNECT_PLUGIN_PATH="/usr/share/java/kafka-connect/plugins" \ - WAIT_FOR_KAFKA="1" +ENV CONNECT_PLUGIN_PATH="/usr/share/java/kafka-connect/plugins" # To isolate the classpath from the plugin path as recommended COPY --from=builder /code/kafka-connect-rest-source/build/third-party/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-rest-source/ @@ -52,10 +51,8 @@ COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_P COPY --from=builder /code/kafka-connect-fitbit-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ # Overwrite the log4j configuration to include Sentry monitoring. -#COPY ./docker/log4j.properties.template /etc/confluent/docker/log4j.properties.template COPY ./docker/log4j.properties.template /opt/kafka/custom-config/log4j.properties # Copy Sentry monitoring jars. 
-#COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /etc/kafka-connect/jars COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /opt/kafka/libs/ USER 1001 From f0cb080a2db4e1d4064ec53c948636c98e37ddbf Mon Sep 17 00:00:00 2001 From: ewelinagr Date: Thu, 26 Jun 2025 15:34:00 +0200 Subject: [PATCH 04/11] Update kafka connect run script. --- docker/kafka_connect_run.sh | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/docker/kafka_connect_run.sh b/docker/kafka_connect_run.sh index 20c8695f..25cc0b2b 100644 --- a/docker/kafka_connect_run.sh +++ b/docker/kafka_connect_run.sh @@ -1,57 +1,47 @@ #!/usr/bin/env bash -set -e -set +x -if [ -z "$KAFKA_CONNECT_PLUGIN_PATH" ]; then - export KAFKA_CONNECT_PLUGIN_PATH="${KAFKA_HOME}/plugins" -fi +# Source script: https://github.com/strimzi/strimzi-kafka-operator/blob/main/docker-images/kafka-based/kafka/scripts/kafka_connect_run.sh -# Get client rack if it's enabled from the file $KAFKA_HOME/init/rack.id (if it exists). This file is generated by the -# init-container used when rack awareness is enabled. -if [ -e "$KAFKA_HOME/init/rack.id" ]; then - STRIMZI_RACK_ID=$(cat "$KAFKA_HOME/init/rack.id") - export STRIMZI_RACK_ID -fi +set -e +set +x # Prepare hostname - for StrimziPodSets we use the Pod DNS name assigned through the headless service ADVERTISED_HOSTNAME=$(hostname -f | cut -d "." 
-f1-4) export ADVERTISED_HOSTNAME -# Generate temporary keystore password -CERTS_STORE_PASSWORD=$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c32) -export CERTS_STORE_PASSWORD - # Create dir where keystores and truststores will be stored mkdir -p /tmp/kafka -# Import certificates into keystore and truststore -./kafka_connect_tls_prepare_certificates.sh - # Generate and print the config file echo "Starting Kafka Connect with configuration:" -./kafka_connect_config_generator.sh | tee /tmp/strimzi-connect.properties | sed -e 's/sasl.jaas.config=.*/sasl.jaas.config=[hidden]/g' -e 's/password=.*/password=[hidden]/g' +tee /tmp/strimzi-connect.properties < "/opt/kafka/custom-config/kafka-connect.properties" | sed -e 's/sasl.jaas.config=.*/sasl.jaas.config=[hidden]/g' echo "" # Disable Kafka's GC logging (which logs to a file)... export GC_LOG_ENABLED="false" if [ -z "$KAFKA_LOG4J_OPTS" ]; then - export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$KAFKA_HOME/custom-config/log4j.properties" + if [[ "${KAFKA_VERSION:0:1}" == "3" ]] + then + export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$KAFKA_HOME/custom-config/log4j.properties" + else + export KAFKA_LOG4J_OPTS="-Dlog4j2.configurationFile=$KAFKA_HOME/custom-config/log4j2.properties" + fi fi # We don't need LOG_DIR because we write no log files, but setting it to a # directory avoids trying to create it (and logging a permission denied error) export LOG_DIR="$KAFKA_HOME" -# enabling Prometheus JMX exporter as Java agent +# Enable Prometheus JMX Exporter as Java agent if [ "$KAFKA_CONNECT_METRICS_ENABLED" = "true" ]; then - KAFKA_OPTS="${KAFKA_OPTS} -javaagent:$(ls "$KAFKA_HOME"/libs/jmx_prometheus_javaagent*.jar)=9404:$KAFKA_HOME/custom-config/metrics-config.json" + KAFKA_OPTS="${KAFKA_OPTS} -javaagent:$(ls "$JMX_EXPORTER_HOME"/jmx_prometheus_javaagent*.jar)=9404:$KAFKA_HOME/custom-config/metrics-config.json" export KAFKA_OPTS fi . 
./set_kafka_jmx_options.sh "${STRIMZI_JMX_ENABLED}" "${STRIMZI_JMX_USERNAME}" "${STRIMZI_JMX_PASSWORD}" -# enabling Tracing agent (initializes tracing) as Java agent +# Enable Tracing agent (initializes tracing) as Java agent if [ "$STRIMZI_TRACING" = "jaeger" ] || [ "$STRIMZI_TRACING" = "opentelemetry" ]; then KAFKA_OPTS="$KAFKA_OPTS -javaagent:$(ls "$KAFKA_HOME"/libs/tracing-agent*.jar)=$STRIMZI_TRACING" export KAFKA_OPTS From 86c1e687176d5dbe532c4d082634a84931d86ba6 Mon Sep 17 00:00:00 2001 From: Pim van Nierop Date: Thu, 26 Jun 2025 16:32:50 +0200 Subject: [PATCH 05/11] build: fix plugin directory --- kafka-connect-fitbit-source/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kafka-connect-fitbit-source/Dockerfile b/kafka-connect-fitbit-source/Dockerfile index 5bba367e..b35e4349 100644 --- a/kafka-connect-fitbit-source/Dockerfile +++ b/kafka-connect-fitbit-source/Dockerfile @@ -40,7 +40,7 @@ LABEL org.opencontainers.image.authors="pim@thehyve.nl" LABEL description="Kafka REST API Source connector" -ENV CONNECT_PLUGIN_PATH="/usr/share/java/kafka-connect/plugins" +ENV CONNECT_PLUGIN_PATH=/opt/kafka/plugins # To isolate the classpath from the plugin path as recommended COPY --from=builder /code/kafka-connect-rest-source/build/third-party/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-rest-source/ From 39e5dd86b40e21ad6eec5aef6a2bef6f8e904f1b Mon Sep 17 00:00:00 2001 From: Pim van Nierop Date: Thu, 26 Jun 2025 16:33:20 +0200 Subject: [PATCH 06/11] wip: update ensure for schema registry scanning --- docker/ensure | 52 ++++++++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/docker/ensure b/docker/ensure index ed7fdbf7..accc9e7c 100755 --- a/docker/ensure +++ b/docker/ensure @@ -1,28 +1,34 @@ #!/bin/bash +# Get the schema registry URL from the config. 
+ss_url=$(grep -E '^key.converter.schema.registry.url=' /tmp/strimzi-connect.properties | cut -d'=' -f2) + +# If the schema registry URL is not set, exit... +if [ -z "$ss_url" ]; then + echo "Schema registry URL is not set in strimzi-connect.properties." + echo "We will not check for schema registry availability." + exit 0 +fi + echo "===> Checking if Schema Registry is available ..." -if [ -z "$CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL" ]; then - echo "CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL is not defined" -else - tries=10 - timeout=1 - while true; do - if wget --spider -q "${CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL}/subjects" 2>/dev/null; then - echo "Schema registry available." - break - fi - tries=$((tries - 1)) - if [ $tries -eq 0 ]; then - echo "FAILED TO REACH SCHEMA REGISTRY." - exit 6 - fi - echo "Failed to reach schema registry. Retrying in ${timeout} seconds." - sleep ${timeout} - if [ ${timeout} -lt ${max_timeout} ]; then - timeout=$((timeout * 2)) - fi - done +tries=10 +timeout=1 +while true; do + if wget --spider -q "${ss_url}/subjects" 2>/dev/null; then + echo "Schema registry available." + break + fi + tries=$((tries - 1)) + if [ $tries -eq 0 ]; then + echo "FAILED TO REACH SCHEMA REGISTRY." + exit 6 + fi + echo "Failed to reach schema registry. Retrying in ${timeout} seconds." + sleep ${timeout} + if [ ${timeout} -lt ${max_timeout} ]; then + timeout=$((timeout * 2)) + fi +done - echo "Schema registry is available." -fi +echo "Schema registry is available." From fc11fa9fc0e36520d427fca06d7cfc5dcef7d5fa Mon Sep 17 00:00:00 2001 From: ewelinagr Date: Fri, 27 Jun 2025 12:59:40 +0200 Subject: [PATCH 07/11] Fix ensure script by replacing wget with curl. --- docker/ensure | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/ensure b/docker/ensure index accc9e7c..18fed8fd 100755 --- a/docker/ensure +++ b/docker/ensure @@ -12,10 +12,11 @@ fi echo "===> Checking if Schema Registry is available ..." 
+max_timeout=32 tries=10 timeout=1 while true; do - if wget --spider -q "${ss_url}/subjects" 2>/dev/null; then + if curl --head --silent --fail "${ss_url}/subjects" > /dev/null; then echo "Schema registry available." break fi From 358418722c705214904266c8a2997498aa591037 Mon Sep 17 00:00:00 2001 From: ewelinagr Date: Mon, 30 Jun 2025 11:32:32 +0200 Subject: [PATCH 08/11] Add legacy setup with confluentinc images to run kafka stack with docker compose. --- README.md | 8 +- docker-compose.yml | 2 +- docker/legacy/README.md | 7 ++ docker/legacy/ensure | 88 +++++++++++++++++++ docker/legacy/launch | 51 +++++++++++ .../source-fitbit.properties.template | 0 kafka-connect-fitbit-source/Dockerfile-legacy | 63 +++++++++++++ 7 files changed, 216 insertions(+), 3 deletions(-) create mode 100644 docker/legacy/README.md create mode 100644 docker/legacy/ensure create mode 100644 docker/legacy/launch rename docker/{ => legacy}/source-fitbit.properties.template (100%) create mode 100644 kafka-connect-fitbit-source/Dockerfile-legacy diff --git a/README.md b/README.md index eeafadba..f830d147 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,11 @@ of Java 17 or later. Generally, this component is installed with [RADAR-Kubernetes](https://github.com/RADAR-base/RADAR-Kubernetes). It uses Docker -image [radarbase/kafka-connect-rest-fitbit-source](https://hub.docker.com/r/radarbase/kafka-connect-rest-fitbit-source). +image [radarbase/kafka-connect-rest-fitbit-source](https://hub.docker.com/r/radarbase/kafka-connect-rest-fitbit-source), +which is built from the `kafka-connect-fitbit-source/Dockerfile`. The image is based on the Strimzi Kafka Connect image. + +The Fitbit source connector can be also run with docker compose and the Confluent Kafka Connect image, using the `kafka-connect-fitbit-source/Dockerfile-legacy`. + First, [register a Fitbit App](https://dev.fitbit.com/apps) with Fitbit. It should be either a server app, for multiple users, or a personal app for a single user. 
With the server app, you need @@ -39,7 +43,7 @@ For every Fitbit user you want access to, copy `docker/fitbit-user.yml.template` For automatic configuration for multiple users, please take a look at `scripts/REDCAP-FITBIT-AUTH-AUTO/README.md`. -Copy `docker/source-fitbit.properties.template` to `docker/source-fitbit.properties` and enter +Copy `docker/legacy/source-fitbit.properties.template` to `docker/legacy/source-fitbit.properties` and enter your Fitbit App client ID and client secret. The following tables shows the possible properties. diff --git a/docker-compose.yml b/docker-compose.yml index 3dd12369..a874c300 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -145,7 +145,7 @@ services: radar-fitbit-connector: build: context: . - dockerfile: ./kafka-connect-fitbit-source/Dockerfile + dockerfile: ./kafka-connect-fitbit-source/Dockerfile-legacy image: radarbase/radar-connect-fitbit-source restart: on-failure volumes: diff --git a/docker/legacy/README.md b/docker/legacy/README.md new file mode 100644 index 00000000..7daa3e12 --- /dev/null +++ b/docker/legacy/README.md @@ -0,0 +1,7 @@ +# Confluentinc image based docker setup (legacy) + +Files in this directory are used by Dockerfile-legacy to build a legacy docker images (Confluentinc-based) of the connectors, +as opposed to the new Strimzi based images. + +The legacy setup can be to run the Kafka stack (Kafka, Zookeeper, Schema Registry and Kafka Connectors) +with docker compose (see [docker-compose.yml](../../docker-compose.yml)). diff --git a/docker/legacy/ensure b/docker/legacy/ensure new file mode 100644 index 00000000..13e94dad --- /dev/null +++ b/docker/legacy/ensure @@ -0,0 +1,88 @@ +#!/bin/bash + +if [ "$WAIT_FOR_KAFKA" != "1" ]; then + echo "Starting without checking for Kafka availability" + exit 0 +fi + +max_timeout=32 + +IS_TEMP=0 + +echo "===> Wait for infrastructure ..." + +if [ -z "$COMMAND_CONFIG_FILE_PATH" ]; then + COMMAND_CONFIG_FILE_PATH="$(mktemp)" + IS_TEMP=1 +fi + +if [ ! 
-f "$COMMAND_CONFIG_FILE_PATH" ] || [ $IS_TEMP = 1 ]; then + while IFS='=' read -r -d '' n v; do + if [[ "$n" == "CONNECT_"* ]]; then + name="${n/CONNECT_/""}" # remove first "CONNECT_" + name="${name,,}" # lower case + name="${name//_/"."}" # replace all '_' with '.' + echo "$name=$v" >> ${COMMAND_CONFIG_FILE_PATH} + fi + done < <(env -0) +fi + +# Check if variables exist +if [ -z "$CONNECT_BOOTSTRAP_SERVERS" ]; then + echo "CONNECT_BOOTSTRAP_SERVERS is not defined" +else + KAFKA_BROKERS=${KAFKA_BROKERS:-3} + + tries=10 + timeout=1 + while true; do + KAFKA_CHECK=$(kafka-broker-api-versions --bootstrap-server "$CONNECT_BOOTSTRAP_SERVERS" --command-config "${COMMAND_CONFIG_FILE_PATH}" | grep "(id: " | wc -l) + + if [ "$KAFKA_CHECK" -ge "$KAFKA_BROKERS" ]; then + echo "Kafka brokers available." + break + fi + + tries=$((tries - 1)) + if [ ${tries} -eq 0 ]; then + echo "FAILED: KAFKA BROKERs NOT READY." + exit 5 + fi + echo "Expected $KAFKA_BROKERS brokers but found only $KAFKA_CHECK. Waiting $timeout second before retrying ..." + sleep ${timeout} + if [ ${timeout} -lt ${max_timeout} ]; then + timeout=$((timeout * 2)) + fi + done + + echo "Kafka is available." +fi + +if [ -z "$CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL" ]; then + echo "CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL is not defined" +else + tries=10 + timeout=1 + while true; do + if wget --spider -q "${CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL}/subjects" 2>/dev/null; then + echo "Schema registry available." + break + fi + tries=$((tries - 1)) + if [ $tries -eq 0 ]; then + echo "FAILED TO REACH SCHEMA REGISTRY." + exit 6 + fi + echo "Failed to reach schema registry. Retrying in ${timeout} seconds." + sleep ${timeout} + if [ ${timeout} -lt ${max_timeout} ]; then + timeout=$((timeout * 2)) + fi + done + + echo "Schema registry is available." 
+fi + +if [ $IS_TEMP = 1 ]; then + /bin/rm -f "$COMMAND_CONFIG_FILE_PATH" +fi diff --git a/docker/legacy/launch b/docker/legacy/launch new file mode 100644 index 00000000..7ae6b678 --- /dev/null +++ b/docker/legacy/launch @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# +# Copyright 2016 Confluent Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Override this section from the script to include the com.sun.management.jmxremote.rmi.port property. +if [ -z "$KAFKA_JMX_OPTS" ]; then + export KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false " +fi + +# The JMX client needs to be able to connect to java.rmi.server.hostname. +# The default for bridged n/w is the bridged IP so you will only be able to connect from another docker container. +# For host n/w, this is the IP that the hostname on the host resolves to. + +# If you have more that one n/w configured, hostname -i gives you all the IPs, +# the default is to pick the first IP (or network). +export KAFKA_JMX_HOSTNAME=${KAFKA_JMX_HOSTNAME:-$(hostname -i | cut -d" " -f1)} + +if [ "$KAFKA_JMX_PORT" ]; then + # This ensures that the "if" section for JMX_PORT in kafka launch script does not trigger. 
+ export JMX_PORT=$KAFKA_JMX_PORT + export KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Djava.rmi.server.hostname=$KAFKA_JMX_HOSTNAME -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT -Dcom.sun.management.jmxremote.port=$JMX_PORT" +fi + +echo "===> Launching ${COMPONENT} ..." +# Add our jar to the classpath so that the custom classes can be loaded first. +# And this also makes sure that the CLASSPATH does not start with ":/etc/..." +# other jars are loaded via the plugin path +if [ -z "$CLASSPATH" ]; then + export CLASSPATH="/etc/kafka-connect/jars/*" +fi + +if [ -z "$CONNECTOR_PROPERTY_FILE_PREFIX" ]; then + # execute connector in distributed mode + exec connect-distributed /etc/"${COMPONENT}"/"${COMPONENT}".properties +else + # execute connector in standalone mode + exec connect-standalone /etc/"${COMPONENT}"/"${COMPONENT}".properties /etc/"${COMPONENT}"/"${CONNECTOR_PROPERTY_FILE_PREFIX}"*.properties +fi diff --git a/docker/source-fitbit.properties.template b/docker/legacy/source-fitbit.properties.template similarity index 100% rename from docker/source-fitbit.properties.template rename to docker/legacy/source-fitbit.properties.template diff --git a/kafka-connect-fitbit-source/Dockerfile-legacy b/kafka-connect-fitbit-source/Dockerfile-legacy new file mode 100644 index 00000000..024d1cb6 --- /dev/null +++ b/kafka-connect-fitbit-source/Dockerfile-legacy @@ -0,0 +1,63 @@ +# Copyright 2018 The Hyve +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +FROM --platform=$BUILDPLATFORM gradle:8.9-jdk17 AS builder + +RUN mkdir /code +WORKDIR /code + +ENV GRADLE_USER_HOME=/code/.gradlecache \ + GRADLE_OPTS="-Dorg.gradle.vfs.watch=false -Djdk.lang.Process.launchMechanism=vfork" + +COPY buildSrc /code/buildSrc +COPY ./build.gradle.kts ./settings.gradle.kts ./gradle.properties /code/ +COPY kafka-connect-rest-source/build.gradle.kts /code/kafka-connect-rest-source/ +COPY kafka-connect-fitbit-source/build.gradle.kts /code/kafka-connect-fitbit-source/ + +RUN gradle downloadDependencies copyDependencies + +COPY ./kafka-connect-rest-source/src/ /code/kafka-connect-rest-source/src +COPY ./kafka-connect-fitbit-source/src/ /code/kafka-connect-fitbit-source/src + +RUN gradle jar + +FROM confluentinc/cp-kafka-connect-base:7.8.1 + +USER appuser + +LABEL org.opencontainers.image.authors="pim@thehyve.nl" + +LABEL description="Kafka REST API Source connector" + +ENV CONNECT_PLUGIN_PATH="/usr/share/java/kafka-connect/plugins" \ + WAIT_FOR_KAFKA="1" + +# To isolate the classpath from the plugin path as recommended +COPY --from=builder /code/kafka-connect-rest-source/build/third-party/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-rest-source/ +COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ + +COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-rest-source/ +COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ +COPY --from=builder /code/kafka-connect-fitbit-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ + +# Load topics validator +COPY --chown=appuser:appuser ./docker/legacy/ensure /etc/confluent/docker/ensure + +# Load modified launcher +COPY --chown=appuser:appuser ./docker/legacy/launch 
/etc/confluent/docker/launch + +# Overwrite the log4j configuration to include Sentry monitoring. +COPY ./docker/log4j.properties.template /etc/confluent/docker/log4j.properties.template +# Copy Sentry monitoring jars. +COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /etc/kafka-connect/jars From e305a9773eced03684beaae90dde1e3a5cbef95f Mon Sep 17 00:00:00 2001 From: ewelinagr Date: Mon, 30 Jun 2025 12:12:40 +0200 Subject: [PATCH 09/11] Upgrade base image for kafka-connect-fitbit-source. --- kafka-connect-fitbit-source/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kafka-connect-fitbit-source/Dockerfile b/kafka-connect-fitbit-source/Dockerfile index b35e4349..438d9107 100644 --- a/kafka-connect-fitbit-source/Dockerfile +++ b/kafka-connect-fitbit-source/Dockerfile @@ -32,7 +32,7 @@ COPY ./kafka-connect-fitbit-source/src/ /code/kafka-connect-fitbit-source/src RUN gradle jar -FROM quay.io/strimzi/kafka:0.45.0-kafka-3.9.0 +FROM quay.io/strimzi/kafka:0.46.0-kafka-3.9.0 USER appuser From 56d3afcbcdc34f3304288b3717e8ffa25f0c5dfe Mon Sep 17 00:00:00 2001 From: ewelinagr Date: Wed, 2 Jul 2025 12:45:54 +0200 Subject: [PATCH 10/11] Update paths of legacy docker files --- .gitignore | 4 ++-- docker-compose.yml | 2 +- docker/{ => legacy}/log4j.properties.template | 0 kafka-connect-fitbit-source/Dockerfile | 3 ++- kafka-connect-fitbit-source/Dockerfile-legacy | 2 +- kafka-connect-oura-source/Dockerfile | 8 +++----- 6 files changed, 9 insertions(+), 10 deletions(-) rename docker/{ => legacy}/log4j.properties.template (100%) diff --git a/.gitignore b/.gitignore index 530c0530..756da85f 100644 --- a/.gitignore +++ b/.gitignore @@ -5,7 +5,7 @@ build/ out/ .gradle/ docker/users -docker/source-fitbit.properties +docker/legacy/source-fitbit.properties docker/source-oura.properties bin/ -.DS_Store \ No newline at end of file +.DS_Store diff --git a/docker-compose.yml b/docker-compose.yml index a874c300..fdab1057 100644 --- 
a/docker-compose.yml +++ b/docker-compose.yml @@ -149,7 +149,7 @@ services: image: radarbase/radar-connect-fitbit-source restart: on-failure volumes: - - ./docker/source-fitbit.properties:/etc/kafka-connect/source-fitbit.properties + - ./docker/legacy/source-fitbit.properties:/etc/kafka-connect/source-fitbit.properties - ./docker/users:/var/lib/kafka-connect-fitbit-source/users - fitbit-logs:/var/lib/kafka-connect-fitbit-source/logs depends_on: diff --git a/docker/log4j.properties.template b/docker/legacy/log4j.properties.template similarity index 100% rename from docker/log4j.properties.template rename to docker/legacy/log4j.properties.template diff --git a/kafka-connect-fitbit-source/Dockerfile b/kafka-connect-fitbit-source/Dockerfile index 438d9107..86b056ac 100644 --- a/kafka-connect-fitbit-source/Dockerfile +++ b/kafka-connect-fitbit-source/Dockerfile @@ -51,7 +51,8 @@ COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_P COPY --from=builder /code/kafka-connect-fitbit-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ # Overwrite the log4j configuration to include Sentry monitoring. -COPY ./docker/log4j.properties.template /opt/kafka/custom-config/log4j.properties +# TODO uncomment after new log4j2 properties file is added. +# COPY ./docker/log4j2.properties /opt/kafka/custom-config/log4j.properties # Copy Sentry monitoring jars. COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /opt/kafka/libs/ diff --git a/kafka-connect-fitbit-source/Dockerfile-legacy b/kafka-connect-fitbit-source/Dockerfile-legacy index 024d1cb6..527dd879 100644 --- a/kafka-connect-fitbit-source/Dockerfile-legacy +++ b/kafka-connect-fitbit-source/Dockerfile-legacy @@ -58,6 +58,6 @@ COPY --chown=appuser:appuser ./docker/legacy/ensure /etc/confluent/docker/ensur COPY --chown=appuser:appuser ./docker/legacy/launch /etc/confluent/docker/launch # Overwrite the log4j configuration to include Sentry monitoring. 
-COPY ./docker/log4j.properties.template /etc/confluent/docker/log4j.properties.template +COPY ./docker/legacy/log4j.properties.template /etc/confluent/docker/log4j.properties.template # Copy Sentry monitoring jars. COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /etc/kafka-connect/jars diff --git a/kafka-connect-oura-source/Dockerfile b/kafka-connect-oura-source/Dockerfile index 4c4ba125..84b147af 100644 --- a/kafka-connect-oura-source/Dockerfile +++ b/kafka-connect-oura-source/Dockerfile @@ -51,13 +51,11 @@ COPY --from=builder /code/kafka-connect-oura-source/build/libs/*.jar ${CONNECT_P COPY --from=builder /code/oura-library/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-oura-source/ # Load topics validator -COPY --chown=appuser:appuser ./docker/ensure /etc/confluent/docker/ensure +COPY --chown=appuser:appuser ./docker/legacy/ensure /etc/confluent/docker/ensure # Load modified launcher -COPY --chown=appuser:appuser ./docker/launch /etc/confluent/docker/launch +COPY --chown=appuser:appuser ./docker/legacy/launch /etc/confluent/docker/launch -# Overwrite the log4j configuration to include Sentry monitoring. -COPY ./docker/log4j.properties.template /etc/confluent/docker/log4j.properties.template -# Copy Sentry monitoring jars. +# Copy Sentry monitoring jars. Sentry monitoring needs to be included in helm chart log4j ConfigMap. COPY --from=builder /code/kafka-connect-oura-source/build/third-party/sentry-* /etc/kafka-connect/jars From e18e265281e080ffe361b92c4aafdc91fe3a7b48 Mon Sep 17 00:00:00 2001 From: ewelinagr Date: Fri, 4 Jul 2025 15:42:31 +0200 Subject: [PATCH 11/11] feat: update oura connector to integrate with Strimzi. 
--- .gitignore | 2 +- docker-compose.yml | 4 +- .../source-oura.properties.template | 0 kafka-connect-fitbit-source/Dockerfile | 3 - kafka-connect-fitbit-source/Dockerfile-legacy | 2 + kafka-connect-oura-source/Dockerfile | 18 +++--- kafka-connect-oura-source/Dockerfile-legacy | 61 +++++++++++++++++++ 7 files changed, 74 insertions(+), 16 deletions(-) rename docker/{ => legacy}/source-oura.properties.template (100%) create mode 100644 kafka-connect-oura-source/Dockerfile-legacy diff --git a/.gitignore b/.gitignore index 756da85f..11577ec1 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,6 @@ out/ .gradle/ docker/users docker/legacy/source-fitbit.properties -docker/source-oura.properties +docker/legacy/source-oura.properties bin/ .DS_Store diff --git a/docker-compose.yml b/docker-compose.yml index fdab1057..769fdb47 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -191,11 +191,11 @@ services: radar-oura-connector: build: context: . - dockerfile: ./kafka-connect-oura-source/Dockerfile + dockerfile: ./kafka-connect-oura-source/Dockerfile-legacy image: radarbase/radar-connect-oura-source restart: on-failure volumes: - - ./docker/source-oura.properties:/etc/kafka-connect/source-oura.properties + - ./docker/legacy/source-oura.properties:/etc/kafka-connect/source-oura.properties - ./docker/users:/var/lib/kafka-connect-oura-source/users depends_on: - zookeeper-1 diff --git a/docker/source-oura.properties.template b/docker/legacy/source-oura.properties.template similarity index 100% rename from docker/source-oura.properties.template rename to docker/legacy/source-oura.properties.template diff --git a/kafka-connect-fitbit-source/Dockerfile b/kafka-connect-fitbit-source/Dockerfile index 86b056ac..ac35b434 100644 --- a/kafka-connect-fitbit-source/Dockerfile +++ b/kafka-connect-fitbit-source/Dockerfile @@ -50,9 +50,6 @@ COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_P COPY --from=builder 
/code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ COPY --from=builder /code/kafka-connect-fitbit-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/ -# Overwrite the log4j configuration to include Sentry monitoring. -# TODO uncomment after new log4j2 properties file is added. -# COPY ./docker/log4j2.properties /opt/kafka/custom-config/log4j.properties # Copy Sentry monitoring jars. COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /opt/kafka/libs/ diff --git a/kafka-connect-fitbit-source/Dockerfile-legacy b/kafka-connect-fitbit-source/Dockerfile-legacy index 527dd879..b7df9f8f 100644 --- a/kafka-connect-fitbit-source/Dockerfile-legacy +++ b/kafka-connect-fitbit-source/Dockerfile-legacy @@ -61,3 +61,5 @@ COPY --chown=appuser:appuser ./docker/legacy/launch /etc/confluent/docker/launc COPY ./docker/legacy/log4j.properties.template /etc/confluent/docker/log4j.properties.template # Copy Sentry monitoring jars. 
COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /etc/kafka-connect/jars + +RUN chmod +x /etc/confluent/docker/ensure diff --git a/kafka-connect-oura-source/Dockerfile b/kafka-connect-oura-source/Dockerfile index 84b147af..a88e8c79 100644 --- a/kafka-connect-oura-source/Dockerfile +++ b/kafka-connect-oura-source/Dockerfile @@ -32,7 +32,7 @@ COPY ./oura-library/src/ /code/oura-library/src RUN gradle jar -FROM confluentinc/cp-kafka-connect-base:7.8.1 +FROM quay.io/strimzi/kafka:0.46.0-kafka-3.9.0 USER appuser @@ -40,8 +40,7 @@ LABEL org.opencontainers.image.authors="pauline.conde@kcl.ac.uk" LABEL description="Kafka Oura REST API Source connector" -ENV CONNECT_PLUGIN_PATH="/usr/share/java/kafka-connect/plugins" \ - WAIT_FOR_KAFKA="1" +ENV CONNECT_PLUGIN_PATH=/opt/kafka/plugins # To isolate the classpath from the plugin path as recommended COPY --from=builder /code/kafka-connect-oura-source/build/third-party/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-oura-source/ @@ -50,12 +49,11 @@ COPY --from=builder /code/oura-library/build/third-party/*.jar ${CONNECT_PLUGIN_ COPY --from=builder /code/kafka-connect-oura-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-oura-source/ COPY --from=builder /code/oura-library/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-oura-source/ -# Load topics validator -COPY --chown=appuser:appuser ./docker/legacy/ensure /etc/confluent/docker/ensure +# Copy Sentry monitoring jars. +COPY --from=builder /code/kafka-connect-oura-source/build/third-party/sentry-* /opt/kafka/libs/ -# Load modified launcher -COPY --chown=appuser:appuser ./docker/legacy/launch /etc/confluent/docker/launch - -# Copy Sentry monitoring jars. Sentry monitoring needs to be included in helm chart log4j ConfigMap. 
-COPY --from=builder /code/kafka-connect-oura-source/build/third-party/sentry-* /etc/kafka-connect/jars +USER 1001 +COPY --chown=1001:1001 ./docker/ensure /opt/kafka/ensure +COPY --chown=1001:1001 ./docker/kafka_connect_run.sh /opt/kafka/kafka_connect_run.sh +RUN chmod +x /opt/kafka/ensure /opt/kafka/kafka_connect_run.sh diff --git a/kafka-connect-oura-source/Dockerfile-legacy b/kafka-connect-oura-source/Dockerfile-legacy new file mode 100644 index 00000000..84b147af --- /dev/null +++ b/kafka-connect-oura-source/Dockerfile-legacy @@ -0,0 +1,61 @@ +# Copyright 2018 The Hyve +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+FROM --platform=$BUILDPLATFORM gradle:8.9-jdk17 AS builder
+
+RUN mkdir /code
+WORKDIR /code
+
+ENV GRADLE_USER_HOME=/code/.gradlecache \
+    GRADLE_OPTS="-Dorg.gradle.vfs.watch=false -Djdk.lang.Process.launchMechanism=vfork"
+
+COPY buildSrc /code/buildSrc
+COPY ./build.gradle.kts ./settings.gradle.kts ./gradle.properties /code/
+COPY kafka-connect-oura-source/build.gradle.kts /code/kafka-connect-oura-source/
+COPY oura-library/build.gradle /code/oura-library/
+
+RUN gradle downloadDependencies copyDependencies
+
+COPY ./kafka-connect-oura-source/src/ /code/kafka-connect-oura-source/src
+COPY ./oura-library/src/ /code/oura-library/src
+
+RUN gradle jar
+
+FROM confluentinc/cp-kafka-connect-base:7.8.1
+
+USER appuser
+
+LABEL org.opencontainers.image.authors="pauline.conde@kcl.ac.uk"
+
+LABEL description="Kafka Oura REST API Source connector"
+
+ENV CONNECT_PLUGIN_PATH="/usr/share/java/kafka-connect/plugins" \
+    WAIT_FOR_KAFKA="1"
+
+# Place connector jars under the plugin path rather than the worker classpath, as recommended for Kafka Connect classloading isolation
+COPY --from=builder /code/kafka-connect-oura-source/build/third-party/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-oura-source/
+COPY --from=builder /code/oura-library/build/third-party/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-oura-source/
+
+COPY --from=builder /code/kafka-connect-oura-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-oura-source/
+COPY --from=builder /code/oura-library/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-oura-source/
+
+# Load topics validator
+COPY --chown=appuser:appuser ./docker/legacy/ensure /etc/confluent/docker/ensure
+
+# Load modified launcher
+COPY --chown=appuser:appuser ./docker/legacy/launch /etc/confluent/docker/launch
+
+# Copy Sentry monitoring jars. The Sentry appender must additionally be configured in the Helm chart's log4j ConfigMap.
+COPY --from=builder /code/kafka-connect-oura-source/build/third-party/sentry-* /etc/kafka-connect/jars