
Commit 4c47ccc

Author: Rajendran

Punahele project with complete setup

1 parent 65a1c0f

27 files changed: 1,168 additions, 0 deletions

Kafka/config/connect-console-sink.properties

Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=local-console-sink
connector.class=org.apache.kafka.connect.file.FileStreamSinkConnector
tasks.max=1
topics=connect-test

Kafka/config/connect-console-source.properties

Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=local-console-source
connector.class=org.apache.kafka.connect.file.FileStreamSourceConnector
tasks.max=1
topic=connect-test
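
These two connector definitions are the stock console examples: when no file property is set, the FileStream connectors fall back to standard input and standard output. A minimal sketch of how they are typically launched together, assuming the standard Kafka distribution layout, a broker already running on localhost:9092, and the connect-standalone.properties worker config added later in this commit:

# Lines typed on stdin are produced to connect-test by the source;
# the sink consumes the topic and echoes each record back to stdout.
bin/connect-standalone.sh config/connect-standalone.properties \
    config/connect-console-source.properties \
    config/connect-console-sink.properties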

Kafka/config/connect-distributed.properties

Lines changed: 93 additions & 0 deletions
@@ -0,0 +1,93 @@
##
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##

# This file contains some of the configurations for the Kafka Connect distributed worker. This file is intended
# to be used with the examples, and some settings may differ from those used in a production system, especially
# the `bootstrap.servers` and those specifying replication factors.

# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
bootstrap.servers=localhost:9092

# unique name for the cluster, used in forming the Connect cluster group. Note that this must not conflict with consumer group IDs
group.id=connect-cluster

# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
# need to configure these based on the format they want their data in when loaded from or stored into Kafka
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply
# it to
key.converter.schemas.enable=true
value.converter.schemas.enable=true

# The internal converter used for offsets, config, and status data is configurable and must be specified, but most users will
# always want to use the built-in default. Offset, config, and status data is never visible outside of Kafka Connect in this format.
internal.key.converter=org.apache.kafka.connect.json.JsonConverter
internal.value.converter=org.apache.kafka.connect.json.JsonConverter
internal.key.converter.schemas.enable=false
internal.value.converter.schemas.enable=false

# Topic to use for storing offsets. This topic should have many partitions and be replicated and compacted.
# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
# the topic before starting Kafka Connect if a specific topic configuration is needed.
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
offset.storage.topic=connect-offsets
offset.storage.replication.factor=1
#offset.storage.partitions=25

# Topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated,
# and compacted topic. Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
# the topic before starting Kafka Connect if a specific topic configuration is needed.
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
config.storage.topic=connect-configs
config.storage.replication.factor=1

# Topic to use for storing statuses. This topic can have multiple partitions and should be replicated and compacted.
# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
# the topic before starting Kafka Connect if a specific topic configuration is needed.
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
status.storage.topic=connect-status
status.storage.replication.factor=1
#status.storage.partitions=5

# Flush much faster than normal, which is useful for testing/debugging
offset.flush.interval.ms=10000

# These are provided to inform the user about the presence of the REST host and port configs
# Hostname & Port for the REST API to listen on. If this is set, it will bind to the interface used to listen to requests.
#rest.host.name=
#rest.port=8083

# The Hostname & Port that will be given out to other workers to connect to i.e. URLs that are routable from other servers.
#rest.advertised.host.name=
#rest.advertised.port=

# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins
# (connectors, converters, transformations). The list should consist of top level directories that include
# any combination of:
# a) directories immediately containing jars with plugins and their dependencies
# b) uber-jars with plugins and their dependencies
# c) directories immediately containing the package directory structure of classes of plugins and their dependencies
# Examples:
# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors,
#plugin.path=
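
Unlike standalone mode, a distributed worker takes no connector properties files on its command line; connectors are submitted as JSON to the worker's REST API, which listens on port 8083 unless rest.port above is overridden. A sketch, reusing the console-sink settings from this commit (the JSON config keys mirror the properties keys):

bin/connect-distributed.sh config/connect-distributed.properties
curl -X POST -H "Content-Type: application/json" http://localhost:8083/connectors \
  --data '{"name": "local-console-sink",
           "config": {"connector.class": "org.apache.kafka.connect.file.FileStreamSinkConnector",
                      "tasks.max": "1", "topics": "connect-test"}}'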

Kafka/config/connect-file-sink.properties

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=local-file-sink
connector.class=FileStreamSink
tasks.max=1
file=test.sink.txt
topics=connect-test

Kafka/config/connect-file-source.properties

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=local-file-source
connector.class=FileStreamSource
tasks.max=1
file=test.txt
topic=connect-test
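
Together the two file connectors form the classic Connect quickstart pipeline: test.txt -> connect-test topic -> test.sink.txt. The short aliases FileStreamSource/FileStreamSink resolve because the bundled file connectors ship on the worker's classpath. A rough smoke test, assuming the standalone worker config from this commit:

echo "hello connect" > test.txt
bin/connect-standalone.sh config/connect-standalone.properties \
    config/connect-file-source.properties config/connect-file-sink.properties &
sleep 10 && cat test.sink.txt    # should echo back: hello connect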

Kafka/config/connect-log4j.properties

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

log4j.rootLogger=INFO, stdout


log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n

log4j.logger.org.apache.zookeeper=ERROR
log4j.logger.org.I0Itec.zkclient=ERROR
log4j.logger.org.reflections=ERROR
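
The Connect start scripts point at this file by default; KAFKA_LOG4J_OPTS only needs setting when the config lives somewhere non-standard. A sketch (the /etc/kafka path is purely illustrative):

export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:/etc/kafka/connect-log4j.properties"
bin/connect-standalone.sh config/connect-standalone.properties config/connect-console-source.properties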

Kafka/config/connect-standalone.properties

Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# These are defaults. This file just demonstrates how to override some settings.
bootstrap.servers=localhost:9092

# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
# need to configure these based on the format they want their data in when loaded from or stored into Kafka
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply
# it to
key.converter.schemas.enable=true
value.converter.schemas.enable=true

# The internal converter used for offsets and config data is configurable and must be specified, but most users will
# always want to use the built-in default. Offset and config data is never visible outside of Kafka Connect in this format.
internal.key.converter=org.apache.kafka.connect.json.JsonConverter
internal.value.converter=org.apache.kafka.connect.json.JsonConverter
internal.key.converter.schemas.enable=false
internal.value.converter.schemas.enable=false

offset.storage.file.filename=/tmp/connect.offsets
# Flush much faster than normal, which is useful for testing/debugging
offset.flush.interval.ms=10000

# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins
# (connectors, converters, transformations). The list should consist of top level directories that include
# any combination of:
# a) directories immediately containing jars with plugins and their dependencies
# b) uber-jars with plugins and their dependencies
# c) directories immediately containing the package directory structure of classes of plugins and their dependencies
# Note: symlinks will be followed to discover dependencies or plugins.
# Examples:
# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors,
#plugin.path=
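
As a concrete illustration of layout (a) from the plugin.path comment above, each third-party connector gets its own directory under a common root, with its jar and dependencies side by side; the directory and jar names here are hypothetical:

mkdir -p /opt/connectors/my-sink-connector
cp my-sink-connector-1.0.jar my-sink-dependency.jar /opt/connectors/my-sink-connector/
# then uncomment in this file:
# plugin.path=/opt/connectors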

Kafka/config/consumer.properties

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see org.apache.kafka.clients.consumer.ConsumerConfig for more details

# list of brokers used for bootstrapping knowledge about the rest of the cluster
# format: host1:port1,host2:port2 ...
bootstrap.servers=localhost:9092

# consumer group id
group.id=test-consumer-group

# What to do when there is no initial offset in Kafka or if the current
# offset does not exist any more on the server: latest, earliest, none
#auto.offset.reset=
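
This file is read by the client tools rather than the broker; for example, the bundled console consumer can replay the connect-test topic written by the connectors above using these settings:

bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 \
    --topic connect-test --from-beginning \
    --consumer.config config/consumer.properties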

Kafka/config/log4j.properties

Lines changed: 92 additions & 0 deletions
@@ -0,0 +1,92 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unspecified loggers and loggers with additivity=true output to server.log and stdout
# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
log4j.rootLogger=INFO, stdout, kafkaAppender

log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n

# Change the two lines below to adjust ZK client logging
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
log4j.logger.org.apache.zookeeper=INFO

# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
log4j.logger.kafka=INFO
log4j.logger.org.apache.kafka=INFO

# Change to DEBUG or TRACE to enable request logging
log4j.logger.kafka.request.logger=WARN, requestAppender
log4j.additivity.kafka.request.logger=false

# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
# related to the handling of requests
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
log4j.additivity.kafka.network.RequestChannel$=false

log4j.logger.kafka.controller=TRACE, controllerAppender
log4j.additivity.kafka.controller=false

log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
log4j.additivity.kafka.log.LogCleaner=false

log4j.logger.state.change.logger=TRACE, stateChangeAppender
log4j.additivity.state.change.logger=false

# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender
log4j.additivity.kafka.authorizer.logger=false
