2019/3/14 (Thursday)
Linux initialization script (works on both CentOS 6 and CentOS 7)
ZooKeeper production environment setup
Before installing Kafka, make sure ZooKeeper is already installed; see the two links above!
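Before going any further, it is worth confirming that the ZooKeeper ensemble is actually up on all three nodes. A quick sanity-check sketch, assuming ZooKeeper lives under /usr/local/zookeeper as in the linked post:

/usr/local/zookeeper/bin/zkServer.sh status    # run on each node; one should report Mode: leader, the others Mode: follower
echo ruok | nc 10.2.10.174 2181                # four-letter-word probe; a healthy server answers imok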
Kafka production environment setup
[root@emm-kafka01-10--174 ~]# cd /opt/ins/
[root@emm-kafka01-10--174 ins]# ll
total 233044
-rwxr-xr-x 1 root root 166044032 Mar 13 15:58 jdk-8u102-linux-x64.rpm
-rw-r--r-- 1 root root 50326212 Mar 13 16:14 kafka_2.12-1.1.0.tgz
-rw-r--r-- 1 root root 22261552 Mar 13 16:14 zookeeper-3.4.8.tar.gz
[root@emm-kafka01-10--174 ins]# tar -zxvf kafka_2.12-1.1.0.tgz -C /usr/local/
[root@emm-kafka01-10--174 ins]# cd /usr/local/
[root@emm-kafka01-10--174 local]# ln -s kafka_2.12-1.1.0/ kafka
[root@emm-kafka01-10--174 local]# ll
total 4
drwxr-xr-x. 2 root root 6 Apr 11 2018 bin
drwxr-xr-x. 2 root root 6 Apr 11 2018 etc
drwxr-xr-x. 2 root root 6 Apr 11 2018 games
drwxr-xr-x. 2 root root 6 Apr 11 2018 include
lrwxrwxrwx 1 root root 17 Mar 14 09:51 kafka -> kafka_2.12-1.1.0/
drwxr-xr-x 6 root root 83 Mar 24 2018 kafka_2.12-1.1.0
drwxr-xr-x. 2 root root 6 Apr 11 2018 lib
drwxr-xr-x. 2 root root 6 Apr 11 2018 lib64
drwxr-xr-x. 2 root root 6 Apr 11 2018 libexec
drwxr-xr-x. 2 root root 6 Apr 11 2018 sbin
drwxr-xr-x. 5 root root 46 Apr 11 2018 share
drwxr-xr-x. 2 root root 6 Nov 12 13:03 src
lrwxrwxrwx 1 root root 15 Mar 13 18:20 zookeeper -> zookeeper-3.4.8
drwxr-xr-x 11 root root 4096 Mar 13 18:22 zookeeper-3.4.8
Modify the configuration files
We do not make any special changes to zookeeper.properties, producer.properties, or consumer.properties;
we only modify server.properties.
[root@emm-kafka01-10--174 config]# pwd
/usr/local/kafka/config
[root@emm-kafka01-10--174 config]# ll
total 64
-rw-r--r-- 1 root root 906 Mar 24 2018 connect-console-sink.properties
-rw-r--r-- 1 root root 909 Mar 24 2018 connect-console-source.properties
-rw-r--r-- 1 root root 5807 Mar 24 2018 connect-distributed.properties
-rw-r--r-- 1 root root 883 Mar 24 2018 connect-file-sink.properties
-rw-r--r-- 1 root root 881 Mar 24 2018 connect-file-source.properties
-rw-r--r-- 1 root root 1111 Mar 24 2018 connect-log4j.properties
-rw-r--r-- 1 root root 2730 Mar 24 2018 connect-standalone.properties
-rw-r--r-- 1 root root 1221 Mar 24 2018 consumer.properties
-rw-r--r-- 1 root root 4727 Mar 24 2018 log4j.properties
-rw-r--r-- 1 root root 1919 Mar 24 2018 producer.properties
-rw-r--r-- 1 root root 6851 Mar 24 2018 server.properties
-rw-r--r-- 1 root root 1032 Mar 24 2018 tools-log4j.properties
-rw-r--r-- 1 root root 1023 Mar 24 2018 zookeeper.properties
[root@emm-kafka01-10--174 config]# ls -l server.properties
-rw-r--r-- 1 root root 6851 Mar 24 2018 server.properties
[root@emm-kafka01-10--174 config]# vim server.properties
...
...
...
As follows,
in server.properties under config:
broker.id=10 --if you deploy a Kafka cluster, each broker's id must be different
port=9092 --the default Kafka port; if several Kafka instances run on one machine, each instance needs a different port
log.dirs=/var/log/kafka/kafka-logs --this needs to be changed; it is where topic data is stored
zookeeper.connect=10.2.10.174:2181,10.2.10.175:2181,10.2.10.176:2181/kafkagroup
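A hedged sketch of making these edits with sed instead of vim (values are the ones used in this post; broker.id must still be adjusted per node):

cd /usr/local/kafka/config
sed -i 's/^broker.id=.*/broker.id=174/' server.properties
sed -i 's#^log.dirs=.*#log.dirs=/var/log/kafka/kafka-logs#' server.properties
sed -i 's#^zookeeper.connect=.*#zookeeper.connect=10.2.10.174:2181,10.2.10.175:2181,10.2.10.176:2181/kafkagroup#' server.properties
mkdir -p /var/log/kafka/kafka-logs    # create the data directory up front; the user running Kafka must be able to write here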
[root@emm-kafka01-10--174 config]# grep '^[a-Z]' server.properties
broker.id=174 //different on every instance
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/var/log/kafka/kafka-logs
num.partitions=1 //default number of partitions per topic is 1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.2.10.174:2181,10.2.10.175:2181,10.2.10.176:2181/kafkagroup
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
Note:
Kafka relies on ZooKeeper for coordination, so zookeeper.connect points at a standalone zk or a zk cluster; the line above uses the cluster form. You can also drop /kafkagroup, but then Kafka's znodes get mixed directly into the zk root and the tree becomes messy, so adding a chroot path is recommended.
With it, a kafkagroup znode appears under the zk root.
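One practical consequence: every ZooKeeper-based CLI call later in this post has to carry the same /kafkagroup suffix, otherwise the tools look at an empty root, for example:

kafka-topics.sh --zookeeper 10.2.10.174:2181/kafkagroup --list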
In server.properties on the other two nodes, change broker.id to 175 and 176 respectively (a sketch for doing this over ssh follows the scp output below).
Everything else stays the same.
[root@emm-kafka01-10--174 config]# scp server.properties root@10.2.10.175:/usr/local/kafka/config/
server.properties 100% 6911 2.5MB/s 00:00
[root@emm-kafka01-10--174 config]# scp server.properties root@10.2.10.176:/usr/local/kafka/config/
server.properties 100% 6911 2.8MB/s 00:00
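The copied file still contains broker.id=174, so it has to be changed on each target node; a hedged sketch using ssh + sed (root SSH access assumed, the same as for scp above):

ssh root@10.2.10.175 "sed -i 's/^broker.id=.*/broker.id=175/' /usr/local/kafka/config/server.properties"
ssh root@10.2.10.176 "sed -i 's/^broker.id=.*/broker.id=176/' /usr/local/kafka/config/server.properties"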
Modify the Kafka start script (to adjust the JVM heap size at startup)
[root@emm-kafka01-10--174 bin]# vim /usr/local/kafka/bin/kafka-server-start.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
fi
Change it as follows.
Our machines have 8 GB of RAM, so we set:
export KAFKA_HEAP_OPTS="-Xmx4G -Xms1G"
i.e. a 4 GB maximum heap and a 1 GB initial heap.
On a machine with 4 GB of RAM, use:
export KAFKA_HEAP_OPTS="-Xmx2G -Xms1G"
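Since the script only sets KAFKA_HEAP_OPTS when it is empty, an alternative sketch is to leave kafka-server-start.sh untouched and export the variable before starting (same sizing rules as above):

export KAFKA_HEAP_OPTS="-Xmx4G -Xms1G"    # picked up by the unmodified start script when Kafka is started in step 4 below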
[root@emm-kafka01-10--174 bin]# scp kafka-server-start.sh root@10.2.10.175:/usr/local/kafka/bin/
kafka-server-start.sh 100% 1376 1.5MB/s 00:00
[root@emm-kafka01-10--174 bin]# scp kafka-server-start.sh root@10.2.10.176:/usr/local/kafka/bin/
kafka-server-start.sh 100% 1376 1.2MB/s 00:00
4. Start Kafka //start it in the background; before starting, set the environment variables first
[root@emm-kafka01-10--174 bin]# vim /etc/profile
export PATH=/usr/local/kafka/bin:/usr/local/zookeeper/bin:$PATH
[root@emm-kafka01-10--174 bin]# source /etc/profile
[root@emm-kafka01-10--174 bin]# which kafka-server-start.sh
/usr/local/kafka/bin/kafka-server-start.sh
[root@emm-kafka01-10--174 bin]# which zkServer.sh
/usr/local/zookeeper/bin/zkServer.sh
Do the same on the other two nodes.
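A hedged sketch for pushing the same PATH line to the other two nodes instead of editing /etc/profile by hand (root SSH assumed):

for h in 10.2.10.175 10.2.10.176; do
  ssh root@$h "echo 'export PATH=/usr/local/kafka/bin:/usr/local/zookeeper/bin:\$PATH' >> /etc/profile"
done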
Start Kafka in the background
[root@emm-kafka01-10--174 bin]# cd ~
[root@emm-kafka01-10--174 ~]# nohup kafka-server-start.sh /usr/local/kafka/config/server.properties 1>/dev/null 2>&1 &
[1] 26314
[root@emm-kafka01-10--174 ~]# jps
14290 QuorumPeerMain
26643 Jps
26314 Kafka
[root@emm-kafka01-10--174 ~]# ps -ef|grep kafka
root 26314 24915 35 10:12 pts/0 00:00:11 java -Xmx2G -Xms1G -server
[root@emm-kafka01-10--174 ~]# sh zkCli.sh
[zk: localhost:2181(CONNECTED) 0] ls /
[kafkagroup, zookeeper]
[zk: localhost:2181(CONNECTED) 1] ls /kafkagroup
[cluster, controller, controller_epoch, brokers, admin, isr_change_notification, consumers, log_dir_event_notification, latest_producer_id_block, config]
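You can also confirm that all three brokers registered themselves under the chroot; the ids should match the broker.id values (174, 175, 176):

ls /kafkagroup/brokers/ids    # run inside the same zkCli.sh session; lists the registered broker ids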
With that, the installation is complete.
Next, we test Kafka.
[root@emm-kafka01-10--174 ~]# kafka-topics.sh --zookeeper 10.2.10.174/kafkagroup --create --topic majihui --partitions 2 --replication-factor 2
Created topic "majihui".
[root@emm-kafka01-10--174 ~]# kafka-topics.sh --zookeeper 10.2.10.174/kafkagroup --describe --topic majihui
Topic:majihui PartitionCount:2 ReplicationFactor:2 Configs:
Topic: majihui Partition: 0 Leader: 174 Replicas: 174,175 Isr: 174,175
Topic: majihui Partition: 1 Leader: 175 Replicas: 175,176 Isr: 175,176
[root@emm-kafka02-10--175 ~]# kafka-topics.sh --zookeeper 10.2.10.174 --list
(no output: without the /kafkagroup chroot the tool sees an empty root)
[root@emm-kafka02-10--175 ~]# kafka-topics.sh --zookeeper 10.2.10.174/kafkagroup --list
__consumer_offsets
majihui
Start a producer
[root@emm-kafka02-10--175 ~]# kafka-console-producer.sh --zookeeper 10.2.10.174:2181 --topic majihui
zookeeper is not a recognized option
This command is no longer valid; the option changed, and the console producer now takes --broker-list instead of a ZooKeeper address.
[root@emm-kafka02-10--175 ~]# kafka-console-producer.sh --broker-list 10.2.10.174:9092 --topic majihui
>hello
Start a consumer
[root@emm-kafka01-10--174 ~]# kafka-console-consumer.sh --bootstrap-server 10.2.10.174:9092 --topic majihui
hello
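By default the console consumer only shows messages produced after it starts; if it is launched after the test message was sent, add --from-beginning to replay what is already in the topic:

kafka-console-consumer.sh --bootstrap-server 10.2.10.174:9092 --topic majihui --from-beginning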
Delete the topic majihui
[root@emm-kafka02-10--175 ~]# kafka-topics.sh --zookeeper 10.2.10.174/kafkagroup --delete --topic majihui
Topic majihui is marked for deletion.
Note: This will have no impact if delete.topic.enable is not set to true.
[root@emm-kafka02-10--175 ~]# kafka-topics.sh --zookeeper 10.2.10.174/kafkagroup --list
__consumer_offsets
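The "marked for deletion" note above refers to the delete.topic.enable broker setting. In Kafka 1.1.0 it already defaults to true, which is why the topic disappears from the --list output; on older brokers, or if topics only ever stay "marked", set it explicitly in server.properties on every broker and restart:

delete.topic.enable=true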
[root@emm-kafka01-10--174 ~]# kafka-topics.sh --zookeeper 10.2.10.174/kafkagroup --delete --topic __consumer_offsets
Error while executing topic command : Topic __consumer_offsets is a kafka internal topic and is not allowed to be marked for deletion.
[2019-03-14 11:24:17,407] ERROR kafka.admin.AdminOperationException: Topic __consumer_offsets is a kafka internal topic and is not allowed to be marked for deletion.
at kafka.admin.TopicCommand$.$anonfun$deleteTopic$1(TopicCommand.scala:188)
at kafka.admin.TopicCommand$.$anonfun$deleteTopic$1$adapted(TopicCommand.scala:185)
at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:52)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at kafka.admin.TopicCommand$.deleteTopic(TopicCommand.scala:185)
at kafka.admin.TopicCommand$.main(TopicCommand.scala:71)
at kafka.admin.TopicCommand.main(TopicCommand.scala)
(kafka.admin.TopicCommand$)
[root@emm-kafka01-10--174 ~]# kafka-topics.sh --zookeeper 10.2.10.174/kafkagroup --list
__consumer_offsets
Note that, because of version upgrades, some of the older command-line forms no longer work.
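For reference: in this version (1.1.0) the console producer takes --broker-list and the console consumer takes --bootstrap-server, while kafka-topics.sh still talks to ZooKeeper; Kafka 2.2 and later also let kafka-topics.sh talk to the brokers directly, e.g. (not valid on 1.1.0):

kafka-topics.sh --bootstrap-server 10.2.10.174:9092 --list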
Original article: https://blog.51cto.com/12445535/2362800