hadoop生态搭建(3节点)-05.mysql配置_单节点

# ==================================================================node1

# ==================================================================安装 mysql

# 查看当前安装的mariadb包
rpm -qa | grep mariadb

# 有就将它们统统强制性卸载掉:
rpm -e --nodeps mariadb-libs-5.5.52-1.el7.x86_64

rpm -qa | grep -i mysql

# 创建用户组和用户
groupadd mysql
useradd -r -g mysql -s /bin/false mysql

# 解压到指定目录
tar -zxvf ~/mysql-5.7.22-linux-glibc2.12-x86_64.tar.gz -C /usr/local
mv /usr/local/mysql-5.7.22-linux-glibc2.12-x86_64 /usr/local/mysql
rm -r ~/mysql-5.7.22-linux-glibc2.12-x86_64.tar.gz

# ==================================================================环境变量

vi /etc/profile

# 在export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL下添加
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# 使环境变量生效
source /etc/profile

# 查看配置结果
echo $MYSQL_HOME

# ==================================================================配置 mysql

mkdir $MYSQL_HOME/data
mkdir $MYSQL_HOME/run
mkdir $MYSQL_HOME/logs
# mkdir /var/lib/mysql

vi /etc/my.cnf

[client]
default-character-set=utf8
# character-set-client=utf8
loose-default-character-set=utf8
port=3306
# socket=/var/lib/mysql/mysql.sock
socket=/usr/local/mysql/mysql.sock

[mysqld]
server_id=1
port=3306
character_set_server=utf8
init_connect='SET NAMES utf8'
basedir=/usr/local/mysql
datadir=/usr/local/mysql/data
# socket=/var/lib/mysql/mysql.sock
socket=/usr/local/mysql/mysql.sock
#不区分大小写
lower_case_table_names=1
log-error=/usr/local/mysql/logs/mysqld.log
pid-file=/usr/local/mysql/run/mysqld.pid
default_storage_engine=InnoDB
slow_query_log=1
slow_query_log_file=/usr/local/mysql/logs/mysql_slow_query.log
long_query_time=5

# chmod 777 /var/lib/mysql
chown -R mysql:mysql $MYSQL_HOME

# 添加开机启动
cp $MYSQL_HOME/support-files/mysql.server /etc/rc.d/init.d/mysqld
vi /etc/rc.d/init.d/mysqld

basedir=/usr/local/mysql
datadir=/usr/local/mysql/data

# 增加mysqld服务控制脚本执行权限
chmod +x /etc/rc.d/init.d/mysqld

# 将mysqld服务加入到系统服务
chkconfig --add mysqld

# 检查mysqld服务是否已经生效
chkconfig --list mysqld

# 初始化数据库
$MYSQL_HOME/bin/mysqld --initialize --user=mysql --basedir=/usr/local/mysql --datadir=/usr/local/mysql/data

# 生成了临时密码
# 执行以下命令创建RSA private key
$MYSQL_HOME/bin/mysql_ssl_rsa_setup --datadir=/usr/local/mysql/data

grep 'temporary password' /usr/local/mysql/logs/mysqld.log

# 启动mysql
systemctl start mysqld.service

# service mysqld start
# 启动数据库
# ./mysqld_safe --user=mysql &

mysql -uroot -p

# 如果出现错误 需要添加软连接
# ln -s /usr/local/mysql/bin/mysql /usr/bin

# 修改密码
> alter user 'root'@'localhost' identified by '123456';
> flush privileges;

# 无法远程连接时
> use mysql;
> update user set host = '%' where user = 'root';
> select host, user from user;

# 如果要安装Hive,再创建Hive数据库和用户, 再执行下面的语句
> create database hive character set utf8;
> create user 'hive'@'%' identified by 'Hive-123';
> grant all privileges on *.* to 'hive'@'%';
> flush privileges;
show databases;

quit;

reboot

# 重启验证有效后再进行快照
mysql -uroot -p

quit;

shutdown -h now
# mysql

# ==================================================================安装 mysql (windows)

# 注册服务
mysqld --defaults-file=my.ini --initialize-insecure

# 安装
mysqld --install mysql57

# 启动服务
net start mysql57

mysql -u root -p

> use mysql;

> update user set authentication_string=password('123456') where user='root';

> flush privileges;

> exit

原文地址:https://www.cnblogs.com/zcf5522/p/9754735.html

时间: 2024-09-29 18:48:40

hadoop生态搭建(3节点)-05.mysql配置_单节点的相关文章

hadoop生态搭建(3节点)-17.sqoop配置_单节点

# ==================================================================安装 sqoop tar -zxvf ~/sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz mv ~/sqoop-1.4.7.bin__hadoop-2.6.0 /usr/local/sqoop-1.4.7 # 环境变量 # =========================================================

hadoop生态搭建(3节点)-13.mongodb配置

# 13.mongodb配置_副本集_认证授权# ==================================================================安装 mongodb tar -zxvf ~/mongodb-linux-x86_64-rhel70-3.4.5.tgz -C /usr/local mv /usr/local/mongodb-linux-x86_64-rhel70-3.4.5 /usr/local/mongodb-3.4.5 rm -r ~/mon

hadoop生态搭建(3节点)-12.rabbitmq配置

# 安装 需要相关包# ==================================================================node1 node2 node3 yum install -y gcc gcc-c++ zlib zlin-devel perl ncurses-devel # 安装 openssl# ==================================================================node1 scp -r

js 节点 document html css 表单节点操作

节点操作:访问.属性.创建 (1)节点的访问:firstChild.lastChild.childNodes.parentChild(父子节) 可以使用元素对象的方法进行代替:getElementById().getElementsByTagName() (2)节点属性的操作:setAttribute().removeAttribute().getAttribute() (3)节点的创建.删除.追加: 创建节点:document.createElement(tagName) 删除节点(必须父节点

hadoop生态搭建(3节点)-04.hadoop配置

如果之前没有安装jdk和zookeeper,安装了的请直接跳过 # https://www.oracle.com/technetwork/java/javase/downloads/java-archive-javase8-2177648.html # ==================================================================安装 jdk mkdir -p /usr/java tar -zxvf ~/jdk-8u111-linux-x64

hadoop生态搭建(3节点)-10.spark配置

# https://www.scala-lang.org/download/2.12.4.html# ==================================================================安装 scala tar -zxvf ~/scala-2.12.4.tgz -C /usr/local rm –r ~/scala-2.12.4.tgz # http://archive.apache.org/dist/spark/spark-2.3.0/ # ==

hadoop生态搭建(3节点)-06.hbase配置

# http://archive.apache.org/dist/hbase/1.2.4/ # ==================================================================安装 hbase tar -zxvf ~/hbase-1.2.4-bin.tar.gz -C /usr/local rm –r ~/hbase-1.2.4-bin.tar.gz # 配置环境变量# =====================================

hadoop生态搭建(3节点)-07.hive配置

# http://archive.apache.org/dist/hive/hive-2.1.1/ # ==================================================================安装 hive tar -zxvf apache-hive-2.1.1-bin.tar.gz -C /usr/local mv /usr/local/apache-hive-2.1.1-bin /usr/local/hive-2.1.1 rm –r ~/apach

hadoop生态搭建(3节点)-11.storm配置

# http://archive.apache.org/dist/storm/apache-storm-1.1.0/ # ==================================================================安装 storm tar -zxvf ~/apache-storm-1.1.0.tar.gz -C /usr/local mv /usr/local/apache-storm-1.1.0 /usr/local/storm-1.1.0 rm –r