Project Deployment

MySQL Service

MySQL Planning

Service  Port  Server  Container name
MySQL-node-01 13306 192.168.123.121 pxc_node_01
MySQL-node-02 13307 192.168.123.121 pxc_node_02
MySQL-node-03 13308 192.168.123.121 pxc_node_03
MySQL-node-04 13309 192.168.123.121 pxc_node_04
MySQL-node-05 13310 192.168.123.121 ms_node_01
MySQL-node-06 13311 192.168.123.98 ms_node_02
MyCat-node-01 11986, 18068, 19068 192.168.123.141 mycat_node_01
MyCat-node-02 11987, 18069, 19069 192.168.123.98 mycat_node_02
HAProxy 4001, 4002 192.168.123.54 haproxy

MySQL Implementation

Deploying the PXC cluster

PXC: Percona XtraDB Cluster, a high-availability solution based on Percona Server.

# Create data volumes (stored under /var/lib/docker/volumes); create each one on its corresponding server
docker volume create rent-v1
docker volume create rent-v2
docker volume create rent-v3
docker volume create rent-v4
docker volume create rent-v5
docker volume create rent-v6
# Create a network; for security, a fixed subnet can be set with --subnet=172.18.0.0/24
docker network create pxc-network
# Cluster 1, first node
docker create -p 13306:3306 -v rent-v1:/var/lib/mysql -v /data/mysql/pxc_node_01/conf:/etc/my.cnf.d -e MYSQL_ROOT_PASSWORD=lgq51233 -e CLUSTER_NAME=pxc --name=pxc_node_01 --net=pxc-network pxc

# Second node (add the CLUSTER_JOIN parameter)
docker create -p 13307:3306 -v rent-v2:/var/lib/mysql -v /data/mysql/pxc_node_02/conf:/etc/my.cnf.d -e MYSQL_ROOT_PASSWORD=lgq51233 -e CLUSTER_NAME=pxc --name=pxc_node_02 -e CLUSTER_JOIN=pxc_node_01 --net=pxc-network pxc

# Cluster 2, first node
docker create -p 13308:3306 -v rent-v3:/var/lib/mysql -v /data/mysql/pxc_node_03/conf:/etc/my.cnf.d -e MYSQL_ROOT_PASSWORD=lgq51233 -e CLUSTER_NAME=pxc --name=pxc_node_03 --net=pxc-network --ip=172.18.0.4 pxc

# Second node (add the CLUSTER_JOIN parameter)
docker create -p 13309:3306 -v rent-v4:/var/lib/mysql -v /data/mysql/pxc_node_04/conf:/etc/my.cnf.d -e MYSQL_ROOT_PASSWORD=lgq51233 -e CLUSTER_NAME=pxc --name=pxc_node_04 -e CLUSTER_JOIN=pxc_node_03 --net=pxc-network --ip=172.18.0.5 pxc

# Start
docker start pxc_node_01 && docker logs -f pxc_node_01
docker start pxc_node_02 && docker logs -f pxc_node_02
docker start pxc_node_03 && docker logs -f pxc_node_03
docker start pxc_node_04 && docker logs -f pxc_node_04

# Check the cluster nodes (run the query inside MySQL)
show status like 'wsrep_cluster%';
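
Once all four nodes are up, cluster membership can be verified from the host without an interactive session. A minimal check, assuming the root password used in the commands above:

# Run the status query inside the first node's container
docker exec pxc_node_01 mysql -uroot -plgq51233 -e "show status like 'wsrep_cluster%';"
# Expect wsrep_cluster_size = 2 per two-node cluster and wsrep_cluster_status = Primary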

pxc_node_01/conf/my.cnf configuration file

[mysqld]
server-id=1
wsrep_provider=/usr/lib64/galera3/libgalera_smm.so
wsrep_cluster_name=pxc
wsrep_node_name=pxc_node_01
wsrep_cluster_address='gcomm://192.168.123.121:4576,192.168.123.98:4576'
wsrep_node_address=192.168.123.121
wsrep_sst_method=xtrabackup-v2
pxc_strict_mode=ENFORCING
binlog_format=ROW
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2

pxc_node_02/conf/my.cnf configuration file

[mysqld]
server-id=2
wsrep_provider=/usr/lib64/galera3/libgalera_smm.so
wsrep_cluster_name=pxc
wsrep_node_name=pxc_node_02
wsrep_cluster_address='gcomm://192.168.123.121:4576,192.168.123.98:4576'
wsrep_provider_options = "gmcast.listen_addr=tcp://192.168.123.121:4576;"
wsrep_node_address=192.168.123.98
wsrep_sst_method=xtrabackup-v2
pxc_strict_mode=ENFORCING
binlog_format=ROW
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2

Deploying the master-slave (MS) architecture

Deploying the master

# master
mkdir /data/mysql/rent/master_01/conf -p
vim my.cnf
chmod 644 my.cnf

# Enter the following content
[mysqld]
log-bin=mysql-bin # enable binary logging
server-id=1 # server id, must be unique
sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION'

# Create the container
docker create --name ms_node_01 -v rent-v5:/var/lib/mysql -v /data/mysql/rent/master_01/conf:/etc/my.cnf.d -p 13310:3306 -e MYSQL_ROOT_PASSWORD=lgq51233 percona:5.7.23

# Start
docker start ms_node_01 && docker logs -f ms_node_01

# Create the replication account and grant privileges (run inside MySQL)
create user 'rent'@'%' identified by 'lgq51233';
grant replication slave on *.* to 'rent'@'%';
flush privileges;

# Check master status
show master status;
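
The File and Position values reported here feed the CHANGE MASTER TO statement on the slave below. A minimal sketch of reading them from the host, assuming the credentials above:

# Read the current binlog coordinates from the master container
docker exec ms_node_01 mysql -uroot -plgq51233 -e "show master status;"
# Note the File (e.g. mysql-bin.000001) and Position columns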

Deploying the slave

# slave
mkdir /data/mysql/rent/slave_01/conf -p
vim my.cnf
chmod 644 my.cnf
# Enter the following content
[mysqld]
server-id=2 # server id, must be unique
sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION'

# Create the container
docker create --name ms_node_02 -v rent-v6:/var/lib/mysql -v /data/mysql/rent/slave_01/conf:/etc/my.cnf.d -p 13311:3306 -e MYSQL_ROOT_PASSWORD=lgq51233 percona:5.7.23

# Start
docker start ms_node_02 && docker logs -f ms_node_02

# Point the slave at the master
CHANGE MASTER TO
    master_host='192.168.123.121',
    master_user='rent',
    master_password='lgq51233',
    master_port=13310,
    master_log_file='mysql-bin.000001',
    master_log_pos=154;

# Start replication
start slave;
# Check slave status
show slave status;
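
To confirm replication is healthy, both replication threads should report Yes. A quick check from the host, assuming the credentials above:

# Both Slave_IO_Running and Slave_SQL_Running should be Yes
docker exec ms_node_02 mysql -uroot -plgq51233 -e "show slave status\G" | grep -E "Slave_IO_Running|Slave_SQL_Running"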

Deploying MyCat

server.xml configuration

<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mycat:server SYSTEM "server.dtd">
<mycat:server xmlns:mycat="http://io.mycat/">
    <system>
        <property name="nonePasswordLogin">0</property>
        <property name="useHandshakeV10">1</property>
        <property name="useSqlStat">0</property>
        <property name="useGlobleTableCheck">0</property>
        <property name="sequnceHandlerType">2</property>
        <property name="subqueryRelationshipCheck">false</property>
        <property name="processorBufferPoolType">0</property>
        <property name="handleDistributedTransactions">0</property>
        <property name="useOffHeapForMerge">1</property>
        <property name="memoryPageSize">64k</property>
        <property name="spillsFileBufferSize">1k</property>
        <property name="useStreamOutput">0</property>
        <property name="systemReserveMemorySize">384m</property>
        <property name="useZKSwitch">false</property>
        <!-- server port and management port -->
        <property name="serverPort">18068</property>
        <property name="managerPort">19068</property>
    </system>
    <!-- the rent user and its virtual logical schema -->
    <user name="rent" defaultAccount="true">
        <property name="password">rent123</property>
        <property name="schemas">rent</property>
    </user>
</mycat:server>

schema.xml configuration

<?xml version="1.0" ?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
    <!-- logical table configuration -->
    <schema name="rent" checkSQLschema="false" sqlMaxLimit="100">
        <!-- sharding rule: mod-long -->
        <table name="tb_house_resources" dataNode="dn1,dn2" rule="mod-long" />
        <table name="tb_ad" dataNode="dn3" />
        <table name="tb_estate" dataNode="dn3" />
    </schema>
    <!-- data node (shard) mapping -->
    <dataNode name="dn1" dataHost="cluster1" database="rent" />
    <dataNode name="dn2" dataHost="cluster2" database="rent" />
    <dataNode name="dn3" dataHost="cluster3" database="rent" />
    <!-- connection configuration -->
    <dataHost name="cluster1" maxCon="1000" minCon="10" balance="2" writeType="1" dbType="mysql" dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <writeHost host="W1" url="192.168.123.121:13306" user="root" password="lgq51233">
            <readHost host="W1R1" url="192.168.123.121:13307" user="root" password="lgq51233" />
        </writeHost>
    </dataHost>
    <dataHost name="cluster2" maxCon="1000" minCon="10" balance="2" writeType="1" dbType="mysql" dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <writeHost host="W2" url="192.168.123.121:13308" user="root" password="lgq51233">
            <readHost host="W2R1" url="192.168.123.121:13309" user="root" password="lgq51233" />
        </writeHost>
    </dataHost>
    <dataHost name="cluster3" maxCon="1000" minCon="10" balance="3" writeType="1" dbType="mysql" dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <writeHost host="W2" url="192.168.123.121:13310" user="root" password="lgq51233">
            <readHost host="W2R1" url="192.168.123.98:13311" user="root" password="lgq51233" />
        </writeHost>
    </dataHost>
</mycat:schema>

rule.xml configuration

<function name="mod-long" class="io.mycat.route.function.PartitionByMod">
    <property name="count">2</property>
</function>

wrapper.conf configuration

# Set the JMX port
wrapper.java.additional.7=-Dcom.sun.management.jmxremote.port=11986

Startup

# Test run in the foreground
./mycat console
# Start
./startup_nowrap.sh && tail -f ../logs/mycat.log

For a MyCat cluster, simply copy this installation to the second node.
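
To verify that MyCat is serving the logical schema, connect with a plain MySQL client using the rent user defined in server.xml. A minimal check, assuming a mysql client is available on the test machine:

# Connect to MyCat's server port and list the logical schemas
mysql -h 192.168.123.141 -P 18068 -u rent -prent123 -e "show databases;"
# The output should contain the virtual schema "rent"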

Deploying HAProxy

# Pull the image
docker pull haproxy
# Create a directory for the configuration file
mkdir /rent/haproxy
# Create the container
docker create --name haproxy --net host -v /rent/haproxy:/usr/local/etc/haproxy haproxy

# Edit the configuration
vim /rent/haproxy/haproxy.cfg

# Enter the following content
global
    log 127.0.0.1 local2
    maxconn 4000
    daemon
defaults
    mode http
    log global
    option httplog
    option dontlognull
    option http-server-close
    option forwardfor except 127.0.0.0/8
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000
listen admin_stats
    bind 0.0.0.0:4001
    mode http
    stats uri /dbs
    stats realm Global\ statistics
    stats auth admin:admin123
listen proxy-mysql
    bind 0.0.0.0:4002
    mode tcp
    balance roundrobin
    option tcplog
    server mycat_1 192.168.123.141:18068 check port 18068 maxconn 2000
    server mycat_2 192.168.123.98:18068 check port 18068 maxconn 2000

# Start
docker start haproxy && docker logs -f haproxy
# Test: http://192.168.123.54:4001/dbs
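
Besides the stats page, the MySQL entry point on port 4002 can be exercised with a normal client; a minimal sketch, assuming a mysql client on the test machine:

# Connect through HAProxy, which load-balances across the two MyCat nodes
mysql -h 192.168.123.54 -P 4002 -u rent -prent123 -e "show databases;"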

Redis Service

A 3-master / 3-replica cluster architecture is used.

Redis Planning

Service  Port  Server  Container name
Redis-node-01 6379 192.168.123.121 redis_node_01
Redis-node-02 6380 192.168.123.98 redis_node_02
Redis-node-03 6381 192.168.123.54 redis_node_03
Redis-node-04 16379 192.168.123.98 redis_node_04
Redis-node-05 16380 192.168.123.54 redis_node_05
Redis-node-06 16381 192.168.123.121 redis_node_06

Redis Implementation

docker volume create redis_node_01
docker volume create redis_node_02
docker volume create redis_node_03
docker volume create redis_node_04
docker volume create redis_node_05
docker volume create redis_node_06

# Create the containers
docker create --name redis_node_01 --net host -v redis_node_01:/data redis --cluster-enabled yes --cluster-config-file redis-node-01.conf --port 6379

docker create --name redis_node_02 --net host -v redis_node_02:/data redis --cluster-enabled yes --cluster-config-file redis-node-02.conf --port 6380

docker create --name redis_node_03 --net host -v redis_node_03:/data redis --cluster-enabled yes --cluster-config-file redis-node-03.conf --port 6381

docker create --name redis_node_04 --net host -v redis_node_04:/data redis --cluster-enabled yes --cluster-config-file redis-node-04.conf --port 16379

docker create --name redis_node_05 --net host -v redis_node_05:/data redis --cluster-enabled yes --cluster-config-file redis-node-05.conf --port 16380

docker create --name redis_node_06 --net host -v redis_node_06:/data redis --cluster-enabled yes --cluster-config-file redis-node-06.conf --port 16381

# Start the containers on their respective servers
docker start redis_node_01 redis_node_06
docker start redis_node_02 redis_node_04
docker start redis_node_03 redis_node_05

# Enter the redis_node_01 container and create the cluster
docker exec -it redis_node_01 /bin/bash
redis-cli --cluster create 192.168.123.121:6379 192.168.123.98:6380 192.168.123.54:6381 192.168.123.98:16379 192.168.123.54:16380 192.168.123.121:16381 --cluster-replicas 1

# Check the nodes
redis-cli
CLUSTER NODES
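
A quick functional check is to write and read a key in cluster mode, where -c follows MOVED redirections to the node that owns the slot. A minimal sketch:

# Write and read back a test key across the cluster
docker exec redis_node_01 redis-cli -c -p 6379 set rent:test ok
docker exec redis_node_01 redis-cli -c -p 6379 get rent:test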

Elasticsearch Service

Elasticsearch Planning

Service  Port  Server  Container name
es-node-01 9200, 9300 192.168.123.121 es_node_01
es-node-02 9200, 9300 192.168.123.98 es_node_02
es-node-03 9200, 9300 192.168.123.54 es_node_03

Elasticsearch Implementation

# elasticsearch.yml
cluster.name: es-rent-cluster
node.name: node01
node.master: true
node.data: true
network.host: 0.0.0.0
http.port: 9200
discovery.seed_hosts: ["192.168.123.121","192.168.123.98","192.168.123.54"]
cluster.initial_master_nodes: ["node01"]
http.cors.enabled: true
http.cors.allow-origin: "*"

# jvm.options
-Xms512m
-Xmx512m

# Unzip the IK analyzer package to /rent/es-cluster/ik
# Unzip the pinyin analyzer package to /rent/es-cluster/pinyin

# Create the containers
docker create --name es_node_01 --net host -v /rent/es-cluster/node01/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /rent/es-cluster/node01/jvm.options:/usr/share/elasticsearch/config/jvm.options -v /rent/es-cluster/node01/data:/usr/share/elasticsearch/data -v /rent/es-cluster/ik:/usr/share/elasticsearch/plugins/ik -v /rent/es-cluster/pinyin:/usr/share/elasticsearch/plugins/pinyin docker.elastic.co/elasticsearch/elasticsearch:7.6.0

docker create --name es_node_02 --net host -v /rent/es-cluster/node02/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /rent/es-cluster/node02/jvm.options:/usr/share/elasticsearch/config/jvm.options -v /rent/es-cluster/node02/data:/usr/share/elasticsearch/data -v /rent/es-cluster/ik:/usr/share/elasticsearch/plugins/ik -v /rent/es-cluster/pinyin:/usr/share/elasticsearch/plugins/pinyin docker.elastic.co/elasticsearch/elasticsearch:7.6.0

docker create --name es_node_03 --net host -v /rent/es-cluster/node03/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /rent/es-cluster/node03/jvm.options:/usr/share/elasticsearch/config/jvm.options -v /rent/es-cluster/node03/data:/usr/share/elasticsearch/data -v /rent/es-cluster/ik:/usr/share/elasticsearch/plugins/ik -v /rent/es-cluster/pinyin:/usr/share/elasticsearch/plugins/pinyin docker.elastic.co/elasticsearch/elasticsearch:7.6.0

# Start and check the logs
docker start es_node_01 && docker logs -f es_node_01
docker start es_node_02 && docker logs -f es_node_02
docker start es_node_03 && docker logs -f es_node_03
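
Cluster formation can be confirmed over the HTTP port of any node; a minimal check:

# Health should be green (or yellow while replicas allocate) with 3 nodes
curl -s "http://192.168.123.121:9200/_cluster/health?pretty"
# List the nodes and the elected master
curl -s "http://192.168.123.121:9200/_cat/nodes?v"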

Mapping configuration

{
    "settings": {
        "index": {
            "number_of_shards": 6,
            "number_of_replicas": 1,
            "analysis": {
                "analyzer": {
                    "pinyin_analyzer": {
                        "tokenizer": "my_pinyin"
                    }
                },
                "tokenizer": {
                    "my_pinyin": {
                        "type": "pinyin",
                        "keep_separate_first_letter": false,
                        "keep_full_pinyin": true,
                        "keep_original": true,
                        "limit_first_letter_length": 16,
                        "lowercase": true,
                        "remove_duplicated_term": true
                    }
                }
            }
        }
    },
    "mappings": {
        "dynamic": false,
        "properties": {
            "title": {
                "type": "text",
                "analyzer": "ik_max_word",
                "fields": {
                    "pinyin": {
                        "type": "text",
                        "analyzer": "pinyin_analyzer"
                    }
                }
            },
            "image": {
                "type": "keyword",
                "index": false
            },
            "orientation": {
                "type": "keyword",
                "index": false
            },
            "houseType": {
                "type": "keyword",
                "index": false
            },
            "rentMethod": {
                "type": "keyword",
                "index": false
            },
            "time": {
                "type": "keyword",
                "index": false
            },
            "rent": {
                "type": "keyword",
                "index": false
            },
            "floor": {
                "type": "keyword",
                "index": false
            }
        }
    }
}
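
The settings and mappings above are applied when the index is created. A minimal sketch using curl, assuming the JSON is saved to a local file named rent-mapping.json (hypothetical) and the index is named rent, matching the bulk-insert test below:

# Create the rent index with the settings and mappings shown above
curl -X PUT "http://192.168.123.121:9200/rent" -H "Content-Type: application/json" -d @rent-mapping.json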

Inserting test data

/**
 * Bulk-insert test data scraped by a web crawler.
 *
 * @throws IOException
 */
@Test
public void testBulk() throws IOException {
    Request request = new Request("POST", "/rent/_doc/_bulk");
    request.addParameter("pretty", "true");
    StringBuilder sb = new StringBuilder();
    String createStr = "{\"index\":{\"_index\":\"rent\",\"_type\":\"_doc\"}}";
    List<String> lines = FileUtils.readLines(new File("E:\\WorkSpace\\ServerSoft\\51rent-upload\\data\\lianjia\\data.json"), "UTF-8");
    int count = 0;
    for (String line : lines) {
        sb.append(createStr).append("\n");
        sb.append(line).append("\n");
        if (count >= 100) {
            // submit a batch roughly every 100 records
            request.setJsonEntity(sb.toString());
            this.restClient.performRequest(request);
            count = 0;
            sb = new StringBuilder();
        }
        count++;
    }
    if (!sb.toString().isEmpty()) {
        request.setJsonEntity(sb.toString());
        this.restClient.performRequest(request);
    }
}
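
After the bulk import finishes, the document count can be spot-checked from the shell; a minimal sketch:

# Count the documents that landed in the rent index
curl -s "http://192.168.123.121:9200/rent/_count?pretty"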

RocketMQ Service

RocketMQ Planning

Service  Port  Server  Container name
rmqserver_01 9876 192.168.123.98 rmqserver_01
rmqserver_02 9876 192.168.123.54 rmqserver_02
rmqbroker_01 10911 192.168.123.54 rmqbroker_01
rmqbroker_02 10911 192.168.123.98 rmqbroker_02
rmqbroker_01_slave 10711 192.168.123.121 rmqbroker_01_slave
rmqbroker_02_slave 10711 192.168.123.54 rmqbroker_02_slave
rocketmq_console 8082 192.168.123.54 rocketmq_console

RocketMQ Implementation

Create two nameservers

# nameserver1
docker create -p 9876:9876 --name rmqserver_01 \
-e "JAVA_OPT_EXT=-server -Xms128m -Xmx128m -Xmn128m" \
-e "JAVA_OPTS=-Duser.home=/opt" \
-v /data/rmq/rmqserver_01/logs:/root/logs \
-v /data/rmq/rmqserver_01/store:/root/store \
rocketmqinc/rocketmq sh mqnamesrv
# nameserver2
docker create -p 9876:9876 --name rmqserver_02 \
-e "JAVA_OPT_EXT=-server -Xms128m -Xmx128m -Xmn128m" \
-e "JAVA_OPTS=-Duser.home=/opt" \
-v /data/rmq/rmqserver_02/logs:/root/logs \
-v /data/rmq/rmqserver_02/store:/root/store \
rocketmqinc/rocketmq sh mqnamesrv

Create the first master broker

# A broker needs to expose 3 ports:
# - the port clients connect to
# - the port used to communicate with other nodes in the cluster
# - the port used for master-slave (MS) replication
docker create --net host --name rmqbroker_01 \
-e "JAVA_OPTS=-Duser.home=/opt" \
-e "JAVA_OPT_EXT=-server -Xms128m -Xmx128m -Xmn128m" \
-v /data/rmq/rmqbroker_01/conf/broker.conf:/etc/rocketmq/broker.conf \
-v /data/rmq/rmqbroker_01/logs:/root/logs \
-v /data/rmq/rmqbroker_01/store:/root/store \
rocketmqinc/rocketmq sh mqbroker -c /etc/rocketmq/broker.conf
# broker.conf configuration
brokerIP1=192.168.123.54
brokerIP2=192.168.123.98
namesrvAddr=192.168.123.98:9876;192.168.123.54:9876
brokerClusterName=RentCluster
brokerId=0
deleteWhen=04
fileReservedTime=48
brokerName=broker01
brokerRole=SYNC_MASTER
flushDiskType=ASYNC_FLUSH
listenPort=10911
enablePropertyFilter=true

Create the second master broker

docker create --net host --name rmqbroker_02 \
-e "JAVA_OPTS=-Duser.home=/opt" \
-e "JAVA_OPT_EXT=-server -Xms128m -Xmx128m -Xmn128m" \
-v /data/rmq/rmqbroker_02/conf/broker.conf:/etc/rocketmq/broker.conf \
-v /data/rmq/rmqbroker_02/logs:/root/logs \
-v /data/rmq/rmqbroker_02/store:/root/store \
rocketmqinc/rocketmq sh mqbroker -c /etc/rocketmq/broker.conf
# broker.conf configuration
brokerIP1=192.168.123.98
brokerIP2=192.168.123.54
namesrvAddr=192.168.123.98:9876;192.168.123.54:9876
brokerClusterName=RentCluster
brokerId=0
deleteWhen=04
fileReservedTime=48
brokerName=broker02
brokerRole=SYNC_MASTER
flushDiskType=ASYNC_FLUSH
listenPort=10911
enablePropertyFilter=true

Create the first slave broker

docker create --net host --name rmqbroker_01_slave \
-e "JAVA_OPTS=-Duser.home=/opt" \
-e "JAVA_OPT_EXT=-server -Xms128m -Xmx128m -Xmn128m" \
-v /data/rmq/rmqbroker_01_slave/conf/broker.conf:/etc/rocketmq/broker.conf \
-v /data/rmq/rmqbroker_01_slave/logs:/root/logs \
-v /data/rmq/rmqbroker_01_slave/store:/root/store \
rocketmqinc/rocketmq sh mqbroker -c /etc/rocketmq/broker.conf
# broker.conf configuration
brokerIP1=192.168.123.121
brokerIP2=192.168.123.121
namesrvAddr=192.168.123.98:9876;192.168.123.54:9876
brokerClusterName=RentCluster
brokerId=1
deleteWhen=04
fileReservedTime=48
brokerName=brokerslave01
brokerRole=SLAVE
flushDiskType=ASYNC_FLUSH
listenPort=10711
enablePropertyFilter=true

Create the second slave broker

docker create --net host --name rmqbroker_02_slave \
-e "JAVA_OPTS=-Duser.home=/opt" \
-e "JAVA_OPT_EXT=-server -Xms128m -Xmx128m -Xmn128m" \
-v /data/rmq/rmqbroker_02_slave/conf/broker.conf:/etc/rocketmq/broker.conf \
-v /data/rmq/rmqbroker_02_slave/logs:/root/logs \
-v /data/rmq/rmqbroker_02_slave/store:/root/store \
rocketmqinc/rocketmq sh mqbroker -c /etc/rocketmq/broker.conf
# broker.conf configuration
brokerIP1=192.168.123.54
brokerIP2=192.168.123.54
namesrvAddr=192.168.123.98:9876;192.168.123.54:9876
brokerClusterName=RentCluster
brokerId=1
deleteWhen=04
fileReservedTime=48
brokerName=brokerslave02
brokerRole=SLAVE
flushDiskType=ASYNC_FLUSH
listenPort=10711
enablePropertyFilter=true

Startup

# 192.168.123.98
docker start rmqserver_01 rmqbroker_02
# 192.168.123.121
docker start rmqbroker_01_slave
# 192.168.123.54
docker start rmqserver_02 rmqbroker_01 rmqbroker_02_slave 
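
Cluster membership can be checked with the mqadmin tool shipped in the image; a minimal sketch, assuming the containers above are running:

# List the brokers registered with the nameservers; broker01 and broker02 should
# each show a master (brokerId 0) and a slave (brokerId 1)
docker exec rmqbroker_01 sh mqadmin clusterList -n "192.168.123.98:9876;192.168.123.54:9876"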

Installing the RocketMQ console

# Clone the project
git clone https://github.com/apache/rocketmq-externals.git
# Enter the project directory
cd rocketmq-externals/rocketmq-console/
# Set the context path (empty by default)
vim src/main/resources/application.properties
server.contextPath=/rocketmq
# Set the port
server.port=8080
# NameServer addresses; change to your own, separating multiple addresses with a semicolon ";"
rocketmq.config.namesrvAddr=192.168.123.98:9876;192.168.123.54:9876
# Build and package
mvn clean package -Dmaven.test.skip=true
# Start
mvn spring-boot:run
# or
java -jar target/rocketmq-console-ng-1.0.0.jar
# If it is not set in the config file, pass it at startup
java -jar target/rocketmq-console-ng-1.0.0.jar --rocketmq.config.namesrvAddr='192.168.123.98:9876;192.168.123.54:9876'
# Visit http://localhost:8080/rocketmq

# Alternatively, build a Docker image
mvn clean package -Dmaven.test.skip=true docker:build

Zookeeper Service

Zookeeper Planning

Service  Port  Server  Container name
zk-01 2181, 2888, 3888 192.168.123.121 zk_01
zk-02 2181, 2888, 3888 192.168.123.98 zk_02
zk-03 2181, 2888, 3888 192.168.123.54 zk_03

Zookeeper Implementation

# Pull the ZooKeeper image; keep the client library version consistent with the server version
docker pull zookeeper
# Create the containers (the ensemble can also be passed via the env var below)
# -e  ZOO_SERVERS="server.1=192.168.123.121:2888:3888 server.2=192.168.123.98:2888:3888 server.3=192.168.123.54:2888:3888"
docker create --name zk_01 --net host -v /rent/zookeeper/data:/data -v /rent/zookeeper/conf:/conf -e ZOO_MY_ID=1 zookeeper
docker create --name zk_02 --net host -v /rent/zookeeper/data:/data -v /rent/zookeeper/conf:/conf -e ZOO_MY_ID=2 zookeeper
docker create --name zk_03 --net host -v /rent/zookeeper/data:/data -v /rent/zookeeper/conf:/conf -e ZOO_MY_ID=3 zookeeper

vim zoo.cfg
# zoo.cfg
clientPort=2181
dataDir=/data
dataLogDir=/data/log
tickTime=2000
initLimit=5
syncLimit=2
autopurge.snapRetainCount=3
autopurge.purgeInterval=0
maxClientCnxns=60
standaloneEnabled=true
admin.enableServer=true
server.1=192.168.123.121:2888:3888
server.2=192.168.123.98:2888:3888
server.3=192.168.123.54:2888:3888

# log4j.properties
# Define some default values that can be overridden by system properties
zookeeper.root.logger=INFO, CONSOLE
zookeeper.console.threshold=INFO
zookeeper.log.dir=.
zookeeper.log.file=zookeeper.log
zookeeper.log.threshold=INFO
zookeeper.log.maxfilesize=256MB
zookeeper.log.maxbackupindex=20
zookeeper.tracelog.dir=${zookeeper.log.dir}
zookeeper.tracelog.file=zookeeper_trace.log
log4j.rootLogger=${zookeeper.root.logger}
# console
# Add "console" to rootlogger above if you want to use this
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
# Add ROLLINGFILE to rootLogger to get log file output
log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}
log4j.appender.ROLLINGFILE.MaxFileSize=${zookeeper.log.maxfilesize}
log4j.appender.ROLLINGFILE.MaxBackupIndex=${zookeeper.log.maxbackupindex}
log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
# Add TRACEFILE to rootLogger to get log file output
#    Log TRACE level and above messages to a log file
log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
log4j.appender.TRACEFILE.Threshold=TRACE
log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}
log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
### Notice we are including log4j's NDC here (%x)
log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n

# Start the containers
docker start zk_01 && docker logs -f zk_01
docker start zk_02 && docker logs -f zk_02
docker start zk_03 && docker logs -f zk_03
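
Each node's role can be checked with the zkServer.sh script bundled in the image; a minimal sketch:

# Reports Mode: leader on one node and Mode: follower on the other two
docker exec zk_01 zkServer.sh status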

Project Build

Project Planning

Project  Domain  Server
rent-manage-api-server api.manage.rent.com 192.168.123.121
rent-manage-web manage.rent.com 192.168.123.121
rent-web rent.com 192.168.123.98
rent-im im.rent.com 192.168.123.54

Project Implementation

Building the Spring Boot projects

Add the Spring Boot build plugin

<build>
    <plugins>
        <plugin>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-maven-plugin</artifactId>
            <configuration>
                <!-- replace with the actual fully qualified main class -->
                <mainClass>xxxxx</mainClass>
            </configuration>
            <executions>
                <execution>
                    <goals>
                        <goal>repackage</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>

Upload the project to /rent/publish on the server, then unpack and build it.

# Run the build
mvn install -Dmaven.test.skip=true
# Check running Java processes
jps -l
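
A common way to keep the services running after the build is nohup with output redirected to a log file; a minimal sketch, where the jar name is only illustrative:

# Start a service in the background (jar name is hypothetical)
nohup java -jar rent-manage-api-server.jar > rent-manage-api-server.log 2>&1 &
# Confirm it is running
jps -l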

Building the admin management front end

# Ant Design Pro
umi build

The generated static pages are served through Nginx.

Building the JavaScript client

Upload the project to the server and unpack it.

# Build
npm run build
# Deploy the Web API project
# If this errors, remove cross-env from the npm script
npm run dev

Configuring Nginx

nginx.conf configuration file

#user  nobody;
worker_processes  1;
#error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;
#pid        logs/nginx.pid;
events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;
    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';
    #access_log  logs/access.log  main;
    sendfile        on;
    #tcp_nopush     on;
    #keepalive_timeout  0;
    keepalive_timeout  65;
    #gzip  on;
    # Admin front end
    server {
        listen 80;
        server_name manage.rent.com;
        charset utf-8;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        location ^~ /rent/ {
            proxy_pass http://192.168.123.54:8080/;
            proxy_connect_timeout 600;
            proxy_read_timeout 600;
        }
        location / {
            root /rent/publish/manage-web;
        }
    }
    # Microservice API
    server {
        listen 80;
        server_name api.manage.rent.com;
        charset utf-8;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        location / {
            proxy_pass http://192.168.123.98:18080;
            proxy_connect_timeout 600;
            proxy_read_timeout 600;
        }
    }
    # Reverse proxy for WebSocket
    server {
        listen 80;
        server_name im.rent.com;
        charset utf-8;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        location / {
            proxy_pass http://192.168.123.54:18080;
            proxy_connect_timeout 600;
            proxy_read_timeout 600;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
        }
    }
    # JS client
    server {
        listen 80;
        server_name www.rent.com;
        charset utf-8;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        location / {
            root /rent/publish/rent-web;
        }
    }
}
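
After editing nginx.conf, the configuration can be validated and reloaded without downtime. For testing, the domains also need to resolve to the Nginx host; a hosts-file entry is shown as an example with an assumed IP:

# Validate the configuration and reload Nginx
nginx -t && nginx -s reload
# Example hosts entry on a test machine (assuming Nginx runs on 192.168.123.121)
# 192.168.123.121 manage.rent.com api.manage.rent.com www.rent.com im.rent.com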
