Neo4j Community Edition High-Availability Setup

1 Architecture (DRBD + Keepalived + Neo4j)

 

(architecture diagram: drbd + keepalived + neo4j)

2 Recommended Configuration

Item         Minimum                  Recommended
CPU          Intel Core i3, 8 cores   Intel Core i7, 16 cores
Memory       2GB                      16-32GB or more
Disk         10GB SATA                SSD w/ SATA Express, or NVMe
Filesystem   EXT4 (or similar)        EXT4/ZFS
Java         OpenJDK 8                Oracle Java 8

3 Environment

Role     Hostname   IP                Disk       VIP
master   node101    192.168.245.201   /dev/sdb   192.168.245.203
backup   node102    192.168.245.202   /dev/sdb   192.168.245.203

4 Preparation

4.1 Disable the firewall and SELinux

node1 & node2
# firewall
systemctl stop firewalld
systemctl disable firewalld

# selinux: off for the current boot
setenforce 0
# selinux: off permanently
vim /etc/sysconfig/selinux
SELINUX=disabled

4.2 Configure /etc/hosts

node1 & node2
[root@node1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.245.201  node101
192.168.245.202  node102

4.3 SSH mutual trust

node1:
ssh-keygen
ssh-copy-id -i /root/.ssh/id_rsa.pub 192.168.245.202
node2:
ssh-keygen
ssh-copy-id -i /root/.ssh/id_rsa.pub 192.168.245.201
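
To confirm the trust works in both directions:

ssh 192.168.245.202 hostname   # from node1: should print node102 with no password prompt
ssh 192.168.245.201 hostname   # from node2: should print node101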

4.4 Clock synchronization

node1 & node2
crontab -e
*/5 * * * * ntpdate cn.pool.ntp.org   ### add this sync job
systemctl restart crond

5 Install DRBD

5.1 Disk check

node1 & node2
[root@node1 ~]# fdisk -l|grep /dev/sdb
Disk /dev/sdb: 21.5 GB, 21474836480 bytes, 41943040 sectors
[root@node2 ~]# fdisk -l|grep /dev/sdb
Disk /dev/sdb: 21.5 GB, 21474836480 bytes, 41943040 sectors

5.2 Installation

node1 & node2
# Option 1: yum
rpm -ivh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
yum install -y drbd84-utils kmod-drbd84

# Option 2: local RPMs (note: this installs DRBD 9.0 rather than 8.4;
# whichever option you pick, use the same one on both nodes)
# download the RPMs locally, then run:
rpm -ivh elrepo-release-7.0-4.el7.elrepo.noarch.rpm
rpm -ivh drbd90-utils-9.10.0-1.el7.elrepo.x86_64.rpm
rpm -ivh kmod-drbd90-9.0.16-1.el7_6.elrepo.x86_64.rpm
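
Whichever method you used, confirm afterwards that the kernel module loads and that the module and userland versions match on both nodes:

modprobe drbd
cat /proc/drbd      # kernel module version
drbdadm --version   # userland utils version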

5.3 Config file /etc/drbd.d/global_common.conf

node1 & node2 (identical on both; the only active settings are usage-count no, on-io-error detach, and protocol C, i.e. fully synchronous replication)
[root@node101 neo4j]# cat /etc/drbd.d/global_common.conf 
# DRBD is the result of over a decade of development by LINBIT.
# In case you need professional services for DRBD or have
# feature requests visit http://www.linbit.com

global {
    usage-count no;

    # Decide what kind of udev symlinks you want for "implicit" volumes
    # (those without explicit volume <vnr> {} block, implied vnr=0):
    # /dev/drbd/by-resource/<resource>/<vnr>   (explicit volumes)
    # /dev/drbd/by-resource/<resource>         (default for implicit)
    # udev-always-use-vnr; # treat implicit the same as explicit volumes

    # minor-count dialog-refresh disable-ip-verification
    # cmd-timeout-short 5; cmd-timeout-medium 121; cmd-timeout-long 600;
}

common {
    handlers {
        # These are EXAMPLE handlers only.
        # They may have severe implications,
        # like hard resetting the node under certain circumstances.
        # Be careful when choosing your poison.

        # pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        # pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        # local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
        # fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
        # split-brain "/usr/lib/drbd/notify-split-brain.sh root";
        # out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";
        # before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";
        # after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;
        # quorum-lost "/usr/lib/drbd/notify-quorum-lost.sh root";
    }

    startup {
        # wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb
    }

    options {
        # cpu-mask on-no-data-accessible

        # RECOMMENDED for three or more storage nodes with DRBD 9:
        # quorum majority;
        # on-no-quorum suspend-io | io-error;
    }

    disk {
        on-io-error detach;
        # size on-io-error fencing disk-barrier disk-flushes
        # disk-drain md-flushes resync-rate resync-after al-extents
        # c-plan-ahead c-delay-target c-fill-target c-max-rate
        # c-min-rate disk-timeout
    }

    net {
        protocol C;
        # protocol timeout max-epoch-size max-buffers
        # connect-int ping-int sndbuf-size rcvbuf-size ko-count
        # allow-two-primaries cram-hmac-alg shared-secret after-sb-0pri
        # after-sb-1pri after-sb-2pri always-asbp rr-conflict
        # ping-timeout data-integrity-alg tcp-cork on-congestion
        # congestion-fill congestion-extents csums-alg verify-alg
        # use-rle
    }
}

5.4 Resource file

node1 & node2
[root@node101 ~]# cat /etc/drbd.d/neo4j.res 
resource neo4j {
    on node101 {
        device      /dev/drbd0;
        disk        /dev/sdb1;
        address     192.168.245.201:7789;
        meta-disk   internal;
    }
    on node102 {
        device      /dev/drbd0;
        disk        /dev/sdb1;
        address     192.168.245.202:7789;
        meta-disk   internal;
    }
}
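
Note that the resource uses the partition /dev/sdb1, while section 5.1 only checked the raw disk /dev/sdb. If the partition does not exist yet, create one spanning the whole disk on both nodes; a non-interactive equivalent of the fdisk hint in 5.5 (assuming /dev/sdb is empty):

parted -s /dev/sdb mklabel msdos mkpart primary 0% 100%
partprobe /dev/sdb
lsblk /dev/sdb   # /dev/sdb1 should now be listed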

5.5 Bring up DRBD

node1 & node2
[root@node101 ~]# modprobe drbd
[root@node101 ~]# 
[root@node101 ~]# drbdadm create-md neo4j
initializing activity log
initializing bitmap (512 KB) to all zero
Writing meta data...
New drbd meta data block successfully created.
[root@node101 ~]# 
[root@node101 ~]# drbdadm up neo4j    # (if this errors, partition the disk first: fdisk /dev/sdb)
[root@node101 ~]# 
[root@node101 ~]# drbdadm -- --force primary neo4j    # (run on the primary node only)
[root@node101 ~]# 
[root@node101 ~]# drbdadm status neo4j
neo4j role:Primary
  disk:UpToDate
  node102 role:Secondary
    replication:SyncSource peer-disk:Inconsistent done:0.19
[root@node101 ~]# 

5.6 Create the filesystem

node1 & node2:
mkdir -p /data/drbd
node1 (primary) only; the secondary cannot mount the DRBD device, and DRBD replicates the data to it automatically:
mkfs.ext4 /dev/drbd0
mount /dev/drbd0 /data/drbd
echo 'test' > /data/drbd/1.txt

5.7 Primary/secondary switchover

On the current primary:
cd ~
umount /data/drbd
drbdadm secondary neo4j
drbdadm status neo4j
On the secondary (to promote it):
drbdadm primary neo4j
mount /dev/drbd0 /data/drbd
drbdadm status neo4j
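
The file written in 5.6 should already be present on the newly promoted node, which confirms replication:

cat /data/drbd/1.txt   # prints: test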

6 Set up Keepalived

6.1 Installation

node1 & node2
cd /usr/local/src
# download keepalived-2.0.18.tar.gz into this directory
tar -xzvf keepalived-2.0.18.tar.gz
cd keepalived-2.0.18
yum install -y gcc openssl-devel popt-devel   # (set up a local yum repo first if the hosts are offline)
./configure --prefix=/usr/local/keepalived
make && make install
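
Because of the custom --prefix, nothing lands in the default paths that `service keepalived start` (section 6.3) expects. The usual wiring on CentOS 7 looks roughly like the following; the exact sample-config and unit-file locations inside the prefix and source tree are assumptions, so adjust to what your build produced:

mkdir -p /etc/keepalived
cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/   # sample config; 6.2 overwrites it
ln -s /usr/local/keepalived/sbin/keepalived /usr/sbin/keepalived
cp /usr/local/src/keepalived-2.0.18/keepalived/keepalived.service /usr/lib/systemd/system/
systemctl daemon-reload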

6.2 Configuration

node1 cat /etc/keepalived/keepalived.conf

! Configuration File for keepalived
 
global_defs {
   router_id LVS_DEVEL
   #vrrp_strict
}

vrrp_script check_neo4j {
    script "/data/scripts/check_neo4j.sh"
    interval 1
    fall 2
}
 
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 52
    priority 100
    advert_int 1
    # if the upstream switch blocks multicast, use VRRP unicast advertisements instead
    # local IP
    unicast_src_ip 192.168.245.201
    unicast_peer {
        # peer IP
        192.168.245.202
    }
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.245.203
    }
    notify_stop /data/scripts/notify_stop.sh
    notify_master /data/scripts/notify_master.sh
    track_script {
        check_neo4j
    }
}

node2 cat /etc/keepalived/keepalived.conf

! Configuration File for keepalived
 
global_defs {
   router_id LVS_DEVEL
   #vrrp_strict
}
 
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 52
    priority 90
    advert_int 1
    # if the upstream switch blocks multicast, use VRRP unicast advertisements instead
    # local IP
    unicast_src_ip 192.168.245.202
    unicast_peer {
        # peer IP
        192.168.245.201
    }
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.245.203
    }
    notify_master /data/scripts/notify_master.sh 
    notify_backup /data/scripts/notify_backup.sh
}

node1 & node2

mkdir -p /data/scripts /data/logs/keepalived
node1 (master):
[root@node1 scripts]# ll
total 12
-rw-r--r-- 1 root root 383 Feb 23 14:57 check_neo4j.sh
-rw-r--r-- 1 root root 496 Feb 23 14:59 notify_master.sh
-rw-r--r-- 1 root root 443 Feb 23 14:58 notify_stop.sh
node2 (backup):
[root@node2 scripts]# ll
total 8
-rw-r--r-- 1 root root 467 Feb 23 15:03 notify_backup.sh
-rw-r--r-- 1 root root 495 Feb 23 15:04 notify_master.sh
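
The script bodies are not listed above, so the following is a minimal sketch of what each typically contains in this DRBD + keepalived pattern; the Neo4j path matches section 7, and the log file location is an assumption. Also note the `ll` output shows mode -rw-r--r--: keepalived can only execute the scripts after `chmod +x /data/scripts/*.sh` on both nodes.

#!/bin/bash
# check_neo4j.sh: run every second by keepalived; two consecutive failures (fall 2) trigger failover
ss -lnt | grep -q ':7474' || exit 1

#!/bin/bash
# notify_master.sh: this node just became MASTER; take over the storage and start Neo4j
echo "$(date) -> master" >> /data/logs/keepalived/state.log
drbdadm primary neo4j
mount /dev/drbd0 /data/drbd
/data/drbd/neo4j/neo4j-community-3.5.14/bin/neo4j start

#!/bin/bash
# notify_backup.sh / notify_stop.sh: release the storage so the peer can take over
echo "$(date) -> backup" >> /data/logs/keepalived/state.log
/data/drbd/neo4j/neo4j-community-3.5.14/bin/neo4j stop
umount /data/drbd
drbdadm secondary neo4j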

6.3 Start

node1 & node2
service keepalived start

6.4 Verify the VIP appears

[root@node101 scripts]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 00:0c:29:a7:12:7f brd ff:ff:ff:ff:ff:ff
    inet 192.168.245.201/24 brd 192.168.245.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.245.203/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::12a8:7432:6f52:e67b/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
    link/ether 52:54:00:c1:fa:a5 brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
       valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc fq_codel master virbr0 state DOWN group default qlen 1000
    link/ether 52:54:00:c1:fa:a5 brd ff:ff:ff:ff:ff:ff
[root@node101 scripts]# 

7 Set up Neo4j

7.1 Install onto the /data/drbd volume

node1
mkdir -p /data/drbd/neo4j
cd /data/drbd/neo4j
tar -xzvf neo4j-community-3.5.14-unix.tar.gz

7.2 Start the graph database

node1
cd /data/drbd/neo4j/neo4j-community-3.5.14
vim conf/neo4j.conf
./bin/neo4j start
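
Neo4j 3.5 listens only on localhost by default, so for the test in 7.3 to work over the node IP and the VIP, open up the listen address while editing conf/neo4j.conf:

dbms.connectors.default_listen_address=0.0.0.0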

7.3 Test the cluster

http://192.168.245.201:7474/browser/
http://192.168.245.203:7474/browser/
Both URLs should connect.
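
As a final failover drill (relying on the keepalived scripts from 6.2), stop Neo4j on the master and watch the VIP and the DRBD primary role move to node102 while http://192.168.245.203:7474/browser/ keeps working:

# on node101 (current master)
/data/drbd/neo4j/neo4j-community-3.5.14/bin/neo4j stop

# on node102, a few seconds later
ip addr show ens33 | grep 192.168.245.203   # the VIP has moved here
drbdadm status neo4j                        # role:Primary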