若依部署 前端 nginx 输入brew info nginx
1 2 3 4 5 6 7 8 9 10 11 12 Docroot is: /opt/homebrew/var/www The default port has been set in /opt/homebrew/etc/nginx/nginx.conf to 8080 so that nginx can run without sudo. nginx will load all files in /opt/homebrew/etc/nginx/servers/. To start nginx now and restart at login: brew services start nginx Or, if you don't want/need a background service you can just run: /opt/homebrew/opt/nginx/bin/nginx -g daemon\ off\;
修改/opt/homebrew/etc/nginx/nginx.conf
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 #user nobody; worker_processes 1; #error_log logs/error.log; #error_log logs/error.log notice; #error_log logs/error.log info; #pid logs/nginx.pid; events { worker_connections 1024; } http { include mime.types; default_type application/octet-stream; server { listen 80; server_name localhost; location /prod-api/ { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forward-For $http_x_forwarded_for; proxy_pass http://127.0.0.1:8081/; } location /dev-api/ { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forward-For $http_x_forwarded_for; proxy_pass http://127.0.0.1:8081/; } location / { root html/dist; index index.html index.htm; try_files $uri $uri/ /index.html; } }
其中
listen 80; 监听80端口
location / { root html/dist; 根目录在/opt/homebrew/var/www
中的dist里面 index index.html index.htm; }
location /prod-api/ 需要配置生产环境的代理地址
后端 父项目的pom文件如下
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 <?xml version="1.0" encoding="UTF-8" ?> <project xmlns ="http://maven.apache.org/POM/4.0.0" xmlns:xsi ="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation ="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" > <modelVersion > 4.0.0</modelVersion > <groupId > com.ruoyi</groupId > <artifactId > ruoyi</artifactId > <version > 3.8.5</version > <name > ruoyi</name > <url > http://www.ruoyi.vip</url > <description > 若依管理系统</description > <properties > <ruoyi.version > 3.8.5</ruoyi.version > <project.build.sourceEncoding > UTF-8</project.build.sourceEncoding > <project.reporting.outputEncoding > UTF-8</project.reporting.outputEncoding > <java.version > 1.8</java.version > <maven-jar-plugin.version > 3.1.1</maven-jar-plugin.version > <druid.version > 1.2.15</druid.version > <bitwalker.version > 1.21</bitwalker.version > <swagger.version > 3.0.0</swagger.version > <kaptcha.version > 2.3.3</kaptcha.version > <pagehelper.boot.version > 1.4.6</pagehelper.boot.version > <fastjson.version > 2.0.20</fastjson.version > <oshi.version > 6.4.0</oshi.version > <commons.io.version > 
2.11.0</commons.io.version > <commons.fileupload.version > 1.4</commons.fileupload.version > <commons.collections.version > 3.2.2</commons.collections.version > <poi.version > 4.1.2</poi.version > <velocity.version > 2.3</velocity.version > <jwt.version > 0.9.1</jwt.version > </properties > <dependencyManagement > <dependencies > <dependency > <groupId > org.springframework.boot</groupId > <artifactId > spring-boot-dependencies</artifactId > <version > 2.5.14</version > <type > pom</type > <scope > import</scope > </dependency > <dependency > <groupId > com.alibaba</groupId > <artifactId > druid-spring-boot-starter</artifactId > <version > ${druid.version}</version > </dependency > <dependency > <groupId > eu.bitwalker</groupId > <artifactId > UserAgentUtils</artifactId > <version > ${bitwalker.version}</version > </dependency > <dependency > <groupId > com.github.pagehelper</groupId > <artifactId > pagehelper-spring-boot-starter</artifactId > <version > ${pagehelper.boot.version}</version > </dependency > <dependency > <groupId > com.github.oshi</groupId > <artifactId > oshi-core</artifactId > <version > ${oshi.version}</version > </dependency > <dependency > <groupId > io.springfox</groupId > <artifactId > springfox-boot-starter</artifactId > <version > ${swagger.version}</version > <exclusions > <exclusion > <groupId > io.swagger</groupId > <artifactId > swagger-models</artifactId > </exclusion > </exclusions > </dependency > <dependency > <groupId > commons-io</groupId > <artifactId > commons-io</artifactId > <version > ${commons.io.version}</version > </dependency > <dependency > <groupId > commons-fileupload</groupId > <artifactId > commons-fileupload</artifactId > <version > ${commons.fileupload.version}</version > </dependency > <dependency > <groupId > org.apache.poi</groupId > <artifactId > poi-ooxml</artifactId > <version > ${poi.version}</version > </dependency > <dependency > <groupId > org.apache.velocity</groupId > <artifactId > 
velocity-engine-core</artifactId > <version > ${velocity.version}</version > </dependency > <dependency > <groupId > commons-collections</groupId > <artifactId > commons-collections</artifactId > <version > ${commons.collections.version}</version > </dependency > <dependency > <groupId > com.alibaba.fastjson2</groupId > <artifactId > fastjson2</artifactId > <version > ${fastjson.version}</version > </dependency > <dependency > <groupId > io.jsonwebtoken</groupId > <artifactId > jjwt</artifactId > <version > ${jwt.version}</version > </dependency > <dependency > <groupId > pro.fessional</groupId > <artifactId > kaptcha</artifactId > <version > ${kaptcha.version}</version > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ruoyi-quartz</artifactId > <version > ${ruoyi.version}</version > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ledger</artifactId > <version > 3.8.5</version > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ruoyi-generator</artifactId > <version > ${ruoyi.version}</version > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ruoyi-framework</artifactId > <version > ${ruoyi.version}</version > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ruoyi-system</artifactId > <version > ${ruoyi.version}</version > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ruoyi-common</artifactId > <version > ${ruoyi.version}</version > </dependency > </dependencies > </dependencyManagement > <modules > <module > ruoyi-admin</module > <module > ruoyi-framework</module > <module > ruoyi-system</module > <module > ruoyi-quartz</module > <module > ruoyi-generator</module > <module > ruoyi-common</module > <module > ledger</module > </modules > <packaging > pom</packaging > <dependencies > </dependencies > <build > <plugins > <plugin > <groupId > org.apache.maven.plugins</groupId > <artifactId > 
maven-compiler-plugin</artifactId > <version > 3.1</version > <configuration > <source > ${java.version}</source > <target > ${java.version}</target > <encoding > ${project.build.sourceEncoding}</encoding > </configuration > </plugin > </plugins > </build > <repositories > <repository > <id > public</id > <name > aliyun nexus</name > <url > https://maven.aliyun.com/repository/public</url > <releases > <enabled > true</enabled > </releases > </repository > </repositories > <pluginRepositories > <pluginRepository > <id > public</id > <name > aliyun nexus</name > <url > https://maven.aliyun.com/repository/public</url > <releases > <enabled > true</enabled > </releases > <snapshots > <enabled > false</enabled > </snapshots > </pluginRepository > </pluginRepositories > </project >
自制的module的pom文件如下
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 <?xml version="1.0" encoding="UTF-8" ?> <project xmlns ="http://maven.apache.org/POM/4.0.0" xmlns:xsi ="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation ="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" > <parent > <artifactId > ruoyi</artifactId > <groupId > com.ruoyi</groupId > <version > 3.8.5</version > </parent > <modelVersion > 4.0.0</modelVersion > <packaging > jar</packaging > <artifactId > ledger</artifactId > <properties > <maven.compiler.source > 8</maven.compiler.source > <maven.compiler.target > 8</maven.compiler.target > <project.build.sourceEncoding > UTF-8</project.build.sourceEncoding > </properties > <dependencies > <dependency > <groupId > com.google.zxing</groupId > <artifactId > core</artifactId > <version > 3.3.3</version > </dependency > <dependency > <groupId > com.google.zxing</groupId > <artifactId > javase</artifactId > <version > 3.3.3</version > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ruoyi-common</artifactId > </dependency > </dependencies > </project >
admin的pom文件如下
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 <?xml version="1.0" encoding="UTF-8" ?> <project xmlns ="http://maven.apache.org/POM/4.0.0" xmlns:xsi ="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation ="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" > <parent > <artifactId > ruoyi</artifactId > <groupId > com.ruoyi</groupId > <version > 3.8.5</version > </parent > <modelVersion > 4.0.0</modelVersion > <packaging > jar</packaging > <artifactId > ruoyi-admin</artifactId > <description > web服务入口 </description > <dependencies > <dependency > <groupId > org.springframework.boot</groupId > <artifactId > spring-boot-devtools</artifactId > <optional > true</optional > </dependency > <dependency > <groupId > io.springfox</groupId > <artifactId > springfox-boot-starter</artifactId > </dependency > <dependency > <groupId > io.swagger</groupId > <artifactId > swagger-models</artifactId > <version > 1.6.2</version > </dependency > <dependency > <groupId > mysql</groupId > <artifactId > mysql-connector-java</artifactId > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ruoyi-framework</artifactId > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ledger</artifactId > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ruoyi-quartz</artifactId > </dependency > <dependency > <groupId > com.ruoyi</groupId > <artifactId > ruoyi-generator</artifactId > </dependency > </dependencies > <build > <plugins > <plugin > <groupId > org.springframework.boot</groupId > <artifactId > spring-boot-maven-plugin</artifactId > <version > 2.1.1.RELEASE</version > <configuration > <fork > true</fork > </configuration > <executions > 
<execution > <goals > <goal > repackage</goal > </goals > </execution > </executions > </plugin > <plugin > <groupId > org.apache.maven.plugins</groupId > <artifactId > maven-war-plugin</artifactId > <version > 3.1.0</version > <configuration > <failOnMissingWebXml > false</failOnMissingWebXml > <warName > ${project.artifactId}</warName > </configuration > </plugin > </plugins > <finalName > ${project.artifactId}</finalName > </build > </project >
然后使用maven工具对父项目进行打包,获得了一个ruoyi-admin.jar
包,为了方便配置,我们可以把配置文件拿出来放到jar同级目录下面。
docker实操部署 mysql8 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 【运行容器:】 docker run -d \ --name mysql \ -p 3306:3306 \ -e TZ=Asia/Shanghai \ -e MYSQL_ROOT_PASSWORD=111111 \ -v /usr/data/mysql/data:/var/lib/mysql \ -v /usr/data/mysql/conf:/etc/mysql/conf.d \ -v /usr/data/mysql/init:/docker-entrypoint-initdb.d \ -v /usr/data/mysql/log:/var/log/mysql \ mysql ————————————— 分析: docker run -d mysql 以后台的方式运行 mysql 最新版本的镜像(如需固定版本可写 mysql:8.0.19),生成一个容器。 --name mysql 容器名为 mysql -e MYSQL_ROOT_PASSWORD=111111 设置登录密码为 111111,登录用户为 root -p 3306:3306 将容器内部 3306 端口映射到 主机的 3306 端口,即通过 主机的 3306 可以访问容器的 3306 端口 -v /usr/data/mysql/log:/var/log/mysql 将容器的 日志文件夹 挂载到 主机的相应位置 -v /usr/data/mysql/data:/var/lib/mysql 将容器的 数据文件夹 挂载到 主机的相应位置 -v /usr/data/mysql/conf:/etc/mysql/conf.d 将容器的 自定义配置文件夹 挂载到主机的相应位置 -v /usr/data/mysql/init:/docker-entrypoint-initdb.d 将主机的 初始化SQL脚本目录 挂载到容器首次启动时自动执行的位置 【查看容器是否启动:】 docker ps -a
需要通过远程访问要使用到
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%';
FLUSH PRIVILEGES;
发现还是不行
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY '111111';
发现也是不行。
查看资料说可能是路由并未打开,输入sysctl net.ipv4.ip_forward
发现值为0。开启路由。
1 2 3 echo "net.ipv4.ip_forward = 1" >>/etc/sysctl.conf sysctl -p sysctl net.ipv4.ip_forward
开启路由即可!
redis 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 【运行容器:】 使用 docker run -v 挂载文件时,若想挂载文件,需要先在主机上创建文件,否则会当成目录挂载。 mkdir -p /usr/data/redis/conf touch /usr/data/redis/conf/redis.conf 需要使用 redis-server /etc/redis/redis.conf 指定配置文件启动 redis。 docker run -p 6379:6379 --name redis \ -v /usr/data/redis/conf/redis.conf:/etc/redis/redis.conf \ -v /usr/data/redis/conf/data:/data \ -d redis redis-server /etc/redis/redis.conf 分析: docker run -d redis 运行 redis 最新镜像,并生成一个容器 --name redis 容器名为 redis -p 6379:6379 指定端口映射,容器的 6379 端口映射到 主机的 6379 端口, -v /usr/data/redis/conf/redis.conf:/etc/redis/redis.conf 挂载 redis 的配置文件到主机的指定位置(注意与上面 mkdir/touch 的路径保持一致,否则挂载的是一个新建的空目录) -v /usr/data/redis/conf/data:/data 挂载 redis 的数据到主机的指定位置 redis-server /etc/redis/redis.conf 表示根据 /etc/redis/redis.conf 配置文件启动。
发现RDM居然要收费,使用ARDM 代替,发现连接没问题。
NGINX 前端记得把admin默认账号密码给改掉
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 0. docker pull nginx:1.18.0 [root@rcodunix9knh8vfc ~] 1.创建nginx容器 docker run -d --name nginx -p 80:80 nginx:1.18.0 2.创建挂载目录 mkdir -p /usr/data/nginx/{conf,log,html} 3.把Nginx容器中的文件进行复制 nginx.conf复制到主机 docker cp nginx:/etc/nginx/nginx.conf /usr/data/nginx/conf/nginx.conf 4.将conf.d文件夹复制到主机 docker cp nginx:/etc/nginx/conf.d /usr/data/nginx/conf/conf.d 5.把html目录复制到主机 docker cp nginx:/usr/share/nginx/html /usr/data/nginx/ 6.停止刚刚创建的nginx容器 docker stop nginx 7.删除刚刚创建的容器 docker rm nginx 8.重新创建容器 docker run -d --name nginx -p 8090:80 \ -v /usr/data/nginx/conf/nginx.conf:/etc/nginx/nginx.conf \ -v /usr/data/nginx/conf/conf.d:/etc/nginx/conf.d \ -v /usr/data/nginx/log:/var/log/nginx \ -v /usr/data/nginx/html:/usr/share/nginx/html \ --privileged=true nginx:1.18.0 docker run --name nginx -d -p 8090:8090 -v /usr/data/nginx/html:/usr/share/nginx/html -v /usr/data/nginx/conf/nginx.conf:/etc/nginx/nginx.conf -v /usr/data/nginx/conf.d:/etc/nginx/conf.d -v /usr/data/nginx/logs:/var/log/nginx nginx:1.18.0 注意: 上面/usr/data/nginx/html是主机(服务器)上的地址 /usr/share/nginx/html是容器中的地址 9. 进入nginx的docker容器中 [root@VM-8-7-centos html] 10.把容器中nginx配置生效 [root@VM-8-7-centos html]
然后在/data/nginx/conf/nginx.conf
编辑配置文件,这边需要配置路径/usr/share/nginx/html;
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 worker_processes 1 ; events { worker_connections 1024 ; } http { include mime.types; default_type application/octet-stream; sendfile on; keepalive_timeout 65 ; server { listen 80 ; server_name localhost; location /prod-api/ { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forward-For $http_x_forwarded_for; proxy_pass http://10.21.0.248:8081/; } location /dev-api/ { proxy_set_header Host $http_host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header REMOTE-HOST $remote_addr; proxy_set_header X-Forward-For $http_x_forwarded_for; proxy_pass http://127.0.0.1:8081/; } location / { root /usr/share/nginx/html; index index.html index.htm; } error_page 500 502 503 504 /50x.html; location = /50x.html { root html; } } include servers/*; }
进入容器docker exec -it nginx bash
输入 nginx -s reload -c /etc/nginx/nginx.conf
Java 打包完成后需要对配置文件进行修改:
数据库连接
redis
logback.xml
在根目录下创建workspace目录。把配置文件和jar包都放入,新建Dockerfile 文件,文件名大小写得注意。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 # 基础镜像 FROM java:8 # author MAINTAINER disda EXPOSE 8081 # 挂载目录,在主机下创建workspace并把jar放入 VOLUME /root/workspace # 创建目录 RUN mkdir -p /home/disda # 指定路径 WORKDIR /home/disda # 复制jar文件到路径 COPY ruoyi-admin.jar /home/disda/ruoyi-admin.jar COPY application.yml /home/disda/application.yml COPY application-dev1.yml /home/disda/application-dev1.yml COPY application-druid.yml /home/disda/application-druid.yml # 启动认证服务 ENTRYPOINT ["java","-jar","ruoyi-admin.jar"]
运行docker build -t zdh-web:v1 .
然后会构建好我们的镜像
然后运行docker run -d -p 8081:8081 --name zdh-web zdh-web:v1
归档导出 1 2 3 4 5 6 7 8 9 10 11 docker stop zdh-web docker stop nginx docker stop mysql docker stop redis docker save -o xxx.tar container_name docker load xxx.tar xxx_name:tag docker start dockerid
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 tar -zxvf docker-25.0.2.tgz and move all the file inside to /usr/bin and run systemctl daemon-reload systemctl start docker systemctl enable docker docker load -i container.tar docker run -d \ --name mysql \ -p 3306:3306 \ -e TZ=Asia/Shanghai \ -e MYSQL_ROOT_PASSWORD=111111 \ -v /usr/data/mysql/data:/var/lib/mysql \ -v /usr/data/mysql/conf:/etc/mysql/conf.d \ -v /usr/data/mysql/init:/docker-entrypoint-initdb.d \ -v /usr/data/mysql/log:/var/log/mysql \ mysql echo "net.ipv4.ip_forward = 1" >>/etc/sysctl.conf #将命令写入到/etc/sysctl.conf文件中 sysctl -p #加载内核 sysctl net.ipv4.ip_forward #再次查看路由是否开启 docker run -d --name nginx -p 80:80 nginx:1.18.0 mkdir -p /data/nginx/{conf,log,html} docker cp nginx:/etc/nginx/nginx.conf /data/nginx/conf/nginx.conf docker cp nginx:/etc/nginx/conf.d /data/nginx/conf/conf.d docker cp nginx:/usr/share/nginx/html /data/nginx/ docker stop nginx docker rm nginx docker run -d --name nginx -p 80:80 \ -v /data/nginx/conf/nginx.conf:/etc/nginx/nginx.conf \ -v /data/nginx/conf/conf.d:/etc/nginx/conf.d \ -v /data/nginx/log:/var/log/nginx \ -v /data/nginx/html:/usr/share/nginx/html \ --privileged=true nginx:1.18.0 docker exec -it nginx bash docker exec -it aed99f4c5a55 nginx -s reload
离线安装docker 1.下载docker安装包:
https://download.docker.com/linux/static/stable/x86_64/
由于我们使用的是pve,因此需要用UltraISO打包成ISO文件上传服务器
使用lsblk
查看盘符,使用mount /dev/sr0 /mnt/cdrom
挂载
复制到个人文件夹cp -r /mnt/cdrom ~/work
复制可执行文件到 /usr/bin:cp ~/work/cdrom/docker/* /usr/bin/(路径以上一步实际复制到的目录为准)
注册Docker为服务,进入 /etc/systemd/system/
目录,并创建 docker.service
文件。
1 vi /etc/systemd/system/docker.service
复制下面内容,ip换为自己主机的ip
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 [Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com After=network-online.target firewalld.service Wants=network-online.target [Service] Type=notify ExecStart=/usr/bin/dockerd --selinux-enabled=false --insecure-registry=192.168.93.133 ExecReload=/bin/kill -s HUP $MAINPID LimitNOFILE=infinity LimitNPROC=infinity LimitCORE=infinity Delegate=yes KillMode=process Restart=on-failure StartLimitBurst=3 StartLimitInterval=60s [Install] WantedBy=multi-user.target
建立必需的目录mkdir -p /etc/docker && touch /etc/docker/daemon.json
赋予 docker.service
文件执行权限:
1 2 chmod 644 /etc/systemd/system/docker.service groupadd docker
重新加载systemd守护进程以应用更改:
启动Docker并设置开机自启动:
1 2 systemctl start docker systemctl enable docker
之前docker运行的那些文件夹都在/usr/data里面,需要chmod -R 755 data,然后复制到目标机器中
mysql mysq中数据需要迁移
步骤 1: 在源 MySQL 容器中导出数据
进入源 MySQL 容器:
1 docker exec -it <source_mysql_container_name> bash
使用 mysqldump 命令导出数据库:
1 mysqldump -u <username> -p<password> <database_name> > /path/to/dump.sql
替换 <username>、<password> 和 <database_name> 为相应的用户名、密码和数据库名称。
步骤 2: 将导出的数据从容器复制到主机
在上述导出数据的命令中,数据被保存在 /path/to/dump.sql 文件中。您需要将该文件从容器复制到主机。可以使用以下命令:
1 docker cp <source_mysql_container_name>:/path/to/dump.sql /path/on/host
替换 <source_mysql_container_name> 为源 MySQL 容器的名称,/path/on/host 为主机上保存导出数据的路径。
步骤 3: 在目标 MySQL 容器中导入数据
进入目标 MySQL 容器:
1 docker exec -it <target_mysql_container_name> bash
使用以下命令导入数据:
1 2 mysql -u <username> -p<password> <database_name> < /path/on/host/dump.sql
替换 <username>、<password>、<database_name> 为目标 MySQL 容器的相应凭据和数据库名称。/path/on/host/dump.sql 为上一步中从源容器复制到主机的数据文件路径(若在目标容器内部执行导入,需先用 docker cp 将 dump.sql 复制进目标容器,再使用容器内的路径)。
java 记得java的配置文件需要修改哦
1 docker cp zdh-web:/home/disda /usr/data/java
docker代码更新 1 2 3 4 5 6 7 8 9
umount /mnt
cd /dev
mount sr0 /mnt
docker cp ruoyi-admin.jar zdh-web:/home/disda
docker cp dist/ nginx:/usr/share/nginx/html
yes | cp ruoyi-admin.jar /usr/data/java/
yes | cp -r dist/ /usr/data/nginx/html/
docker stop zdh-web
docker start zdh-web
PVE部署 /etc/network
开启端口转发
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 auto lo iface lo inet loopback iface enx0a94ef7e0365 inet manual iface eno3 inet manual iface eno4 inet manual iface eno1 inet manual iface eno2 inet manual iface ens1f0 inet manual iface ens1f1 inet manual auto vmbr0 iface vmbr0 inet static address 21.4.0.41/24 gateway 21.4.0.1 bridge-ports eno1 bridge-stp off bridge-fd 0 post-up echo 1 > /proc/sys/net/ipv4/ip_forward post-up echo 1 > /proc/sys/net/ipv4/conf/eno1/proxy_arp auto vmbr1 iface vmbr1 inet static address 192.168.1.1/24 bridge-ports none bridge-stp off bridge-fd 0 post-up echo 1 > /proc/sys/net/ipv4/ip_forward post-up iptables -t nat -A POSTROUTING -s '192.168.1.0/24' -o eno1 -j MASQUERADE post-down iptables -t nat -D POSTROUTING -s '192.168.1.0/24' -o eno1 -j MASQUERADE post-up iptables -t raw -I PREROUTING -i fwbr+ -j CT --zone 1 post-down iptables -t raw -D PREROUTING -i fwbr+ -j CT --zone 1 post-up iptables -t nat -A PREROUTING -i vmbr0 -d 21.4.0.41 -p tcp --dport 8080 -j DNAT --to 192.168.1.2:80 post-down iptables -t nat -D PREROUTING -i vmbr0 -d 21.4.0.41 -p tcp --dport 8080 -j DNAT --to 192.168.1.2:80 post-up iptables -t nat -A PREROUTING -i vmbr0 -d 21.4.0.41 -p tcp --dport 2222 -j DNAT --to 192.168.1.2:22 post-down iptables -t nat -D PREROUTING -i vmbr0 -d 21.4.0.41 -p tcp --dport 2222 -j DNAT --to 192.168.1.2:22 post-up iptables -t nat -A PREROUTING -i vmbr0 -d 21.4.0.41 -p tcp --dport 3306 -j DNAT --to 192.168.1.2:3306 post-down iptables -t nat -D PREROUTING -i vmbr0 -d 21.4.0.41 -p tcp --dport 3306 -j DNAT --to 192.168.1.2:3306
ifreload -a