Backing up and restoring a Hyperledger Fabric network from one instance to another instance

#!/bin/bash
echo "One click backup"
set -e

if [ -d "./backup" ] 
then
    echo "Back up is already exists." 
    sudo cp -r ../wallet/ backup/
    # sudo rm -rf backup
else
# sudo rm -rf backup
    mkdir backup
    echo "Created backup folder......"    
    echo
    #Copying Certificates and Configuration files
    sleep 5
    
    sudo cp -r y-network/organizations/ backup/
    sudo cp -r y-network/channel-artifacts/ backup/
    sudo cp -r y-network/system-genesis-block/ backup/
    
    # cd ..
    
    sudo cp -r ../wallet/ backup/
    
    cd backup
    # mkdir peer
    # mkdir orderer
    cd ..
    #Copying Peer and orderer data
    sleep 5
    sudo docker cp peer0.org1.yaliyomo.net:/var/hyperledger/production/ backup/peer0.org1/
    sudo docker cp peer0.org2.yaliyomo.net:/var/hyperledger/production/ backup/peer0.org2/
    sudo docker cp peer0.org3.yaliyomo.net:/var/hyperledger/production/ backup/peer0.org3/
    sudo docker cp orderer.yaliyomo.net:/var/hyperledger/production/orderer/ backup/orderer/
    sudo docker cp orderer1.yaliyomo.net:/var/hyperledger/production/orderer/ backup/orderer1/
    sudo docker cp orderer2.yaliyomo.net:/var/hyperledger/production/orderer/ backup/orderer2/
    sudo docker cp orderer3.yaliyomo.net:/var/hyperledger/production/orderer/ backup/orderer3/
    sudo docker cp orderer4.yaliyomo.net:/var/hyperledger/production/orderer/ backup/orderer4/
    
    # All done
    exit 0
fi

This is my backup shell script.
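To get the backup folder onto the second instance I archive it and copy it across. This is only a minimal sketch; the user, host name and target path (~/fabric/Blockchain/) are placeholders for my actual environment, assuming the restore script below is run from that Blockchain directory:

# Archive the backup on dev1 and copy it to dev2 (user/host/path are placeholders)
tar czf backup.tar.gz backup/
scp backup.tar.gz user@dev2:~/fabric/Blockchain/
# Unpack on dev2 so ./backup sits where the restore script and compose file expect it
ssh user@dev2 'cd ~/fabric/Blockchain && tar xzf backup.tar.gz'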

set -e

#bringing network down and clearing volumes

cd ../docker-compose/

sudo ./networkDelete.sh

# sudo docker-compose -f docker-compose.yml down

# -f skips the interactive confirmation prompt so the script does not block
sudo docker volume prune -f

sudo docker network prune -f

# Copying the wallet data from backup to application wallet
echo
echo "Copying the wallet data from backup to application wallet"
echo "Current working directory: $(pwd)"

cd ../Blockchain/backup

if sudo cp -r ./wallet/* ../../wallet/; then
    echo "Wallet data copied successfully."
else
    echo "Failed to copy wallet data."
    exit 1
fi

# sudo cp -r ./wallet/* ../../wallet/

echo "Bringing network Up with Previous Backup"

cd ..

if sudo docker-compose -f restore-network.yaml up -d; then
    echo "Containers successfully started."
    # All done...
    sleep 20
    echo "Successfully up the containers"
else
    echo "Failed to start containers."
    exit 1
fi


# sudo docker-compose -f restore-network.yaml up -d
# #All done...
# sleep 20
# echo "successfully up the containers"


# Querying Data


# cd ./y-network/scripts/
# sudo ./envVar.sh
# exit 1


cd ./y-network/scripts/

if sudo ./envVar.sh; then
    echo "Data queried successfully."
else
    echo "Failed to query data."
    exit 1
fi

exit 0

This is my restore shell script.
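Before bringing the network up on the second instance I also try to sanity-check the copied TLS material. This is just a sketch for one peer, using the paths that the compose file below mounts from the backup; the same check can be repeated for the other peers and the orderers:

# Check that the restored server certificate still chains to the org's TLS CA
openssl verify \
  -CAfile backup/organizations/peerOrganizations/org1.yaliyomo.net/peers/peer0.org1.yaliyomo.net/tls/ca.crt \
  backup/organizations/peerOrganizations/org1.yaliyomo.net/peers/peer0.org1.yaliyomo.net/tls/server.crt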

# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#

version: '2'

volumes:
  orderer.yaliyomo.net:
  orderer1.yaliyomo.net:
  orderer2.yaliyomo.net:
  orderer3.yaliyomo.net:
  orderer4.yaliyomo.net:
  peer0.org1.yaliyomo.net:
  peer0.org2.yaliyomo.net:
  peer0.org3.yaliyomo.net:
  grafana-data:

networks:
  test:

services:

  ca_org1:
    image: hyperledger/fabric-ca:$IMAGE_TAG
    restart: always
    environment:
      - FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server
      - FABRIC_CA_SERVER_CA_NAME=ca-org1
      - FABRIC_CA_SERVER_TLS_ENABLED=true
      - FABRIC_CA_SERVER_PORT=7054
    ports:
      - "7054:7054"
    command: sh -c 'fabric-ca-server start -b admin:adminpw -d'
    volumes:
      - ./backup/organizations/fabric-ca/org1:/etc/hyperledger/fabric-ca-server
    container_name: ca_org1
    networks:
      - test

  ca_org2:
    image: hyperledger/fabric-ca:$IMAGE_TAG
    restart: always
    environment:
      - FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server
      - FABRIC_CA_SERVER_CA_NAME=ca-org2
      - FABRIC_CA_SERVER_TLS_ENABLED=true
      - FABRIC_CA_SERVER_PORT=8054
    ports:
      - "8054:8054"
    command: sh -c 'fabric-ca-server start -b admin:adminpw -d'
    volumes:
      - ./backup/organizations/fabric-ca/org2:/etc/hyperledger/fabric-ca-server
    container_name: ca_org2
    networks:
      - test

  ca_org3:
    image: hyperledger/fabric-ca:$IMAGE_TAG
    restart: always
    environment:
      - FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server
      - FABRIC_CA_SERVER_CA_NAME=ca-org3
      - FABRIC_CA_SERVER_TLS_ENABLED=true
      - FABRIC_CA_SERVER_PORT=6054
    ports:
      - "6054:6054"
    command: sh -c 'fabric-ca-server start -b admin:adminpw -d'
    volumes:
      - ./backup/organizations/fabric-ca/org3:/etc/hyperledger/fabric-ca-server
    container_name: ca_org3
    networks:
      - test

  ca_orderer:
    image: hyperledger/fabric-ca:$IMAGE_TAG
    restart: always
    environment:
      - FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server
      - FABRIC_CA_SERVER_CA_NAME=ca-orderer
      - FABRIC_CA_SERVER_TLS_ENABLED=true
      - FABRIC_CA_SERVER_PORT=9054
    ports:
      - "9054:9054"
    command: sh -c 'fabric-ca-server start -b admin:adminpw -d'
    volumes:
      - ./backup/organizations/fabric-ca/ordererOrg:/etc/hyperledger/fabric-ca-server
    container_name: ca_orderer
    networks:
      - test
  couchdb0:
    container_name: couchdb0
    image: hyperledger/fabric-couchdb
    restart: always
    # Populate the COUCHDB_USER and COUCHDB_PASSWORD to set an admin user and password
    # for CouchDB.  This will prevent CouchDB from operating in an "Admin Party" mode.
    environment:
      - COUCHDB_USER=
      - COUCHDB_PASSWORD=
    # Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service,
    # for example map it to utilize Fauxton User Interface in dev environments.
    ports:
      - "5984:5984"
    networks:
      - test
    volumes:
      - /data/couchdb0:/opt/couchdb/data

  couchdb1:
    container_name: couchdb1
    image: hyperledger/fabric-couchdb
    restart: always
    # Populate the COUCHDB_USER and COUCHDB_PASSWORD to set an admin user and password
    # for CouchDB.  This will prevent CouchDB from operating in an "Admin Party" mode.
    environment:
      - COUCHDB_USER=
      - COUCHDB_PASSWORD=
    # Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service,
    # for example map it to utilize Fauxton User Interface in dev environments.
    ports:
      - "7984:5984"
    networks:
      - test
    volumes:
      - /data/couchdb1:/opt/couchdb/data

  couchdb2:
    container_name: couchdb2
    image: hyperledger/fabric-couchdb
    restart: always
    # Populate the COUCHDB_USER and COUCHDB_PASSWORD to set an admin user and password
    # for CouchDB.  This will prevent CouchDB from operating in an "Admin Party" mode.
    environment:
      - COUCHDB_USER=
      - COUCHDB_PASSWORD=
    # Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service,
    # for example map it to utilize Fauxton User Interface in dev environments.
    ports:
      - "6984:5984"
    networks:
      - test
    volumes:
      - /data/couchdb2:/opt/couchdb/data
    
  orderer.yaliyomo.net:
    container_name: orderer.yaliyomo.net
    image: hyperledger/fabric-orderer:$IMAGE_TAG
    restart: always
    environment:
      - FABRIC_LOGGING_SPEC=INFO
      - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
      - ORDERER_GENERAL_LISTENPORT=7050
      - ORDERER_GENERAL_GENESISMETHOD=file
      - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block
      - ORDERER_GENERAL_LOCALMSPID=OrdererMSP
      - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
      # enabled TLS
      - ORDERER_GENERAL_TLS_ENABLED=true
      - ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
      - ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1
      - ORDERER_KAFKA_VERBOSE=true
      - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric
    command: orderer
    volumes:
        - ./backup/system-genesis-block/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer.yaliyomo.net/msp:/var/hyperledger/orderer/msp
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer.yaliyomo.net/tls/:/var/hyperledger/orderer/tls
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer:/var/hyperledger
        - ./backup/orderer:/var/hyperledger/production/orderer 
        #- /var/hyperledger/orderer:/var/hyperledger/production              
    ports:
      - 7050:7050
    networks:
      - test
  orderer1.yaliyomo.net:
    container_name: orderer1.yaliyomo.net
    image: hyperledger/fabric-orderer:$IMAGE_TAG
    restart: always
    environment:
      - FABRIC_LOGGING_SPEC=INFO
      - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
      - ORDERER_GENERAL_LISTENPORT=7050
      - ORDERER_GENERAL_GENESISMETHOD=file
      - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block
      - ORDERER_GENERAL_LOCALMSPID=OrdererMSP
      - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
      # enabled TLS
      - ORDERER_GENERAL_TLS_ENABLED=true
      - ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
      - ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1
      - ORDERER_KAFKA_VERBOSE=true
      - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric
    command: orderer
    volumes:
        - ./backup/system-genesis-block/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer1.yaliyomo.net/msp:/var/hyperledger/orderer/msp
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer1.yaliyomo.net/tls/:/var/hyperledger/orderer/tls
        - ./backup/orderer1:/var/hyperledger/production/orderer 
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer1:/var/hyperledger
        #- /var/hyperledger/orderer:/var/hyperledger/production              
    ports:
      - 7150:7050
    networks:
      - test
  orderer2.yaliyomo.net:
    container_name: orderer2.yaliyomo.net
    image: hyperledger/fabric-orderer:$IMAGE_TAG
    restart: always
    environment:
      - FABRIC_LOGGING_SPEC=INFO
      - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
      - ORDERER_GENERAL_LISTENPORT=7050
      - ORDERER_GENERAL_GENESISMETHOD=file
      - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block
      - ORDERER_GENERAL_LOCALMSPID=OrdererMSP
      - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
      # enabled TLS
      - ORDERER_GENERAL_TLS_ENABLED=true
      - ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
      - ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1
      - ORDERER_KAFKA_VERBOSE=true
      - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric
    command: orderer
    volumes:
        - ./backup/system-genesis-block/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer2.yaliyomo.net/msp:/var/hyperledger/orderer/msp
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer2.yaliyomo.net/tls/:/var/hyperledger/orderer/tls
        - ./backup/orderer2:/var/hyperledger/production/orderer 
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer2:/var/hyperledger
        #- /var/hyperledger/orderer:/var/hyperledger/production              
    ports:
      - 7250:7050
    networks:
      - test  
  orderer3.yaliyomo.net:
    container_name: orderer3.yaliyomo.net
    image: hyperledger/fabric-orderer:$IMAGE_TAG
    restart: always
    environment:
      - FABRIC_LOGGING_SPEC=INFO
      - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
      - ORDERER_GENERAL_LISTENPORT=7050
      - ORDERER_GENERAL_GENESISMETHOD=file
      - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block
      - ORDERER_GENERAL_LOCALMSPID=OrdererMSP
      - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
      # enabled TLS
      - ORDERER_GENERAL_TLS_ENABLED=true
      - ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
      - ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1
      - ORDERER_KAFKA_VERBOSE=true
      - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric
    command: orderer
    volumes:
        - ./backup/system-genesis-block/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer3.yaliyomo.net/msp:/var/hyperledger/orderer/msp
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer3.yaliyomo.net/tls/:/var/hyperledger/orderer/tls
        - ./backup/orderer3:/var/hyperledger/production/orderer 
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer3:/var/hyperledger
        #- /var/hyperledger/orderer:/var/hyperledger/production              
    ports:
      - 7350:7050
    networks:
      - test  
  orderer4.yaliyomo.net:
    container_name: orderer4.yaliyomo.net
    image: hyperledger/fabric-orderer:$IMAGE_TAG
    restart: always
    environment:
      - FABRIC_LOGGING_SPEC=INFO
      - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
      - ORDERER_GENERAL_LISTENPORT=7050
      - ORDERER_GENERAL_GENESISMETHOD=file
      - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block
      - ORDERER_GENERAL_LOCALMSPID=OrdererMSP
      - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
      # enabled TLS
      - ORDERER_GENERAL_TLS_ENABLED=true
      - ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
      - ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1
      - ORDERER_KAFKA_VERBOSE=true
      - ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric
    command: orderer
    volumes:
        - ./backup/system-genesis-block/genesis.block:/var/hyperledger/orderer/orderer.genesis.block
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer4.yaliyomo.net/msp:/var/hyperledger/orderer/msp
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer4.yaliyomo.net/tls/:/var/hyperledger/orderer/tls
        - ./backup/orderer4:/var/hyperledger/production/orderer 
        - ./backup/organizations/ordererOrganizations/yaliyomo.net/orderers/orderer4:/var/hyperledger
        #- /var/hyperledger/orderer:/var/hyperledger/production              
    ports:
      - 7450:7050
    networks:
      - test  

  peer0.org1.yaliyomo.net:
    container_name: peer0.org1.yaliyomo.net
    restart: always
    image: hyperledger/fabric-peer:$IMAGE_TAG
    environment:
      #Generic peer variables
      - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
      # the following setting starts chaincode containers on the same
      # bridge network as the peers
      # https://docs.docker.com/compose/networking/
      - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=${COMPOSE_PROJECT_NAME}_test
      - FABRIC_LOGGING_SPEC=INFO
      #- FABRIC_LOGGING_SPEC=DEBUG
      - CORE_PEER_TLS_ENABLED=true
      - CORE_PEER_GOSSIP_USELEADERELECTION=true
      - CORE_PEER_GOSSIP_ORGLEADER=false
      - CORE_PEER_PROFILE_ENABLED=true
      - CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/tls/server.crt
      - CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/tls/server.key
      - CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/tls/ca.crt
      # Peer specific variables
      - CORE_PEER_ID=peer0.org1.yaliyomo.net
      - CORE_PEER_ADDRESS=peer0.org1.yaliyomo.net:7051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:7051
      - CORE_PEER_CHAINCODEADDRESS=peer0.org1.yaliyomo.net:7052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:7052
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org1.yaliyomo.net:7051
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org1.yaliyomo.net:7051
      - CORE_PEER_LOCALMSPID=Org1MSP
      - CORE_OPERATIONS_LISTENADDRESS=0.0.0.0:9443  # operation RESTful API
      - CORE_METRICS_PROVIDER=prometheus  # prometheus will pull metrics from orderer via /metrics RESTful API
      - CORE_LEDGER_STATE_STATEDATABASE=CouchDB
      - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb0:5984
      # The CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME and CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD
      # provide the credentials for ledger to connect to CouchDB.  The username and password must
      # match the username and password set for the associated CouchDB.
      - CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=
      - CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=
    volumes:
        - /var/run/:/host/var/run/
        - ./backup/organizations/peerOrganizations/org1.yaliyomo.net/peers/peer0.org1.yaliyomo.net/msp:/etc/hyperledger/fabric/msp
        - ./backup/organizations/peerOrganizations/org1.yaliyomo.net/peers/peer0.org1.yaliyomo.net/tls:/etc/hyperledger/fabric/tls
        - ./backup/peer0.org1:/var/hyperledger/production
        #- /var/hyperledger/peer01:/var/hyperledger/production
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
    command: peer node start
    ports:
      - 7051:7051
    networks:
      - test
    depends_on:
      - couchdb0  

  peer0.org2.yaliyomo.net:
    container_name: peer0.org2.yaliyomo.net
    image: hyperledger/fabric-peer:$IMAGE_TAG
    restart: always
    environment:
      #Generic peer variables
      - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
      # the following setting starts chaincode containers on the same
      # bridge network as the peers
      # https://docs.docker.com/compose/networking/
      - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=${COMPOSE_PROJECT_NAME}_test
      - FABRIC_LOGGING_SPEC=INFO
      #- FABRIC_LOGGING_SPEC=DEBUG
      - CORE_PEER_TLS_ENABLED=true
      - CORE_PEER_GOSSIP_USELEADERELECTION=true
      - CORE_PEER_GOSSIP_ORGLEADER=false
      - CORE_PEER_PROFILE_ENABLED=true
      - CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/tls/server.crt
      - CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/tls/server.key
      - CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/tls/ca.crt
      # Peer specific variables
      - CORE_PEER_ID=peer0.org2.yaliyomo.net
      - CORE_PEER_ADDRESS=peer0.org2.yaliyomo.net:8051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:8051
      - CORE_PEER_CHAINCODEADDRESS=peer0.org2.yaliyomo.net:8052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:8052
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org2.yaliyomo.net:8051
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org2.yaliyomo.net:8051
      - CORE_PEER_LOCALMSPID=Org2MSP
      - CORE_OPERATIONS_LISTENADDRESS=0.0.0.0:9443  # operation RESTful API
      - CORE_METRICS_PROVIDER=prometheus  # prometheus will pull metrics from orderer via /metrics RESTful API
      - CORE_LEDGER_STATE_STATEDATABASE=CouchDB
      - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb1:5984
      # The CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME and CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD
      # provide the credentials for ledger to connect to CouchDB.  The username and password must
      # match the username and password set for the associated CouchDB.
      - CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=
      - CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=
    volumes:
        - /var/run/:/host/var/run/
        - ./backup/organizations/peerOrganizations/org2.yaliyomo.net/peers/peer0.org2.yaliyomo.net/msp:/etc/hyperledger/fabric/msp
        - ./backup/organizations/peerOrganizations/org2.yaliyomo.net/peers/peer0.org2.yaliyomo.net/tls:/etc/hyperledger/fabric/tls
        - ./backup/peer0.org2:/var/hyperledger/production
        #- /var/hyperledger/peer02:/var/hyperledger/production                
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
    command: peer node start
    ports:
      - 8051:8051
    networks:
      - test
    depends_on:
      - couchdb1
  
  peer0.org3.yaliyomo.net:
    container_name: peer0.org3.yaliyomo.net
    image: hyperledger/fabric-peer:$IMAGE_TAG
    restart: always
    environment:
      #Generic peer variables
      - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
      # the following setting starts chaincode containers on the same
      # bridge network as the peers
      # https://docs.docker.com/compose/networking/
      - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=${COMPOSE_PROJECT_NAME}_test
      - FABRIC_LOGGING_SPEC=INFO
      #- FABRIC_LOGGING_SPEC=DEBUG
      - CORE_PEER_TLS_ENABLED=true
      - CORE_PEER_GOSSIP_USELEADERELECTION=true
      - CORE_PEER_GOSSIP_ORGLEADER=false
      - CORE_PEER_PROFILE_ENABLED=true
      - CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/tls/server.crt
      - CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/tls/server.key
      - CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/tls/ca.crt
      # Peer specific variables
      - CORE_PEER_ID=peer0.org3.yaliyomo.net
      - CORE_PEER_ADDRESS=peer0.org3.yaliyomo.net:6051
      - CORE_PEER_LISTENADDRESS=0.0.0.0:6051
      - CORE_PEER_CHAINCODEADDRESS=peer0.org3.yaliyomo.net:6052
      - CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:6052
      - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org3.yaliyomo.net:6051
      - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org3.yaliyomo.net:6051
      - CORE_PEER_LOCALMSPID=Org3MSP
      - CORE_OPERATIONS_LISTENADDRESS=0.0.0.0:9443  # operation RESTful API
      - CORE_METRICS_PROVIDER=prometheus  # prometheus will pull metrics from orderer via /metrics RESTful API
      - CORE_LEDGER_STATE_STATEDATABASE=CouchDB
      - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb2:5984
      # The CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME and CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD
      # provide the credentials for ledger to connect to CouchDB.  The username and password must
      # match the username and password set for the associated CouchDB.
      - CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=
      - CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=
    volumes:
        - /var/run/:/host/var/run/
        - ./backup/organizations/peerOrganizations/org3.yaliyomo.net/peers/peer0.org3.yaliyomo.net/msp:/etc/hyperledger/fabric/msp
        - ./backup/organizations/peerOrganizations/org3.yaliyomo.net/peers/peer0.org3.yaliyomo.net/tls:/etc/hyperledger/fabric/tls
        - ./backup/peer0.org3:/var/hyperledger/production        
        #- /var/hyperledger/peer03:/var/hyperledger/production
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
    command: peer node start
    ports:
      - 6051:6051
    networks:
      - test
    depends_on:
      - couchdb2  
  
  # prometheus:
  #   image: prom/prometheus
  #   restart: always
  #   volumes:
  #     - ./prometheus.yml:/etc/prometheus/prometheus.yml
  #   networks:
  #     - test
  
  pushGateway:
    image: prom/pushgateway
    restart: always
    ports:
      - "9091:9091"
    networks:
      - test

  node_exporter:
    image: prom/node-exporter
    restart: always
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--path.rootfs=/host'
      - '--collector.filesystem.ignored-mount-points="^(/rootfs|/host|)/(sys|proc|dev|host|etc)($$|/)"'
      - '--collector.filesystem.ignored-fs-types="^(sys|proc|auto|cgroup|devpts|ns|au|fuse.lxc|mqueue)(fs|)$$"'  
    networks:
      - test


  # grafana:
  #   image: grafana/grafana
  #   restart: always
  #   volumes: 
  #     - grafana-data:/var/lib/grafana
  #     - ./grafana/:/etc/grafana/provisioning/
  #   ports:
  #     - "3001:3000"
  #   networks:
  #     - test

This is the restore docker-compose configuration (restore-network.yaml).
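Note that the compose file references $IMAGE_TAG and ${COMPOSE_PROJECT_NAME}, so those variables have to be set the same way on the second instance, for example through an .env file next to restore-network.yaml. The values below are just an example, not necessarily the ones from dev1:

# .env next to restore-network.yaml (example values only)
COMPOSE_PROJECT_NAME=net
IMAGE_TAG=2.2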

I am following https://www.devprovider.com/2019/10/30/how-to-restore-hyperledger-fabric-from-backup/ for taking the backup and restoring it.

Restoring on the same instance works fine as expected, but when restoring from one instance to another I get a certificate issue.

I am trying to restore an HLF backup into another instance, e.g. restoring the dev1 backup into the dev2 environment.

At that point I get the following errors:

2024-03-27 05:26:21.293 UTC 002e ERRO [core.comm] ServerHandshake -> Server TLS handshake failed in 655.908µs with error remote error: tls: bad certificate server=PeerServer remoteaddress=172.23.0.20:46364
2024-03-27 05:26:21.328 UTC 002f ERRO [core.comm] ServerHandshake -> Server TLS handshake failed in 933.209µs with error remote error: tls: bad certificate server=PeerServer remoteaddress=172.23.0.19:32894
2024-03-27 05:26:22.295 UTC 0030 ERRO [core.comm] ServerHandshake -> Server TLS handshake failed in 856.132µs with error remote error: tls: bad certificate server=PeerServer remoteaddress=172.23.0.20:46380
2024-03-27 05:26:22.329 UTC 0031 ERRO [core.comm] ServerHandshake -> Server TLS handshake failed in 996.547µs with error remote error: tls: bad certificate server=PeerServer remoteaddress=172.23.0.19:32906
2024-03-27 05:26:23.999 UTC 0032 ERRO [core.comm] ServerHandshake -> Server TLS handshake failed in 747.401µs with error remote error: tls: bad certificate server=PeerServer remoteaddress=172.23.0.20:46382
2024-03-27 05:26:24.033 UTC 0033 ERRO [core.comm] ServerHandshake -> Server TLS handshake failed in 687.64µs with error remote error: tls: bad certificate server=PeerServer remoteaddress=172.23.0.19:32920
2024-03-27 05:26:24.237 UTC 0034 WARN [gossip.gossip] func1 -> Deep probe of peer0.org3.yaliyomo.net:6051 for channel mychannel failed: context deadline exceeded
2024-03-27 05:26:24.237 UTC 0036 WARN [gossip.discovery] func1 -> Could not connect to Endpoint: peer0.org3.yaliyomo.net:6051, InternalEndpoint: peer0.org3.yaliyomo.net:6051, PKI-ID: <nil>, Metadata:  : context deadline exceeded
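As a check (run on dev2, from the directory that contains backup/, using the port mapping and TLS paths from the compose file above), the following should show whether the certificate peer0.org1 presents still verifies against the TLS CA restored from the backup; if dev2 ever regenerated its own crypto material, this is where I would expect a mismatch:

# Inspect the TLS handshake against peer0.org1 and verify it with the backed-up TLS CA
openssl s_client -connect localhost:7051 \
  -CAfile backup/organizations/peerOrganizations/org1.yaliyomo.net/peers/peer0.org1.yaliyomo.net/tls/ca.crt </dev/null
# Look for "Verify return code: 0 (ok)" in the output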