Server Backup Script


I suddenly felt like migrating my server and realized I am not that comfortable with shell scripts; the old shell script was painful to modify, so I followed its flow and rewrote it in Python.

Python really is the way to go.

V2 was a direct translation of the shell script and was rather ugly, so I deleted it. What you see here is already V3.

The script supports backing up to AWS (S3). If you do not need some part of it, just delete the corresponding section.

import glob
import logging
import os
import shutil
import subprocess
import time
import boto3

from datetime import date
from argparse import ArgumentParser

cfg = {
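    # "store.location" picks the backend: "aws" uploads to S3 via boto3,
    # anything else keeps backups on disk under "local.base_dir".
    # Fill in the credentials, paths and database names below before running.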
    "store": {
        "location": "aws",
        "backup_prefix": "backup_",
        "local": {
            "max_backup_days": 30,
            "base_dir": "/home/backup/",
        },
        "aws": {
            "AWS_ACCESS_KEY_ID": '',
            "AWS_ACCESS_KEY_SECRET": '',
            "AWS_REGION_NAME": "",
            "AWS_BUCKET_NAME": ""
        }
    },
    "website": {
        "path": {
            "blog": "path_to_my_blog",
            "config": "path_to_my_config",
        }
    },
    "database": {
        "db": [
            {
                "hostname": "localhost",
                "username": '',
                "password": '',
                "database": [
                    "my_database_1",
                    "my_database_2",
                ]
            }
        ]
    }
}

logging.basicConfig(
    level=logging.INFO,
    filename='/var/log/backup_record.log',
    format='[%(asctime)s] %(levelname)s: %(message)s'
)


def backup_database(tmp_path, dst_dir_part):
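    # Dump every configured MySQL database with mysqldump, compress each dump
    # with 7z (deleting the raw .sql), then hand the directory to store().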
    logging.info("Start database backup")

    # Make working dir
    working_dir = os.path.join(tmp_path, "database")
    os.makedirs(working_dir, exist_ok=True)

    for database in cfg['database']['db']:
        for name in database['database']:
            try:
                # Dump the database to file
                sql_file = os.path.join(working_dir, "{}-{}.sql".format(database["hostname"], name))
                cmd_mysql = [
                    "mysqldump",
                    "--host={}".format(database["hostname"]),
                    "--user={}".format(database["username"]),
                    "--password={}".format(database["password"]),
                    name
                ]
                with open(sql_file, "w") as sql_out:
                    subprocess.run(cmd_mysql, stdout=sql_out, check=True)

                # Compress file, and delete the source
                cmd_7z = ["7z", "a", sql_file + ".7z", sql_file, "-sdel"]
                subprocess.run(cmd_7z, stdout=subprocess.DEVNULL, check=True)
                logging.info("Compress {}@{} success".format(name, database["hostname"]))
            except Exception:
                logging.error("Can not backup {}@{}".format(name, database["hostname"]))

    store(working_dir, "database", dst_dir_part)

    logging.info("Backup database success")


def backup_website(tmp_path, dst_dir_part):
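    # Compress each configured website path into a .7z archive,
    # then hand the directory to store().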
    logging.info("Start website backup")

    # Make working dir
    working_dir = os.path.join(tmp_path, "website")
    os.makedirs(working_dir, exist_ok=True)

    for name, path in cfg["website"]["path"].items():
        # Compress the folder directly
        try:
            cmd = ["7z", "a", os.path.join(working_dir, name + ".7z"), path]
            subprocess.run(cmd, stdout=subprocess.DEVNULL, check=True)
            logging.info("Compress [{}]{} success".format(name, path))
        except Exception:
            logging.error("Can not backup [{}]{}".format(name, path))

    store(working_dir, "website", dst_dir_part)

    logging.info("Backup website success")


def store(working_dir, which, dst_dir_part):
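    # Dispatch to S3 or local storage depending on cfg["store"]["location"].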
    if "aws" == cfg["store"]["location"]:
        store_aws(working_dir, which, dst_dir_part)
    else:
        store_local(working_dir, which, dst_dir_part)


def store_aws(working_dir, which, dst_dir_part):
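    # Upload every file under working_dir to the configured S3 bucket, under
    # the key <which>/<dst_dir_part>/<filename>, then delete working_dir.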
    session = boto3.Session(
        aws_access_key_id=cfg['store']['aws']['AWS_ACCESS_KEY_ID'],
        aws_secret_access_key=cfg['store']['aws']['AWS_ACCESS_KEY_SECRET'],
        region_name=cfg['store']['aws']['AWS_REGION_NAME']
    )
    s3 = session.resource('s3')
    bucket = s3.Bucket(cfg['store']['aws']['AWS_BUCKET_NAME'])

    for subdir, dirs, files in os.walk(working_dir):
        for file in files:
            try:
                with open(os.path.join(subdir, file), 'rb') as data:
                    bucket.put_object(Key=os.path.join(which, dst_dir_part, file), Body=data)
                logging.info("Upload {} to S3 success".format(file))
            except Exception:
                logging.error("Can not upload {} to S3".format(file))

    # Remove the working_dir by hand
    shutil.rmtree(working_dir)


def store_local(working_dir, which, dst_dir_part):
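    # Move working_dir under base_dir/<which>/<dst_dir_part> and prune
    # backups older than max_backup_days.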
    def clean_old(path, days_limit):
        # Anything modified before this cutoff timestamp is considered expired
        cutoff = time.time() - 60 * 60 * 24 * days_limit
        old_dirs = [d for d in glob.glob(os.path.join(path, "*"))
                    if os.path.isdir(d) and os.path.getmtime(d) < cutoff]
        for d in old_dirs:
            logging.info("Remove old backup {}".format(d))
            shutil.rmtree(d)

    dst_dir = os.path.join(cfg["store"]["local"]["base_dir"], which)
    os.makedirs(dst_dir, exist_ok=True)

    # Move the folder
    dst_full = os.path.join(dst_dir, dst_dir_part)
    if os.path.exists(dst_full):
        shutil.rmtree(dst_full)
    shutil.move(working_dir, dst_full)

    # Clean old files
    clean_old(dst_dir, cfg["store"]["local"]["max_backup_days"])
    # No need to delete working_dir here, since it has already been moved


if "__main__" == __name__:
    parser = ArgumentParser()
    parser.add_argument('--database', action="store_true")
    parser.add_argument('--website', action="store_true")
    args = parser.parse_args()

    dst_dir_part = "{}{}".format(cfg["store"]["backup_prefix"], date.today().isoformat())
    tmp_dir = os.path.join("/tmp", dst_dir_part)
    os.makedirs(tmp_dir, exist_ok=True)

    if args.database:
        backup_database(tmp_dir, dst_dir_part)

    if args.website:
        backup_website(tmp_dir, dst_dir_part)

    # Rubbish clean
    shutil.rmtree(tmp_dir, ignore_errors=True)
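
To run it, pass one or both flags. If you want it to run unattended, a crontab entry along these lines works too; backup.py and the 03:00 schedule are just placeholders, so adjust them to wherever you save the script and whenever you want it to run. Note that the log goes to /var/log/backup_record.log, so use a user that can write there.

# Back up the databases and the website files in one go
python3 backup.py --database --website

# Example crontab entry: full backup at 03:00 every day
0 3 * * * /usr/bin/python3 /path/to/backup.py --database --website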
