Cloud functions are invoked, and billed, only when they actually run, which makes them a good fit for periodic backup jobs.
The plugin installation tutorial is here:
https://cloud.tencent.com/document/product/583/38090
Create the function from the HelloWorld template
Because the environment provided by the cloud function cannot, for now, come with arbitrary binaries or third-party libraries preinstalled, the needed files have to be copied into the function's code directory by hand. This walkthrough needs mysqldump to run the database dump on the command line, plus the Python third-party libraries toml and cos-python-sdk-v5 to read the configuration and talk to Tencent Cloud COS.
pip install --target=./site_packages toml cos-python-sdk-v5
Because the uploaded mysqldump binary will not necessarily keep its execute permission, it is best to chmod it executable during initialization. Also, so that the code can find the bundled libraries, add site_packages to Python's module search path. The code looks like this:
import os
import sys

os.system('chmod a+x ./mysqldump')  # make mysqldump executable

# put the bundled site_packages at the front of the module search path
site_packages_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'site_packages')
sys.path.insert(0, site_packages_dir)

# from here on, toml can be imported directly
import toml
With that, everything we need is in place, and we can write the code for the whole backup-and-upload flow.
The configuration file is written in toml; the one used in this walkthrough looks like this:
# common DB settings; the entries below may override these
[ifish.backup.db.common]
host = "1.2.3.4"
port = 3306
user = "root"
password = "123456"

# the DBs to back up
# settings here override the ones in common
[[ifish.backup.db.dbs]]
user = "test"
password = "123456"
db = "test" # name of the DB to back up

# Tencent Cloud COS settings
[ifish.backup.db.cos]
SecretId = "123456"
SecretKey = "123456"
Region = "ap-beijing"
Scheme = "https"
BackupBuckets = [ "backup",]
BackupKeyPrefix = "db" # folder name inside the bucket
MaxBackupTimes = 5 # max backups to keep: 5 means the bucket retains only the 5 most recent backups; older ones are deleted
The overall project directory structure is as follows:
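The original figure is not reproduced here; reconstructed from the files referenced in this article, the layout is roughly:

.
├── index.py          # entry point: main_handler
├── config.py         # config loading and prod/dev switching
├── cos.py            # COS upload and expired-backup cleanup
├── util.py           # mysqldump and zip helpers
├── mysqldump         # hand-copied mysqldump binary
├── site_packages/    # output of pip install --target (toml, cos-python-sdk-v5)
├── prod.toml         # production config
├── test.toml         # local-test config
├── tmp/              # local-test working directory
└── template.yaml     # SCF deployment template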
The test.toml file and the tmp directory exist only for local testing. An environment variable set in the cloud function's template.yaml tells the program whether it is running in production or in development. The code follows:
config.py
# -*- coding: utf-8 -*-
import os
import sys

if os.getenv('DEPLOY', 'dev') != 'prod':
    sys.path.append('.')

site_packages_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'site_packages')
sys.path.insert(0, site_packages_dir)

import toml
import logging

if os.getenv('DEPLOY', 'dev') == 'prod':
    _CONFIG_FILE = './prod.toml'
    WORK_DIR = '/tmp'
    # note: -P (uppercase) is mysqldump's port flag; -p (lowercase) is the password
    MYSQL_DUMP_CMD = './mysqldump -h%s -P%d -u%s -p%s %s > %s'
    _LOG_LEVEL = logging.INFO
else:
    _CONFIG_FILE = './test.toml'
    WORK_DIR = './tmp'
    MYSQL_DUMP_CMD = 'mysqldump -h%s -P%d -u%s -p%s %s > %s'
    _LOG_LEVEL = logging.DEBUG

logging.basicConfig(level=_LOG_LEVEL, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger()

if os.getenv('DEPLOY', 'dev') == 'prod':
    logger.info('PROD'.center(50, '-'))
else:
    logger.info('DEV'.center(50, '-'))

# load the config file
config = toml.load(_CONFIG_FILE)
cos_config = config['ifish']['backup']['db']['cos']
config = config['ifish']['backup']['db']

# items that the merge of common and each per-db entry must contain
_CONFIG_COMMON_ITEMS = ['host', 'port', 'user', 'password']
# items that each per-db entry must contain itself
_CONFIG_SPECIFIC_ITEMS = ['db']

for conf in config['dbs']:
    for k in _CONFIG_COMMON_ITEMS:
        if not conf.get(k):
            conf[k] = config['common'][k]
    for k in _CONFIG_SPECIFIC_ITEMS:
        if not conf.get(k):
            raise ValueError('Specific config must have item: %s' % k)

# make mysqldump executable
os.system('chmod a+x ./mysqldump')

if __name__ == "__main__":
    import json
    print(json.dumps(config, indent=2, ensure_ascii=False))
    print(json.dumps(cos_config, indent=2, ensure_ascii=False))
cos.py
# -*- coding: utf-8 -*-
from config import cos_config, logger
import os
import datetime
import time
from qcloud_cos import CosConfig
from qcloud_cos import CosS3Client
import json

config_detail = {
    'SecretId': cos_config['SecretId'],
    'SecretKey': cos_config['SecretKey'],
    'Region': cos_config['Region'],
    'Token': cos_config.get('Token', None),
    'Scheme': cos_config.get('Scheme', 'https'),
}
config = CosConfig(**config_detail)
client = CosS3Client(config)


def upload_to_backup_buckets(local_file):
    logger.info('Start to Upload File: ' + local_file)
    file_name = os.path.basename(local_file)
    # prefix the object key with a timestamp so each backup is unique
    key = os.path.join(cos_config['BackupKeyPrefix'],
                       time.strftime('%Y_%m_%d_%H_%M_%S') + '_' + file_name)
    result = {}
    for bucket in cos_config['BackupBuckets']:
        res = client.upload_file(
            Bucket=bucket,
            LocalFilePath=local_file,
            Key=key,
            PartSize=1,
            MAXThread=10,
            EnableMD5=False
        )
        result[bucket] = res['ETag']
    logger.info('Finished Uploading File: ' + local_file)
    return result


def delete_expired_items():
    logger.info('Start Delete Expired Items')
    for bucket in cos_config['BackupBuckets']:
        logger.info('Process Bucket: ' + bucket)
        # the number of backups is currently far below 1000, so no pagination here
        response = client.list_objects(
            Bucket=bucket,
            Prefix=cos_config['BackupKeyPrefix']
        )
        if response['IsTruncated'] != 'false':
            raise ValueError('Too Many Objects')
        contents = []
        logger.info('Found Contents:\n %s' % json.dumps(response, indent=2, ensure_ascii=False))
        for s in response['Contents']:
            # skip the "folder" placeholder object itself
            if s['Key'].strip('/') == cos_config['BackupKeyPrefix']:
                continue
            content = {
                'key': s['Key'],
                'last_modified_date': datetime.datetime.strptime(s['LastModified'], '%Y-%m-%dT%H:%M:%S.%fZ')
            }
            contents.append(content)
        # newest first; sorted() returns a new list, so assign it back
        contents = sorted(contents, reverse=True, key=lambda k: k['last_modified_date'].timestamp())
        logger.debug(len(contents))
        if len(contents) > cos_config['MaxBackupTimes']:
            delete_contents = contents[cos_config['MaxBackupTimes']:]
            logger.info('Delete Items: %s' % delete_contents)
            delete_param = {
                'Object': [{'Key': c['key']} for c in delete_contents],
                'Quiet': 'false'
            }
            response = client.delete_objects(Bucket=bucket, Delete=delete_param)
            logger.info('Delete Result:\n %s' % json.dumps(response, indent=2, ensure_ascii=False))


if __name__ == "__main__":
    upload_to_backup_buckets('./prod.toml')
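The no-pagination shortcut above works because the backup count stays far below COS's 1,000-objects-per-page listing limit. If it ever grew beyond that, the listing would have to follow marker-based pagination instead of raising. A minimal sketch of such a loop, assuming the same client and cos_config as in cos.py and the SDK's Marker/NextMarker response fields (this helper is not part of the original code):

def list_all_backup_objects(bucket):
    """Hypothetical helper: page through list_objects with Marker/NextMarker."""
    contents, marker = [], ''
    while True:
        resp = client.list_objects(Bucket=bucket,
                                   Prefix=cos_config['BackupKeyPrefix'],
                                   Marker=marker)
        contents.extend(resp.get('Contents', []))
        if resp['IsTruncated'] == 'false':
            return contents
        marker = resp['NextMarker']  # resume listing from the end of this page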
util.py
# -*- coding: utf-8 -*-
import zipfile
import os
from config import MYSQL_DUMP_CMD, WORK_DIR, logger
import subprocess


def compress_dir(zip_dir, zip_filename, filename_filter=lambda filename: True):
    """
    Compress a directory into a zip file.
    :param zip_dir: directory to compress
    :param zip_filename: path of the resulting zip file
    :param filename_filter: keep only files for which this returns True
    :return:
    """
    with zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED) as zip_file:
        for dirpath, dirnames, filenames in os.walk(zip_dir):
            for filename in filenames:
                if filename_filter(filename):
                    fpath = os.path.join(dirpath, filename)
                    arc_path = os.path.relpath(fpath, zip_dir)
                    zip_file.write(fpath, arc_path)


def backup_one_db(host, port, user, password, db):
    logger.info(f'Start to backup DB: user={user}, db={db}')
    backup_file_name = f'{user}__{db}.sql'
    backup_file_path = os.path.join(WORK_DIR, backup_file_name)
    cmd = MYSQL_DUMP_CMD % (host, port, user, password, db, backup_file_path)
    if not password:
        cmd = cmd.replace('-p', '')  # drop the empty password flag
    logger.debug(cmd)
    # the command contains a shell redirection (> file), so run it through a shell
    p = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       encoding='utf8', timeout=30)
    if p.returncode == 0:
        logger.info(f'Backup DB successfully: user={user}, db={db}')
        return True
    else:
        logger.error(f'Backup DB failed: user={user}, db={db}, stderr={p.stderr}')
        return False


if __name__ == "__main__":
    zip_filename = '/Users/happylv/projects/tmp/test.zip'
    zip_dir = '/Users/happylv/projects/tmp'
    compress_dir(zip_dir, zip_filename)
index.py
# -*- coding: utf-8 -*-
import os
from config import logger, config, WORK_DIR
from cos import upload_to_backup_buckets, delete_expired_items
from util import backup_one_db, compress_dir


def main_handler(event, context):
    logger.info('Start Main Handler')
    # dump every configured DB into WORK_DIR
    for conf in config['dbs']:
        backup_one_db(**conf)
    # zip all the .sql dumps, upload, then prune old backups
    zip_filepath = os.path.join(WORK_DIR, 'db.zip')
    compress_dir(WORK_DIR, zip_filepath, lambda filename: filename.endswith('.sql'))
    upload_to_backup_buckets(zip_filepath)
    delete_expired_items()
    logger.info('End Main Handler')
    return {'code': 0, 'msg': 'success', 'data': {}}


if __name__ == '__main__':
    main_handler(None, None)
template.yaml
Resources:
  ifish:
    Type: TencentCloud::Serverless::Namespace
    db_backup:
      Properties:
        CodeUri: .
        Description: DB backup function
        Environment:
          Variables:
            DEPLOY: prod # this env var switches between production and test
        Events:
          # timer trigger: fires every day at 00:00
          backup_mysql_daily:
            Properties:
              CronExpression: 0 0 0 */1 * * *
              Enable: true
            Type: Timer
        Handler: index.main_handler
        MemorySize: 128
        Runtime: Python3.6
        Timeout: 300
        # VPC config so the cloud function can reach the DB
        VpcConfig:
          SubnetId: subnet-123456
          VpcId: vpc-123456
      Type: TencentCloud::Serverless::Function
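For reference, once template.yaml is in place, local testing and deployment can both be driven from the project root. The exact commands depend on the tooling installed via the tutorial linked at the top; assuming the scf CLI, the flow is roughly:

python index.py   # DEPLOY defaults to dev, so this runs the handler locally against test.toml
scf deploy        # package and deploy according to template.yaml (scf CLI assumed)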