[init] initial commit

2023-06-05 23:04:30 +08:00
commit 66b1dd4d70
72 changed files with 10079 additions and 0 deletions

36
old/blog_update.sh Normal file

@ -0,0 +1,36 @@
#!/bin/bash
#===================================================================
# Filename : blog_update.sh
# Function :
# Usage :
# Author : Manford Fan
# Date : 2021-08-29 13:10
# Version : Version 0.1
# Disclaimer : The author is NOT responsible for any loss caused
# by the user's own operations.
# And More : If you find there are some bugs in this script
# Or you have better ideas, please do contact me
# via E-mail -- mffan0922@163.com
#===================================================================
# update blog
echo `date`
rm -rf /opt/websites/blog
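# Rotate the avatar: count the numbered logo images, pick one at random and copy it
# to the blog, homepage and nav sites (this assumes the logos are named 0.jpg .. N-1.jpg)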
let numOfAvatar=`ls /opt/websites/nav/assets/images/logos/ | wc -l`
let randNumber=$RANDOM%$numOfAvatar
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/websites/homepage/assets/img/logo.jpg -rf
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/source-code/blog/img/avatar.jpg -rf
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/websites/nav/assets/images/logos/avatar.jpg -rf
jekyll b -s /opt/source-code/blog/ -d /opt/websites/blog/
# update bash
rm -rf /opt/websites/just-the-docs/bash
jekyll b -s /opt/source-code/document/bash -d /opt/websites/just-the-docs/bash
# update python
rm -rf /opt/websites/just-the-docs/python
jekyll b -s /opt/source-code/document/python -d /opt/websites/just-the-docs/python
chown -R www-data:www-data /opt/websites

13
old/color.sh Normal file

@ -0,0 +1,13 @@
#!/bin/bash
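# Print a sample of every ANSI SGR combination: style (0-5,7,8) ; foreground (30-37) ; background (40-47),
# so the usable style/colour combinations are easy to pick out on the current terminal.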
nums=(0 1 2 3 4 5 7 8)
for i in ${nums[@]}
do
for j in `seq 30 37`
do
for k in `seq 40 47`
do
echo -e "$i;$j;${k}m -- \e[$i;$j;${k}mHello echo!\e[0m"
done
done
done

Binary image file not shown (162 KiB).

167
old/ctc/config.sh Normal file

@ -0,0 +1,167 @@
#!/bin/bash
# set -e
# bug-1: cannot find the configuration of a live domain
# usage
function usage {
echo -e "${c_bc}获取域名详细配置或者对比两个域名的配置异同:${c_e}"
echo -e " config -c domain"
echo -e " config -d domain_1 domain_2\n"
exit 100
}
function onCtrlC () {
# while capture Ctrl+C, kill all background processes silently and exit
exec 3>&2 # 3 is now a copy of 2
exec 2> /dev/null # 2 now points to /dev/null
kill ${bg_pids} ${progress_pid} >/dev/null 2>&1
sleep 1 # sleep to wait for process to die
exec 2>&3 # restore stderr to saved
exec 3>&- # close saved version
echo
echo -e "${c_bir}Ctrl+C is captured, exiting...\n${c_e}"
exit 1
}
function get_config {
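# Look the domain up in the hourly domain.list snapshot, then query the CDN config API
# (confmanager); if the CDN query finds nothing (the Python helper exits 204), fall back
# to the live-streaming (lasc-new) APIs instead.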
# Check whether the queried domain exists on the platform; domain.list is refreshed hourly by task.sh
res=`cat $data/domain.list | grep -w "$domain"`
if [[ $res == '' ]]; then
echo -e "${c_br}该域名[$domain]未在天翼平台配置,一个小时内新增的域名无法查询,退出...${c_e}"
echo -e "${c_br}只接收一个参数,请输入要查询的域名\ne.g. infos www.ctyun.cn\n${c_e}"
exit 247
fi
# ----------------------------------------------------------------------------------------
# Fetch domain info - part 1
curl "https://dcp.ctcdn.cn/traffic-control-api/v2/access/list?access_version_id=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjYzfQ.XZ_nNtRWoBRcPBM-bHAG_ciohkNh69n4AVHwV755r6Q' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_1.log > domain_info_1.response 2>&1
# Check that the response is HTTP 200
cat $trash/domain_info_1.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 1失败退出...${c_e}"; exit 246; }
python3 /usr/local/script/fanmf11/get_infos.py --map_info domain_info_1.log $domain
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 1信息失败退出...${c_e}"; exit 242; }
accid=`cat info.log | awk -F ':' '$1==3 {print $2}'`
# ----------------------------------------------------------------------------------------
# Fetch domain info -- CDN
curl "https://confmanager.ctcdn.cn/v1/domain/conf/query_union?precise_query=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjQxNn0.KEFh_yg3J1y8sL7s3X_8jIR8GGq88A89b7J5YhVYVf8' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_4.log > domain_info_4.response 2>&1
# Check that the response is HTTP 200
cat $trash/domain_info_4.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 4失败退出...${c_e}"; exit 243; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_config_cdn domain_info_4.log $accid $domain
r_code=$?
if [[ $r_code -eq 204 ]]; then
# ----------------------------------------------------------------------------------------
# Fetch domain info - live
curl "https://lasc-new.ctcdn.cn/v1/domain/conf/op_query_union?page_size=20&page=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_5.log > domain_info_5.response 2>&1
# Check that the response is HTTP 200
cat $trash/domain_info_5.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 5失败退出...${c_e}"; exit 235; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_5 domain_info_5.log $accid
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 5信息失败退出...${c_e}"; exit 237; }
# ----------------------------------------------------------------------------------------
# Fetch domain info - part 6
domain_id=`cat info.log | awk -F ':' '$1==4 {print $2}'`
curl "https://lasc-new.ctcdn.cn/v1/domain/internal/domain_config?domain_id=$domain_id" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_6.log > domain_info_6.response 2>&1
# Check that the response is HTTP 200
cat $trash/domain_info_6.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 6失败退出...${c_e}"; exit 238; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_config_live domain_info_6.log $domain
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 6信息失败退出...${c_e}"; exit 236; }
elif [[ $r_code -ne 0 ]]; then
echo -e "${c_br}处理域名-part 4信息失败退出...${c_e}"
exit 239
else
:
fi
}
# Self defined color shortcut
c_br='\e[1;31m' # bold red
c_bg='\e[1;32m' # bold green
c_by='\e[1;33m' # bold yellow
c_bb='\e[1;34m' # bold blue
c_bp='\e[1;35m' # bold purple
c_bc='\e[1;36m' # bold cyan
c_bir='\e[1;3;31m' # bold italic red
c_bib='\e[1;3;34m' # bold italic blue
c_bic='\e[1;3;36m' # bold italic cyan
c_e='\e[0m' # reset
# some initialization
stty erase '^H' # allow backspace
data='/usr/local/script/fanmf11/data'
toolbox='/usr/local/script/fanmf11/'
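# Prepend a dummy word so that the option flag and its one or two arguments always
# land in awk fields 2, 3 and 4, regardless of how many were supplied.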
OP="prefix "$@
dash=`echo $OP | awk '{print $2}'`
first=`echo $OP | awk '{print $3}'`
second=`echo $OP | awk '{print $4}'`
flg=1 # signify if rip is acquired successfully or not, 0 - OK and 1 -NG
TS=`date +%s%N`
host=`whoami`
trash="/usr/local/script/fanmf11/trash/$host/$TS"
if [[ -d $trash ]]; then
echo -e "${c_br}对于同一个用户,同一时间只能运行一个实例,请重新运行...${c_e}"
exit 245
else
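# First run under this timestamp: create the work dir, sweep stray files out of the
# per-user trash area and prune the oldest run directories until no more than 29 remain.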
mkdir -p $trash
cd $trash && cd ..
docs=`ls`
for doc in $docs; do
[[ -f $doc ]] && rm -rf $doc
done
folders=`ls -t`
while [[ `echo $folders | awk '{print NF}'` -gt 29 ]]; do
folder=`ls -t | tail -1`
rm -rf $folder
folders=`ls -t`
done
cd $trash && touch config
fi
# set a trap for Ctrl+C
trap 'onCtrlC' INT
if [[ $# -eq 2 && $dash == '-c' ]]; then
domain=$first
get_config
exec 3>&2 && exec 2> log.json
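# The API wraps nested JSON as escaped strings: strip spaces and escape sequences, re-parse
# with jq and run reformat.awk so the whole config prints as one document; if that yields a
# parse error, fall back to printing the raw payload.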
cat $domain | tr -d ' ' | sed 's/"{/{/g' | sed 's/}"/}/g' | sed 's/\\"/"/g' | sed 's/\\n/ /g' | sed 's/\\t//g'| jq -r . | awk -f $toolbox/reformat.awk | jq -r . > log.json 2>&1
cat log.json | grep -q 'parse error'
[[ $? -eq 0 ]] && { cat $domain | jq -r .; } || { cat log.json | jq -r .; }
exec 2>&3 && exec 3>&-
elif [[ $# -eq 3 && $dash == '-d' ]]; then
domain=$first
get_config
exec 3>&2 && exec 2> log.json
cat $domain | tr -d ' ' | sed 's/"{/{/g' | sed 's/}"/}/g' | sed 's/\\"/"/g' | sed 's/\\n/ /g' | sed 's/\\t//g'| jq -S . | awk -f $toolbox/reformat.awk | jq -S . > log.json 2>&1
cat log.json | grep -q 'parse error'
[[ $? -eq 0 ]] && { cat $domain | jq -S -f $toolbox/normalize.jq > first.json; } || { jq -S -f $toolbox/normalize.jq log.json > first.json; }
exec 2>&3 && exec 3>&-
domain=$second
get_config
exec 3>&2 && exec 2> log.json
cat $domain | tr -d ' ' | sed 's/"{/{/g' | sed 's/}"/}/g' | sed 's/\\"/"/g' | sed 's/\\n/ /g' | sed 's/\\t//g'| jq -S . | awk -f $toolbox/reformat.awk | jq -S . > log.json 2>&1
cat log.json | grep -q 'parse error'
[[ $? -eq 0 ]] && { cat $domain | jq -S -f $toolbox/normalize.jq > second.json; } || { jq -S -f $toolbox/normalize.jq log.json > second.json; }
exec 2>&3 && exec 3>&-
jaydiff --json --indent=' ' --slice-myers first.json second.json
echo -e "${c_bic}此对比结果仅供参考由于不同结构的JSON数据语义可能是相同的。${c_by}可以仔细对比下显示不同的部分,有可能是因为结构不同造成的。${c_bic}另外可以用JSON在线对比工具做进一步检查如下文件${c_e}"
echo -e "${c_bib} `pwd`/first.json${c_e}"
echo -e "${c_bib} `pwd`/second.json${c_e}\n"
else
usage
fi

3
old/ctc/dist/get_infos.py vendored Normal file

File diff suppressed because one or more lines are too long

483
old/ctc/dist/pytransform/__init__.py vendored Normal file

@ -0,0 +1,483 @@
# These modules are also used by the protection code, so that the protection
# code needn't import anything
import os
import platform
import sys
import struct
# Because ctypes was added in Python 2.5, pytransform doesn't work
# before Python 2.5
#
from ctypes import cdll, c_char, c_char_p, c_int, c_void_p, \
pythonapi, py_object, PYFUNCTYPE, CFUNCTYPE
from fnmatch import fnmatch
#
# Support Platforms
#
plat_path = 'platforms'
plat_table = (
('windows', ('windows', 'cygwin*')),
('darwin', ('darwin',)),
('ios', ('ios',)),
('linux', ('linux*',)),
('freebsd', ('freebsd*', 'openbsd*', 'isilon onefs')),
('poky', ('poky',)),
)
arch_table = (
('x86', ('i?86', )),
('x86_64', ('x64', 'x86_64', 'amd64', 'intel')),
('arm', ('armv5',)),
('armv6', ('armv6l',)),
('armv7', ('armv7l',)),
('ppc64', ('ppc64le',)),
('mips32', ('mips',)),
('aarch32', ('aarch32',)),
('aarch64', ('aarch64', 'arm64'))
)
#
# Hardware type
#
HT_HARDDISK, HT_IFMAC, HT_IPV4, HT_IPV6, HT_DOMAIN = range(5)
#
# Global
#
_pytransform = None
class PytransformError(Exception):
pass
def dllmethod(func):
def wrap(*args, **kwargs):
return func(*args, **kwargs)
return wrap
@dllmethod
def version_info():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('version_info', _pytransform))
return dlfunc()
@dllmethod
def init_pytransform():
major, minor = sys.version_info[0:2]
# Python2.5 no sys.maxsize but sys.maxint
# bitness = 64 if sys.maxsize > 2**32 else 32
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_void_p)
init_module = prototype(('init_module', _pytransform))
ret = init_module(major, minor, pythonapi._handle)
if (ret & 0xF000) == 0x1000:
raise PytransformError('Initialize python wrapper failed (%d)'
% (ret & 0xFFF))
return ret
@dllmethod
def init_runtime():
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
_init_runtime = prototype(('init_runtime', _pytransform))
return _init_runtime(0, 0, 0, 0)
@dllmethod
def encrypt_code_object(pubkey, co, flags, suffix=''):
_pytransform.set_option(6, suffix.encode())
prototype = PYFUNCTYPE(py_object, py_object, py_object, c_int)
dlfunc = prototype(('encrypt_code_object', _pytransform))
return dlfunc(pubkey, co, flags)
@dllmethod
def generate_license_key(prikey, keysize, rcode):
prototype = PYFUNCTYPE(py_object, c_char_p, c_int, c_char_p)
dlfunc = prototype(('generate_license_key', _pytransform))
return dlfunc(prikey, keysize, rcode) if sys.version_info[0] == 2 \
else dlfunc(prikey, keysize, rcode.encode())
@dllmethod
def get_registration_code():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('get_registration_code', _pytransform))
return dlfunc()
@dllmethod
def get_expired_days():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('get_expired_days', _pytransform))
return dlfunc()
@dllmethod
def clean_obj(obj, kind):
prototype = PYFUNCTYPE(c_int, py_object, c_int)
dlfunc = prototype(('clean_obj', _pytransform))
return dlfunc(obj, kind)
def clean_str(*args):
tdict = {
'str': 0,
'bytearray': 1,
'unicode': 2
}
for obj in args:
k = tdict.get(type(obj).__name__)
if k is None:
raise RuntimeError('Can not clean object: %s' % obj)
clean_obj(obj, k)
def get_hd_info(hdtype, name=None):
if hdtype not in range(HT_DOMAIN + 1):
raise RuntimeError('Invalid parameter hdtype: %s' % hdtype)
size = 256
t_buf = c_char * size
buf = t_buf()
cname = c_char_p(0 if name is None
else name.encode('utf-8') if hasattr(name, 'encode')
else name)
if (_pytransform.get_hd_info(hdtype, buf, size, cname) == -1):
raise PytransformError('Get hardware information failed')
return buf.value.decode()
def show_hd_info():
return _pytransform.show_hd_info()
def assert_armored(*names):
prototype = PYFUNCTYPE(py_object, py_object)
dlfunc = prototype(('assert_armored', _pytransform))
def wrapper(func):
def wrap_execute(*args, **kwargs):
dlfunc(names)
return func(*args, **kwargs)
return wrap_execute
return wrapper
def check_armored(*names):
try:
prototype = PYFUNCTYPE(py_object, py_object)
prototype(('assert_armored', _pytransform))(names)
return True
except RuntimeError:
return False
def get_license_info():
info = {
'ISSUER': None,
'EXPIRED': None,
'HARDDISK': None,
'IFMAC': None,
'IFIPV4': None,
'DOMAIN': None,
'DATA': None,
'CODE': None,
}
rcode = get_registration_code().decode()
if rcode.startswith('*VERSION:'):
index = rcode.find('\n')
info['ISSUER'] = rcode[9:index].split('.')[0].replace('-sn-1.txt', '')
rcode = rcode[index+1:]
index = 0
if rcode.startswith('*TIME:'):
from time import ctime
index = rcode.find('\n')
info['EXPIRED'] = ctime(float(rcode[6:index]))
index += 1
if rcode[index:].startswith('*FLAGS:'):
index += len('*FLAGS:') + 1
info['FLAGS'] = ord(rcode[index - 1])
prev = None
start = index
for k in ['HARDDISK', 'IFMAC', 'IFIPV4', 'DOMAIN', 'FIXKEY', 'CODE']:
index = rcode.find('*%s:' % k)
if index > -1:
if prev is not None:
info[prev] = rcode[start:index]
prev = k
start = index + len(k) + 2
info['CODE'] = rcode[start:]
i = info['CODE'].find(';')
if i > 0:
info['DATA'] = info['CODE'][i+1:]
info['CODE'] = info['CODE'][:i]
return info
def get_license_code():
return get_license_info()['CODE']
def get_user_data():
return get_license_info()['DATA']
def _match_features(patterns, s):
for pat in patterns:
if fnmatch(s, pat):
return True
def _gnu_get_libc_version():
try:
prototype = CFUNCTYPE(c_char_p)
ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))()
return ver.decode().split('.')
except Exception:
pass
def format_platform(platid=None):
if platid:
return os.path.normpath(platid)
plat = platform.system().lower()
mach = platform.machine().lower()
for alias, platlist in plat_table:
if _match_features(platlist, plat):
plat = alias
break
if plat == 'linux':
cname, cver = platform.libc_ver()
if cname == 'musl':
plat = 'musl'
elif cname == 'libc':
plat = 'android'
elif cname == 'glibc':
v = _gnu_get_libc_version()
if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214:
plat = 'centos6'
for alias, archlist in arch_table:
if _match_features(archlist, mach):
mach = alias
break
if plat == 'windows' and mach == 'x86_64':
bitness = struct.calcsize('P'.encode()) * 8
if bitness == 32:
mach = 'x86'
return os.path.join(plat, mach)
# Load _pytransform library
def _load_library(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
path = os.path.dirname(__file__) if path is None \
else os.path.normpath(path)
plat = platform.system().lower()
for alias, platlist in plat_table:
if _match_features(platlist, plat):
plat = alias
break
name = '_pytransform' + suffix
if plat == 'linux':
filename = os.path.abspath(os.path.join(path, name + '.so'))
elif plat in ('darwin', 'ios'):
filename = os.path.join(path, name + '.dylib')
elif plat == 'windows':
filename = os.path.join(path, name + '.dll')
elif plat in ('freebsd', 'poky'):
filename = os.path.join(path, name + '.so')
else:
filename = None
if platid is not None and os.path.isfile(platid):
filename = platid
elif platid is not None or not os.path.exists(filename) or not is_runtime:
libpath = platid if platid is not None and os.path.isabs(platid) else \
os.path.join(path, plat_path, format_platform(platid))
filename = os.path.join(libpath, os.path.basename(filename))
if filename is None:
raise PytransformError('Platform %s not supported' % plat)
if not os.path.exists(filename):
raise PytransformError('Could not find "%s"' % filename)
try:
m = cdll.LoadLibrary(filename)
except Exception as e:
if sys.flags.debug:
print('Load %s failed:\n%s' % (filename, e))
raise
# Removed from v4.6.1
# if plat == 'linux':
# m.set_option(-1, find_library('c').encode())
if not os.path.abspath('.') == os.path.abspath(path):
m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
elif (not is_runtime) and sys.platform.startswith('cygwin'):
path = os.environ['PYARMOR_CYGHOME']
m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
# Required from Python3.6
m.set_option(2, sys.byteorder.encode())
if sys.flags.debug:
m.set_option(3, c_char_p(1))
m.set_option(4, c_char_p(not is_runtime))
# Disable advanced mode by default
m.set_option(5, c_char_p(not advanced))
# Set suffix for private package
if suffix:
m.set_option(6, suffix.encode())
return m
def pyarmor_init(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
global _pytransform
_pytransform = _load_library(path, is_runtime, platid, suffix, advanced)
return init_pytransform()
def pyarmor_runtime(path=None, suffix='', advanced=0):
if _pytransform is not None:
return
try:
pyarmor_init(path, is_runtime=1, suffix=suffix, advanced=advanced)
init_runtime()
except Exception as e:
if sys.flags.debug or hasattr(sys, '_catch_pyarmor'):
raise
sys.stderr.write("%s\n" % str(e))
sys.exit(1)
# ----------------------------------------------------------
# End of pytransform
# ----------------------------------------------------------
#
# Unused
#
@dllmethod
def generate_license_file(filename, priname, rcode, start=-1, count=1):
prototype = PYFUNCTYPE(c_int, c_char_p, c_char_p, c_char_p, c_int, c_int)
dlfunc = prototype(('generate_project_license_files', _pytransform))
return dlfunc(filename.encode(), priname.encode(), rcode.encode(),
start, count) if sys.version_info[0] == 3 \
else dlfunc(filename, priname, rcode, start, count)
#
# Not available from v5.6
#
def generate_capsule(licfile):
prikey, pubkey, prolic = _generate_project_capsule()
capkey, newkey = _generate_pytransform_key(licfile, pubkey)
return prikey, pubkey, capkey, newkey, prolic
@dllmethod
def _generate_project_capsule():
prototype = PYFUNCTYPE(py_object)
dlfunc = prototype(('generate_project_capsule', _pytransform))
return dlfunc()
@dllmethod
def _generate_pytransform_key(licfile, pubkey):
prototype = PYFUNCTYPE(py_object, c_char_p, py_object)
dlfunc = prototype(('generate_pytransform_key', _pytransform))
return dlfunc(licfile.encode() if sys.version_info[0] == 3 else licfile,
pubkey)
#
# Deprecated functions from v5.1
#
@dllmethod
def encrypt_project_files(proname, filelist, mode=0):
prototype = PYFUNCTYPE(c_int, c_char_p, py_object, c_int)
dlfunc = prototype(('encrypt_project_files', _pytransform))
return dlfunc(proname.encode(), filelist, mode)
def generate_project_capsule(licfile):
prikey, pubkey, prolic = _generate_project_capsule()
capkey = _encode_capsule_key_file(licfile)
return prikey, pubkey, capkey, prolic
@dllmethod
def _encode_capsule_key_file(licfile):
prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
dlfunc = prototype(('encode_capsule_key_file', _pytransform))
return dlfunc(licfile.encode(), None)
@dllmethod
def encrypt_files(key, filelist, mode=0):
t_key = c_char * 32
prototype = PYFUNCTYPE(c_int, t_key, py_object, c_int)
dlfunc = prototype(('encrypt_files', _pytransform))
return dlfunc(t_key(*key), filelist, mode)
@dllmethod
def generate_module_key(pubname, key):
t_key = c_char * 32
prototype = PYFUNCTYPE(py_object, c_char_p, t_key, c_char_p)
dlfunc = prototype(('generate_module_key', _pytransform))
return dlfunc(pubname.encode(), t_key(*key), None)
#
# Compatible for PyArmor v3.0
#
@dllmethod
def old_init_runtime(systrace=0, sysprofile=1, threadtrace=0, threadprofile=1):
'''Only for old version, before PyArmor 3'''
pyarmor_init(is_runtime=1)
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
_init_runtime = prototype(('init_runtime', _pytransform))
return _init_runtime(systrace, sysprofile, threadtrace, threadprofile)
@dllmethod
def import_module(modname, filename):
'''Only for old version, before PyArmor 3'''
prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
_import_module = prototype(('import_module', _pytransform))
return _import_module(modname.encode(), filename.encode())
@dllmethod
def exec_file(filename):
'''Only for old version, before PyArmor 3'''
prototype = PYFUNCTYPE(c_int, c_char_p)
_exec_file = prototype(('exec_file', _pytransform))
return _exec_file(filename.encode())

BIN
old/ctc/dist/pytransform/_pytransform.so vendored Executable file

Binary file not shown.

603
old/ctc/get_infos.py Normal file

@ -0,0 +1,603 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import json
import sys
import signal
import readline
import os
c_title = '\033[1;4;31;42m' # title color
c_br = '\033[1;31m' # bold red
c_bg = '\033[1;32m' # bold green
c_by = '\033[1;33m' # bold yellow
c_bb = '\033[1;34m' # bold blue
c_bp = '\033[1;35m' # bold purple
c_bc = '\033[1;36m' # bold cyan
c_bir= '\033[1;3;31m' # * bold italic red
c_bib = '\033[1;3;34m' # * bold italic blue
c_bic = '\033[1;3;36m' # bold italic cyan
c_e = '\033[0m' # reset
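# Helper driven by the shell tools: each --option handled in main() reads a JSON response
# previously dumped by curl and prints (or writes to info.log / map.log) the fields the
# calling script needs.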
def get_parent(parent_log, inp_parent_id):
parent_all = {"dyn_first_parent": "动态一层父", "dyn_first_parent_all": "动态一层父所有", "dyn_first_parent_backups": "动态一层备父", "dyn_second_parent": "动态二层父", "first_parent": "一层父", "first_parent_backups": "一层备父", "pre_first_parent": "预部署一层父", "pre_first_parent_backups": "预部署一层备父", "pre_second_parent": "预部署二层父", "pre_second_parent_backups": "预部署二层备父", "second_parent": "二层父", "second_parent_backups": "二层备父"}
parent_related = {}
with open(parent_log) as obj_parent:
parents=json.loads(obj_parent.read())
for parent in parents['result']:
if parent['parent_id'] == inp_parent_id:
parent_name = parent['parent_name']
print(f"父方案: {parent_name}")
for parent_en, parent_cn in parent_all.items():
if parent[parent_en] != '':
parent_related[parent[parent_en]] = parent_cn
for parent_en, parent_cn in parent_related.items():
print(f"{parent_cn}: {parent_en}")
break
def get_respool(respool_log, inp_template_id, pool_type):
with open(respool_log) as obj_respool:
respools=json.loads(obj_respool.read())
for respool in respools['result']:
if int(respool['template_id']) == int(inp_template_id):
# print(f"{pool_type}: {respool['template_name']}")
return (f"{pool_type}: {respool['template_name']}")
def domain_info_1(domain_info_log, inp_domain):
with open(domain_info_log) as obj_domain_info:
domain_infos=json.loads(obj_domain_info.read())
# 判断是否是重叠域名
multi = len(domain_infos['data'])
if multi == 0:
print(f"{c_br}未找到该域名相关信息,可以登录网页系统查看是否有配置解析组,退出...{c_e}")
sys.exit(205)
overlap = "" if multi > 1 else ""
inp_index = 1
if multi > 1:
print(f"{c_bp}该域名是重叠域名,请确认要查询域名的归属账号: {c_e}")
index = 1
flag = 0
# 遍历重叠域名的账号邮箱,需要输入确定的序号
for domain_info in domain_infos['data']:
print(f"账号{index} - ", end="")
for find_it in domain_info['domains']:
if find_it['domain'] == inp_domain:
pretty_print3(f"账户: {find_it['account_name']}", f"邮箱: {find_it['email']}", f"accid: {find_it['account_id']}")
flag = 0
break
flag = 1
if flag == 1:
print()
flag = 0
index += 1
print(f"{c_by}请输入要查询域名归属账号的序号(e.g. 1, 2, 3...): {c_e}")
# 验证index是合法输入的逻辑
inp_index = input()
if inp_index.isdigit() and 1 <= int(inp_index) and int(inp_index) < index:
inp_index = int(inp_index)
else:
print(f"{c_br}请输入正确的序号,{c_e}", end="")
sys.exit(200)
inp_index -= 1
inp_index = inp_index if inp_index != 0 else 0
common_cname = len(domain_infos['data'][inp_index]['domains'])
for find_it in range(common_cname):
if domain_infos['data'][inp_index]['domains'][find_it]['domain'] == inp_domain:
break
common_cname = '是' if common_cname > 1 else '否'
common_cnames = []
for domain in domain_infos['data'][inp_index]['domains']:
common_cnames.append(domain['domain'])
account = domain_infos['data'][inp_index]['domains'][find_it]['account_name']
account_id = domain_infos['data'][inp_index]['domains'][find_it]['account_id']
access_id = domain_infos['data'][inp_index]['domains'][find_it]['access_id']
email = domain_infos['data'][inp_index]['domains'][find_it]['email']
cname = domain_infos['data'][inp_index]['cname']
cname_vendor = domain_infos['data'][inp_index]['access_vendor_cname']
parse_group = domain_infos['data'][inp_index]['parse_group_name']
with open("info.log", 'w', encoding='utf-8') as obj_info:
obj_info.write(f"1:{account}\n")
obj_info.write(f"2:{email}\n")
obj_info.write(f"3:{account_id}\n")
obj_info.write(f"4:{access_id}\n")
pretty_print3(f"账户: {account}", f"邮箱: {email}", f"accId: {account_id}")
pretty_print3(f"Map: {parse_group}", f"accessId: {access_id}", f"重叠域名: {overlap}")
pretty_print3(f"合作方: {cname_vendor}", f"CNAME: {cname}", f"是否共享CNAME缓存: {common_cname}")
if common_cname == '是':
print(f"共享CNAME缓存域名列表: {common_cnames}")
if parse_group == '':
sys.exit(201)
def domain_info_2(domain_info_log, inp_accid):
with open(domain_info_log) as obj_domain_info:
domain_infos=json.loads(obj_domain_info.read())
# 遍历账号名称相同的客户使用request id确定唯一的信息
for acc_info in domain_infos['results']['items']:
if acc_info['accountId'] == inp_accid:
managerArea = acc_info['managerArea']
platformVipLevel = acc_info['platformVipLevel']
businessLevel = acc_info['businessLevel']
ctYunVipLevel = acc_info['ctYunVipLevel']
clientId = acc_info['clientId']
accountType = acc_info['accountType']
clientInsideName = acc_info['clientInsideName']
maintainAfterName = acc_info['maintainAfterName']
maintainAfterPhone = acc_info['maintainAfterPhone']
maintainAfterEmail = acc_info['maintainAfterEmail']
managerVendor = acc_info['managerVendor']
pretty_print3(f"售后姓名: {maintainAfterName}", f"售后电话: {maintainAfterPhone}", f"售后邮箱: {maintainAfterEmail}")
pretty_print3(f"天翼云VIP等级: {ctYunVipLevel}", f"平台VIP等级: {platformVipLevel}", f"客户VIP等级: {businessLevel}")
pretty_print3(f"clientId: {clientId}", f"客户内部名称: {clientInsideName}", f"商务渠道: {managerArea}")
pretty_print2(f"承载平台: {managerVendor}", f"客户类型: {accountType}")
break
def domain_info_3(domain_info_log, inp_accid):
with open(domain_info_log) as obj_domain_info:
domain_infos=json.loads(obj_domain_info.read())
# 遍历重叠域名使用request id确定唯一的信息
for domain_info in domain_infos['data']['results']:
if domain_info['accountId'] == inp_accid:
statusName = domain_info['statusName']
ipv6Switch = domain_info['ipv6Switch']
productName = domain_info['productName']
innerTestDomain = domain_info['innerTestDomain']
ipv6Switch = '是' if ipv6Switch == 1 else '否'
innerTestDomain = '是' if innerTestDomain == 1 else '否'
pretty_print2(f"域名状态: {statusName}", f"是否开启IPv6: {ipv6Switch}")
pretty_print2(f"是否内部测试域名: {innerTestDomain}", f"产品类型: {productName}")
break
def domain_info_4(domain_info_log, inp_accid):
with open(domain_info_log) as obj_domain_info:
domain_infos=json.loads(obj_domain_info.read())
# 遍历账号名称相同的客户使用request id确定唯一的信息
if len(domain_infos['result']) == 0:
sys.exit(204)
else:
for domain_info in domain_infos['result']:
if domain_info['account_id'] == inp_accid:
# 1. 回源地址
origin = []
for ori in domain_info['origin']:
origin.append(ori['role'] + ':' + ori['origin'])
# 2. 访问协议 + 端口
http_visit = domain_info['basic_conf']['http_server_port'] if domain_info['http_status'] == 'on' else 'X'
https_visit = domain_info['basic_conf']['https_server_port'] if domain_info['https_status'] == 'on' else 'X'
url_visit = str(http_visit) + '/' + str(https_visit)
# 3. 回源协议 + 端口
https_origin = str(domain_info['basic_conf']['https_origin_port'])
http_origin = str(domain_info['basic_conf']['http_origin_port'])
if domain_info['backorigin_protocol'] == 'follow_request':
url_origin = http_origin + '/' + https_origin
elif domain_info['backorigin_protocol'] == 'http':
url_origin = http_origin + '/X'
elif domain_info['backorigin_protocol'] == 'https':
url_origin = 'X/' + https_origin
else:
print("回源协议除了http/https/follow_request之外还有第四种方式请补充...")
sys.exit(201)
# 4. 证书备注名
cert_name = domain_info['cert_name']
# 6. 预部署资源池
pre_node_list = domain_info['pre_node_list']
off_pool = get_respool("respool.log", pre_node_list, '预部署资源池')
# 7. 全局资源池
node_list = domain_info['node_list']
on_pool = get_respool("respool.log", node_list, '全局资源池')
# 8. 是否热加载
conf_order_id = domain_info['conf_order_id']
conf_order_id = '' if conf_order_id == -1 else ''
pretty_print2(f"证书备注名: {cert_name}", f"热加载: {conf_order_id}")
pretty_print2(off_pool, on_pool)
print(f"回源地址: {origin}")
print(f"http/https访问: {url_visit}")
print(f"http/https回源: {url_origin}")
# 5. 父方案 parent_id
parent_id = domain_info['parent_id']
get_parent("parent.log", parent_id)
break
def domain_info_5(domain_info_log, inp_accid):
with open(domain_info_log) as obj_domain_info:
domain_infos=json.loads(obj_domain_info.read())
# 遍历账号名称相同的客户使用request id确定唯一的信息
for domain_info in domain_infos['result']:
if domain_info['account_id'] == inp_accid:
with open("info.log", 'w', encoding='utf-8') as obj_info:
obj_info.write(f"4:{domain_info['domain_id']}\n")
break
# 如下accid没用到
def domain_info_6(domain_info_log, inp_accid):
with open(domain_info_log) as obj_domain_info:
domain_info=json.loads(obj_domain_info.read())['result']
# 推拉流模式
push_stream_domain = ''
pull_stream_mode = domain_info['base_conf']['pull_stream_mode']
if pull_stream_mode == 0:
pull_stream_mode = "直播拉流(推拉流)"
push_stream_domain = domain_info['base_conf']['push_stream_domain']
elif pull_stream_mode == 1:
pull_stream_mode = "直播拉流(回源拉流)"
else:
pull_stream_mode = "直播推流"
# 证书备注名
if domain_info['protocol_control']['https_switch'] == 1:
cert_name = domain_info['protocol_control']['cert_name']
else:
cert_name = '无绑定证书'
pretty_print3(f"推拉流模式: {pull_stream_mode}", f"推流域名: {push_stream_domain}", f"证书备注名: {cert_name}")
# 预部署资源池
pre_node_list = domain_info['pre_resouce_id']
off_pool = get_respool("respool.log", pre_node_list, '预部署资源池')
# 全局资源池
node_list = domain_info['resouce_id']
on_pool = get_respool("respool.log", node_list, '全局资源池')
pretty_print2(off_pool, on_pool)
# 回源模式
origin_mode = domain_info['base_conf']['origin_mode']
for mode in origin_mode:
print(f"回源模式: {mode}")
mode_desc = domain_info['base_conf'][f'{mode}_origin']
for ori in mode_desc:
for k, v in ori.items():
if v != '':
print(f"{k}: {v}")
# 父方案 parent_id
parent_id = domain_info['parent_id']
get_parent("parent.log", parent_id)
def domain_map_info(domain_map_log, flg):
with open(domain_map_log) as obj_domain_map_log:
map_info=json.loads(obj_domain_map_log.read())
# 判断是否是重叠域名
parse_detail=map_info['parse_detail']
if int(flg) == 0:
print('------------------------------分区域解析------------------------------')
for item in parse_detail:
pretty_print3(item['area_cnname'], item['type'], item['value'], 1)
# write to file here
print('----------------------------------------------------------------------')
else:
with open('map.log', 'w') as obj_map_log:
for item in parse_detail:
obj_map_log.write(f"{item['value']}\n")
def map_info(map_info_log, inp_domain):
with open(map_info_log) as obj_map_info:
map_infos=json.loads(obj_map_info.read())
# 判断是否是重叠域名
multi = len(map_infos['data'])
if multi == 0:
print(f"{c_br}未找到该域名相关信息,可以登录网页系统查看是否有配置解析组,退出...{c_e}")
sys.exit(205)
inp_index = 1
if multi > 1:
print(f"{c_bp}该域名是重叠域名,请确认要查询域名的归属账号: {c_e}")
index = 1
flag = 0
# 遍历重叠域名的账号邮箱,需要输入确定的序号
for map_info in map_infos['data']:
print(f"账号{index} - ", end="")
for find_it in map_info['domains']:
if find_it['domain'] == inp_domain:
pretty_print3(f"账户: {find_it['account_name']}", f"邮箱: {find_it['email']}", f"accid: {find_it['account_id']}")
flag = 0
break
flag = 1
if flag == 1:
print()
flag = 0
index += 1
print(f"{c_by}请输入要查询域名归属账号的序号(e.g. 1, 2, 3...): {c_e}")
# 验证index是合法输入的逻辑
inp_index = input()
if inp_index.isdigit() and 1 <= int(inp_index) and int(inp_index) < index:
inp_index = int(inp_index)
else:
print(f"{c_br}请输入正确的序号,{c_e}", end="")
sys.exit(200)
inp_index -= 1
inp_index = inp_index if inp_index != 0 else 0
parse_group = map_infos['data'][inp_index]['parse_group_name']
common_cname = len(map_infos['data'][inp_index]['domains'])
for find_it in range(common_cname):
if map_infos['data'][inp_index]['domains'][find_it]['domain'] == inp_domain:
break
account_id = map_infos['data'][inp_index]['domains'][find_it]['account_id']
access_id = map_infos['data'][inp_index]['domains'][find_it]['access_id']
with open("info.log", 'w', encoding='utf-8') as obj_info:
obj_info.write(f"3:{account_id}\n")
obj_info.write(f"4:{access_id}\n")
if parse_group != '':
with open("map.log", 'w', encoding='utf-8') as obj_map:
obj_map.write(f"{parse_group}\n")
else:
sys.exit(201)
def domain_config_cdn(domain_info_log, inp_accid, domain):
with open(domain_info_log) as obj_domain_info:
domain_infos=json.loads(obj_domain_info.read())
# 遍历账号名称相同的客户使用request id确定唯一的信息
if len(domain_infos['result']) == 0:
sys.exit(204)
else:
for domain_info in domain_infos['result']:
config_json = json.dumps(domain_info)
os.environ['config_json'] = config_json
os.environ['domain_json'] = domain
if domain_info['account_id'] == inp_accid:
os.system("echo $config_json > $domain_json")
break
def domain_config_live(domain_info_log, domain):
with open(domain_info_log) as obj_domain_info:
domain_info=json.loads(obj_domain_info.read())['result']
config_json = json.dumps(domain_info)
os.environ['config_json'] = config_json
os.environ['domain_json'] = domain
os.system("echo $config_json > $domain_json")
def parent_info_4(domain_info_log, inp_accid):
with open(domain_info_log) as obj_domain_info:
domain_infos=json.loads(obj_domain_info.read())
# 遍历账号名称相同的客户使用request id确定唯一的信息
if len(domain_infos['result']) == 0:
sys.exit(204)
else:
for domain_info in domain_infos['result']:
if domain_info['account_id'] == inp_accid:
# 5. 父方案 parent_id
parent_id = domain_info['parent_id']
get_parent_info("parent.log", parent_id)
break
def parent_info_5(domain_info_log, inp_accid):
with open(domain_info_log) as obj_domain_info:
domain_infos=json.loads(obj_domain_info.read())
# 遍历账号名称相同的客户使用request id确定唯一的信息
for domain_info in domain_infos['result']:
if domain_info['account_id'] == inp_accid:
with open("info.log", 'w', encoding='utf-8') as obj_info:
obj_info.write(f"2:{domain_info['domain_id']}\n")
break
# 如下accid没用到
def parent_info_6(domain_info_log, inp_accid):
with open(domain_info_log) as obj_domain_info:
domain_info=json.loads(obj_domain_info.read())['result']
# 父方案 parent_id
parent_id = domain_info['parent_id']
get_parent_info("parent.log", parent_id)
def get_parent_info(parent_log, inp_parent_id):
parent_all = ["dyn_first_parent", "dyn_first_parent_all", "dyn_first_parent_backups", "dyn_second_parent", "first_parent", "first_parent_backups", "pre_first_parent", "pre_first_parent_backups", "pre_second_parent", "pre_second_parent_backups", "second_parent", "second_parent_backups"]
parent_related = {}
with open(parent_log) as obj_parent:
parents=json.loads(obj_parent.read())
for parent in parents['result']:
if parent['parent_id'] == inp_parent_id:
parent_name = parent['parent_name']
index = 1
for parent_en in parent_all:
if parent[parent_en] != '':
with open("cmap", 'a', encoding='utf-8') as obj_cmap:
obj_cmap.write(f"{index}. {parent[parent_en]}\n")
index += 1
break
def quit(signum, frame):
print("Bye!")
sys.exit(205)
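# The pretty_print helpers below correct field widths for CJK text: a Chinese character is
# 3 bytes in UTF-8 but counts as 1 in len(), so (utf8_len - len) / 2 estimates how many
# extra display columns the string occupies and is subtracted from the field size.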
def pretty_print2(col_1, col_2):
len_1 = len(col_1)
len_2 = len(col_2)
len_1_utf8 = len(col_1.encode('utf-8'))
len_2_utf8 = len(col_2.encode('utf-8'))
size_1 = 48 - int((len_1_utf8 - len_1) / 2)
size_2 = 40 - int((len_2_utf8 - len_2) / 2)
print(f"%-{size_1}s%-{size_2}s" % (col_1, col_2))
def pretty_print3(col_1, col_2, col_3, col_4=0):
len_1 = len(col_1)
len_2 = len(col_2)
len_3 = len(col_3)
len_1_utf8 = len(col_1.encode('utf-8'))
len_2_utf8 = len(col_2.encode('utf-8'))
len_3_utf8 = len(col_3.encode('utf-8'))
size_1 = 48- int((len_1_utf8 - len_1) / 2)
size_2 = 40 - int((len_2_utf8 - len_2) / 2)
size_3 = 30 - int((len_3_utf8 - len_3) / 2)
if col_4 == 0:
print(f"%-{size_1}s%-{size_2}s%-{size_3}s" % (col_1, col_2, col_3))
else:
size_1 = 16- int((len_1_utf8 - len_1) / 2)
size_2 = 10 - int((len_2_utf8 - len_2) / 2)
size_3 = 60 - int((len_3_utf8 - len_3) / 2)
print(f"%-{size_1}s%-{size_2}s%-{size_3}s" % (col_1, col_2, col_3))
def pretty_print_data(width: list, cols: list):
for i in range(len(cols)):
len_text = len(cols[i])
len_utf8 = len(cols[i].encode('utf-8'))
len_size = width[i] - int((len_utf8 - len_text) / 2)
if i == 8:
if float(cols[i]) < 10:
print(f"{c_br}%-{len_size}s{c_e}" % (cols[i]), end='')
elif float(cols[i]) < 30:
print(f"{c_by}%-{len_size}s{c_e}" % (cols[i]), end='')
else:
print(f"{c_bg}%-{len_size}s{c_e}" % (cols[i]), end='')
else:
print(f"%-{len_size}s" % (cols[i]), end='')
print()
def pretty_print_title(width: list, cols: list):
for i in range(len(cols)):
len_text = len(cols[i])
len_utf8 = len(cols[i].encode('utf-8'))
len_size = width[i] - int((len_utf8 - len_text) / 2)
print(f"{c_title}%-{len_size}s{c_e}" % (cols[i]), end='')
print()
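# Bandwidth/coverage tables: pretty_print_data colour-codes column 9 (冗余带宽) -- below 10 red,
# below 30 yellow, otherwise green -- and the header row is repeated every 25 data rows.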
def fmt_print_global(res_map):
title = ["大区", "省份", "View", "组名", "VIP", "LAKE", "压测带宽", "实时带宽", "冗余带宽", "昨晚高峰", "昨中高峰"]
width = [5, 18, 25, 25, 32, 15, 10, 10, 10, 10, 10]
pretty_print_title(width, title)
with open(res_map) as obj_res_map:
lines = obj_res_map.readlines()
count = 1
for line in lines:
pretty_print_data(width, line.strip().split())
count += 1
if count % 25 == 0:
pretty_print_title(width, title)
def fmt_print_partial(res_map, view, query, domain, domain_map):
if os.path.getsize(view):
title = ["大区", "省份", "View", "组名", "VIP", "LAKE", "压测带宽", "实时带宽", "冗余带宽", "昨晚高峰", "昨中高峰"]
width = [5, 18, 25, 25, 32, 15, 10, 10, 10, 10, 10]
pretty_print_title(width, title)
with open(res_map) as obj_res_map, open(view) as obj_view:
views = obj_view.readlines()
lines = obj_res_map.readlines()
count = 1
for view_s in views:
for line in lines:
c_line = line.strip().split()
if c_line[2] == view_s.strip():
pretty_print_data(width, c_line)
count += 1
if count % 25 == 0:
pretty_print_title(width, title)
if count == 1:
print(f"{c_br}域名{domain}的解析组{domain_map}中,不存在{query}地区的覆盖节点,请确认。{c_e}\n")
sys.exit(206)
else:
print(f"{c_br}请按照规则,输入正确的查询条件,退出...{c_e}")
sys.exit(202)
def main():
option = sys.argv[1]
if option == '--domain_info_1':
domain_info_log = sys.argv[2]
inp_domain = sys.argv[3]
domain_info_1(domain_info_log, inp_domain)
elif option == '--domain_info_2':
domain_info_log = sys.argv[2]
inp_accid = sys.argv[3]
domain_info_2(domain_info_log, inp_accid)
elif option == '--domain_info_3':
domain_info_log = sys.argv[2]
inp_accid = sys.argv[3]
domain_info_3(domain_info_log, inp_accid)
elif option == '--domain_info_4':
domain_info_log = sys.argv[2]
inp_accid = sys.argv[3]
domain_info_4(domain_info_log, inp_accid)
elif option == '--domain_info_5':
domain_info_log = sys.argv[2]
inp_accid = sys.argv[3]
domain_info_5(domain_info_log, inp_accid)
elif option == '--domain_info_6':
domain_info_log = sys.argv[2]
inp_accid = sys.argv[3]
domain_info_6(domain_info_log, inp_accid)
elif option == '--domain_map_info':
domain_map_log = sys.argv[2]
flg = sys.argv[3]
domain_map_info(domain_map_log, flg)
elif option == '--map_info':
map_info_log = sys.argv[2]
inp_accid = sys.argv[3]
map_info(map_info_log, inp_accid)
elif option == '--format-global':
res_map = sys.argv[2]
fmt_print_global(res_map)
elif option == '--format-partial':
query = sys.argv[2]
view = sys.argv[3]
res_map = sys.argv[4]
domain = sys.argv[5]
domain_map = sys.argv[6]
fmt_print_partial(res_map, view, query, domain, domain_map)
elif option == '--domain_config_cdn':
domain_info_log = sys.argv[2]
inp_accid = sys.argv[3]
domain = sys.argv[4]
domain_config_cdn(domain_info_log, inp_accid, domain)
elif option == '--domain_config_live':
domain_info_log = sys.argv[2]
domain = sys.argv[3]
domain_config_live(domain_info_log, domain)
elif option == '--parent_info_4':
domain_info_log = sys.argv[2]
inp_accid = sys.argv[3]
parent_info_4(domain_info_log, inp_accid)
elif option == '--parent_info_5':
domain_info_log = sys.argv[2]
inp_accid = sys.argv[3]
parent_info_5(domain_info_log, inp_accid)
elif option == '--parent_info_6':
domain_info_log = sys.argv[2]
inp_accid = sys.argv[3]
parent_info_6(domain_info_log, inp_accid)
if __name__ == "__main__":
signal.signal(signal.SIGINT, quit)
main()

55
old/ctc/group_chatbot_xibei.sh Normal file

@ -0,0 +1,55 @@
#!/bin/bash
#===================================================================
# Filename : group_chatbot_xibei.sh
# Function :
# Usage :
# Author : Manford Fan
# Date : 2023-05-12 08:59
# Version : Version 0.1
# Disclaimer : The author is NOT responsible for any loss caused
# by the user's own operations.
# And More : If you find there are some bugs in this script
# Or you have better ideas, please do contact me
# via E-mail -- mffan0922@163.com
#===================================================================
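# Post a weekly-report reminder to a WeCom (企业微信) group chat via webhook; the first
# argument selects the reminder text (1 = Thursday nudge, 2 = repeat nudge).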
function sendMsg() {
# 个人测试
# curl 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=ddea3f5f-fbfc-4c21-994a-71e9fc50e4ef' \
# -H 'Content-Type: application/json' \
# -d '
# {
# "msgtype": "markdown",
# "markdown": {
# "content": "**'"$alarmTitle"'**\n
# > <font color=\"warning\">'"$alarmInfo"'</font>"
# }
# }' > /dev/null 2>&1
# 群hook
curl 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=5c5f18f1-8494-4f42-b7f1-9ef7295b0578' \
-H 'Content-Type: application/json' \
-d '
{
"msgtype": "markdown",
"markdown": {
"content": "**'"$alarmTitle"'**\n
> <font color=\"warning\">'"$alarmInfo"'</font>"
}
}' > /dev/null 2>&1
}
time_opt=$1
alarmTitle="周报提醒"
if [[ $time_opt == '1' ]]; then
alarmInfo='周四了,请各位及时填写周报~~'
elif [[ $time_opt == '2' ]]; then
alarmInfo='记得写周报,记得写周报,记得写周报~~'
else
:
fi
sendMsg "$alarmTitle" "$alarmInfo"

1151
old/ctc/ids.sh Normal file

File diff suppressed because it is too large

284
old/ctc/infos.sh Normal file

@ -0,0 +1,284 @@
#!/bin/bash
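# Query the CTYun platform APIs for a domain: account and after-sales contacts, product type,
# resource pools, parent scheme and parse-group details. Also provides --map and --parent
# modes used by the companion tooling (see the map.sh comment further down).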
# Capture Ctrl+C and stop the whole script
function onCtrlC () {
exec 3>&2 # 3 is now a copy of 2
exec 2> /dev/null # 2 now points to /dev/null
kill ${bg_pids} ${progress_pid} >/dev/null 2>&1
sleep 1 # sleep to wait for process to die
exec 2>&3 # restore stderr to saved
exec 3>&- # close saved version
echo
echo -e "${c_bir}Ctrl+C is captured, exiting...\n${c_e}"
exit 100
}
function infos() {
# Check whether the queried domain exists on the platform; domain.list is refreshed hourly by task.sh
res=`cat $data/domain.list | grep -w "$domain"`
if [[ $res == '' ]]; then
echo -e "${c_br}该域名[$domain]未在天翼平台配置,一个小时内新增的域名无法查询,退出...${c_e}"
echo -e "${c_br}只接收一个参数,请输入要查询的域名\ne.g. infos www.ctyun.cn\n${c_e}"
exit 247
fi
# ----------------------------------------------------------------------------------------
# 获取域名信息 - part 1
curl "https://dcp.ctcdn.cn/traffic-control-api/v2/access/list?access_version_id=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjYzfQ.XZ_nNtRWoBRcPBM-bHAG_ciohkNh69n4AVHwV755r6Q' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_1.log > domain_info_1.response 2>&1
# 判断响应是否200
cat $trash/domain_info_1.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名-part 1信息失败退出...${c_e}"; exit 246; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_1 domain_info_1.log $domain
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_1 domain_info_1.log $domain
r_code=$?
if [[ $r_code -eq 205 ]]; then
exit 205
elif [[ $r_code -eq 201 ]]; then
accessid=`cat info.log | awk -F ':' '$1==4 {print $2}'`
curl "https://dcp.ctcdn.cn/traffic-control-api/v2/access/parseDetail?access_id=$accessid" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjYzfQ.XZ_nNtRWoBRcPBM-bHAG_ciohkNh69n4AVHwV755r6Q' -H 'content-type: application/json;charset=UTF-8' -vo domain_map_info.log > domain_map_info.response 2>&1
# 判断响应是否200
cat $trash/domain_map_info.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名-map信息失败退出...${c_e}"; exit 206; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_map_info domain_map_info.log 0
elif [[ $r_code -ne 0 ]]; then
echo -e "${c_br}处理域名-part 1信息失败退出...${c_e}"
exit 242
fi
# ----------------------------------------------------------------------------------------
# 获取域名信息 - part 2
account=`cat info.log | awk -F ':' '$1==1 {print $2}'`
accid=`cat info.log | awk -F ':' '$1==3 {print $2}'`
curl 'https://bs.ctcdn.cn/api/v3/clientInfo/searchClientInfo' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjI1NH0.9Zw66R0R85avd92qzO-3KQ4DJ2zYXL4ght5bH41iTfA' -H 'content-type: application/json;charset=UTF-8' -X POST -d '{"clientInfo":[{"key":"clientCnname", "value": "'$account'"}],"columnList":["openTime", "accountType", "accountResource", "accountEmail"]}' -vo domain_info_2.log > domain_info_2.response 2>&1
# 判断响应是否200
cat $trash/domain_info_2.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 2失败退出...${c_e}"; exit 245; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_2 domain_info_2.log $accid
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_2 domain_info_2.log $accid
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 2信息失败退出...${c_e}"; exit 241; }
# ----------------------------------------------------------------------------------------
# 获取域名信息 - part 3
curl "http://bs.ctcdn.cn/api/v3/manageDomain/list?partner=&sales_channel=&status=&productCode=&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjI1NH0.9Zw66R0R85avd92qzO-3KQ4DJ2zYXL4ght5bH41iTfA' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_3.log > domain_info_3.response 2>&1
# 判断响应是否200
cat $trash/domain_info_3.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 3失败退出...${c_e}"; exit 244; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_3 domain_info_3.log $accid
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_3 domain_info_3.log $accid
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 3信息失败退出...${c_e}"; exit 240; }
# ----------------------------------------------------------------------------------------
# 获取父方案信息
curl 'https://lasc-new.ctcdn.cn/v1/domain/internal/parent_draft' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo parent.log > parent.response 2>&1
# 判断响应是否200
cat $trash/parent.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取父方案信息失败,退出...${c_e}"; exit 234; }
# ----------------------------------------------------------------------------------------
# 获取资源池信息
curl 'http://rap.ctcdn.cn/v2/rapApi/resourcePoolToResourceGroup' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE1N30.IXVEAglOYm8bUInW4uXqDugBnd6POouBK8q4z_HItns' -H 'content-type: application/json;charset=UTF-8' -vo respool.log > respool.response 2>&1
# 判断响应是否200
cat $trash/respool.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取资源池信息失败,退出...${c_e}"; exit 233; }
# ----------------------------------------------------------------------------------------
# 获取域名信息 - part 4
curl "https://confmanager.ctcdn.cn/v1/domain/conf/query_union?precise_query=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjQxNn0.KEFh_yg3J1y8sL7s3X_8jIR8GGq88A89b7J5YhVYVf8' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_4.log > domain_info_4.response 2>&1
# 判断响应是否200
cat $trash/domain_info_4.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 4失败退出...${c_e}"; exit 243; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_4 domain_info_4.log $accid
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_4 domain_info_4.log $accid
r_code=$?
if [[ $r_code -eq 204 ]]; then
# ----------------------------------------------------------------------------------------
# 获取域名信息 - part 5
curl "https://lasc-new.ctcdn.cn/v1/domain/conf/op_query_union?page_size=20&page=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_5.log > domain_info_5.response 2>&1
# 判断响应是否200
cat $trash/domain_info_5.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 5失败退出...${c_e}"; exit 235; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_5 domain_info_5.log $accid
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_5 domain_info_5.log $accid
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 5信息失败退出...${c_e}"; exit 237; }
# ----------------------------------------------------------------------------------------
# 获取域名信息 - part 6
domain_id=`cat info.log | awk -F ':' '$1==4 {print $2}'`
curl "https://lasc-new.ctcdn.cn/v1/domain/internal/domain_config?domain_id=$domain_id" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_6.log > domain_info_6.response 2>&1
# 判断响应是否200
cat $trash/domain_info_6.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 6失败退出...${c_e}"; exit 238; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_6 domain_info_6.log $accid
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_6 domain_info_6.log $accid
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 6信息失败退出...${c_e}"; exit 236; }
elif [[ $r_code -ne 0 ]]; then
echo -e "${c_br}处理域名-part 4信息失败退出...${c_e}"
exit 239
else
exit 0
fi
}
# map.sh uses the following function to obtain parse-group information
function map() {
# ----------------------------------------------------------------------------------------
# 获取域名信息 - part 1 - 其中包括解析组信息,但有可能是重叠域名
curl "https://dcp.ctcdn.cn/traffic-control-api/v2/access/list?access_version_id=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjYzfQ.XZ_nNtRWoBRcPBM-bHAG_ciohkNh69n4AVHwV755r6Q' -H 'content-type: application/json;charset=UTF-8' -vo map_info.log > map_info.response 2>&1
# 判断响应是否200
cat map_info.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名MAP信息失败退出...${c_e}"; exit 232; }
# 处理接口获取的信息,拿到正确的解析组
python3 /usr/local/script/fanmf11/get_infos.py --map_info map_info.log $domain
# python3 /home/fanmf11/fanmf11/get_infos.py --map_info map_info.log $domain
r_code=$?
if [[ $r_code -eq 205 ]]; then
exit 205
elif [[ $r_code -eq 201 ]]; then
accessid=`cat info.log | awk -F ':' '$1==4 {print $2}'`
curl "https://dcp.ctcdn.cn/traffic-control-api/v2/access/parseDetail?access_id=$accessid" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjYzfQ.XZ_nNtRWoBRcPBM-bHAG_ciohkNh69n4AVHwV755r6Q' -H 'content-type: application/json;charset=UTF-8' -vo domain_map_info.log > domain_map_info.response 2>&1
# 判断响应是否200
cat $trash/domain_map_info.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名-map信息失败退出...${c_e}"; exit 206; }
python3 /usr/local/script/fanmf11/get_infos.py --domain_map_info domain_map_info.log 1
elif [[ $r_code -ne 0 ]]; then
echo -e "${c_br}处理域名MAP信息失败退出...${c_e}"
exit 231
fi
}
function parent() {
# ----------------------------------------------------------------------------------------
# 获取父方案信息
curl 'https://lasc-new.ctcdn.cn/v1/domain/internal/parent_draft' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo parent.log > parent.response 2>&1
# 判断响应是否200
cat $trash/parent.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取父方案信息失败,退出...${c_e}"; exit 234; }
# ----------------------------------------------------------------------------------------
# 获取域名信息 - part 4
accid=`cat ../$parent_dir/info.log | awk -F ':' '$1==3 {print $2}'`
curl "https://confmanager.ctcdn.cn/v1/domain/conf/query_union?precise_query=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjQxNn0.KEFh_yg3J1y8sL7s3X_8jIR8GGq88A89b7J5YhVYVf8' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_4.log > domain_info_4.response 2>&1
# 判断响应是否200
cat $trash/domain_info_4.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 4失败退出...${c_e}"; exit 243; }
python3 /usr/local/script/fanmf11/get_infos.py --parent_info_4 domain_info_4.log $accid
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_4 domain_info_4.log $accid
r_code=$?
if [[ $r_code -eq 204 ]]; then
# ----------------------------------------------------------------------------------------
# 获取域名信息 - part 5
curl "https://lasc-new.ctcdn.cn/v1/domain/conf/op_query_union?page_size=20&page=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_5.log > domain_info_5.response 2>&1
# 判断响应是否200
cat $trash/domain_info_5.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 5失败退出...${c_e}"; exit 235; }
python3 /usr/local/script/fanmf11/get_infos.py --parent_info_5 domain_info_5.log $accid
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_5 domain_info_5.log $accid
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 5信息失败退出...${c_e}"; exit 237; }
# ----------------------------------------------------------------------------------------
# 获取域名信息 - part 6
domain_id=`cat info.log | awk -F ':' '$1==2 {print $2}'`
curl "https://lasc-new.ctcdn.cn/v1/domain/internal/domain_config?domain_id=$domain_id" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_6.log > domain_info_6.response 2>&1
# 判断响应是否200
cat $trash/domain_info_6.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 6失败退出...${c_e}"; exit 238; }
python3 /usr/local/script/fanmf11/get_infos.py --parent_info_6 domain_info_6.log $accid
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_6 domain_info_6.log $accid
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 6信息失败退出...${c_e}"; exit 236; }
elif [[ $r_code -ne 0 ]]; then
echo -e "${c_br}处理域名-part 4信息失败退出...${c_e}"
exit 239
else
:
fi
}
# Self-defined colour shortcuts
c_br='\e[1;31m' # bold red
c_bg='\e[1;32m' # bold green
c_by='\e[1;33m' # bold yellow
c_bb='\e[1;34m' # bold blue
c_bp='\e[1;35m' # bold purple
c_bc='\e[1;36m' # bold cyan
c_bir='\e[1;3;31m' # * bold italic red
c_bib='\e[1;3;34m' # * bold italic cyan
c_bic='\e[1;3;36m' # bold italic cyan
c_e='\e[0m' # reset
# variable initialization
TS=`date +%s%N`
toolbox='/usr/local/script/fanmf11' # *
data='/usr/local/script/fanmf11/data' # *
host=`whoami` # * executing user
trash="/usr/local/script/fanmf11/trash/$host/$TS" # * per-user temporary work area
if [[ -d $trash ]]; then
echo -e "${c_br}对于同一个用户,同一时间只能运行一个实例,请重新运行...${c_e}"
exit 245
else
mkdir -p $trash
cd $trash && cd ..
docs=`ls`
for doc in $docs; do
[[ -f $doc ]] && rm -rf $doc
done
folders=`ls -t`
while [[ `echo $folders | awk '{print NF}'` -gt 29 ]]; do
folder=`ls -t | tail -1`
rm -rf $folder
folders=`ls -t`
done
cd $trash && touch infos
fi
# set a trap for Ctrl+C
trap 'onCtrlC' INT
# ----------------------------------------------------------------------------------------
# Validate the number of arguments
if [[ $# -eq 1 ]]; then
domain=$1
infos
elif [[ $# -eq 3 && $1 == '--map' ]]; then
domain=$2 && map_dir=$3
map
cp map.log ../$map_dir/
cp info.log ../$map_dir/
elif [[ $# -eq 3 && $1 == '--parent' ]]; then
domain=$2 && parent_dir=$3
parent
cp ./cmap ../$parent_dir/
else
echo -e "${c_br}只接收一个参数,请输入要查询的域名\ne.g. infos www.ctyun.cn\n${c_e}"
exit 249
fi

396
old/ctc/ips.sh Normal file

@ -0,0 +1,396 @@
#!/bin/bash
# Purpose   : decide whether an IP or a cluster belongs to the CTYun (天翼) platform
# Depends on: ip.group / lakes
# Known issues:
# Overall logic:
# 1. The input can be one of four things: IPv4 / IPv6 / English label / Chinese label
# 2. All four cases are ultimately handled by one function -- ipvx_check
# 3. ipvx_check checks whether an IP belongs to the CTYun platform
# 4. It first resolves the IP to the node's English label, then dispatches on the label suffix
# 5. ip.group file:
#    for cache (edge/parent/center) nodes - column 1 is the RIP, column 2 the English label, column 3 the VIP, column 8 the LVS label; the rest are identical
#    for LVS nodes - column 1 is the IP, column 2 the English label
#    other node types are the same as LVS
# 6. lakes file: the meaning of each column is described at the top of the file
# 7. The set of English label suffixes can be listed with `cat ip.group | awk '{print $2}' | awk -F '_' '{print $4}' | sort | uniq`
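# In ipvx_check below, the label suffix (field 4 of the English label: e*, c*, dns*, lvs, ...)
# decides what is printed: edge/center cache clusters report their RIPs, VIPs, the fronting
# LVS IPs and resource pools; dns nodes report RIP/VIP only; other node types report just
# their own information.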
# Self-defined colour shortcuts
c_br="\e[1;31m" # bold red
c_bg="\e[1;32m" # bold green
c_by="\e[1;33m" # bold yellow
c_bp="\e[1;35m" # bold purple
c_iy="\e[3;33m" # italic yellow
c_bir='\e[1;3;31m' # * bold italic red
c_big='\e[1;3;32m' # bold italic green
c_bib='\e[1;3;34m' # * bold italic blue
c_bip='\e[1;3;35m' # bold italic purple
c_bic='\e[1;3;36m' # bold italic cyan
c_e="\e[0m" # reset
# usage
function usage {
echo -e "${c_bg}1. 查找V(R)IP/集群中文名/集群英文名是否是归属天翼云平台${c_e}"
echo -e "${c_bg}2. 查询IP对应的内网IP和主机名只支持IPv4地址${c_e}"
echo -e "${c_iy}实例:${c_e}"
echo -e "${c_iy} ips 59.56.177.149${c_e}"
echo -e "${c_iy} ips ct_fj_fuzhou3_e1${c_e}"
echo -e "${c_iy} ips 福州3${c_e}"
echo -e "${c_iy} ips -m 59.56.177.149${c_e}\n"
echo -e "${c_bp}查询内网IP对应关系功能因线上IPv6的机器没有加白暂不支持获取IPv6主机内网IP...${c_e}"
exit 1
}
# 如果输入是IP则判断该IP是否属于天翼
# 入参-1IP
# 入参-2flg -- 0/1/2
# flg = 0 -- 脚本输入的是IPv4或者IPv6
# flg = 1 -- 脚本输入的是英文节点名
# flg = 2 -- 脚本输入的是中文节点名
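# How it is called from ip_search() below:
#   ipvx_check $tbd $flg     # flg=0: the raw IPv4/IPv6 that was typed in
#   ipvx_check $anyip $flg   # flg=1: an IP taken from the node matched by its English label
#   ipvx_check $anyip $flg   # flg=2: same, looped over every node sharing the Chinese label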
function ipvx_check() {
ipvx=$1
if [[ $flg -eq 0 ]]; then
# 同一个IP可能会过滤出来多个英文节点
labels=`cat $data/ip.group | fgrep -w $ipvx | awk '{print $2}' | sort | uniq`
elif [[ $flg -eq 1 ]]; then
# 确保过滤出来的就是输入的节点名排除其他含有相同IP的节点
labels=`cat $data/ip.group | fgrep -w $ipvx | awk '$2=="'$tbd'" {print $2}' | sort | uniq`
elif [[ $flg -eq 2 ]]; then
# 中文节点名可能对应多个不同节点,全部输出
labels=`cat $data/ip.group | fgrep -w $ipvx | awk '$2=="'$label_single'" {print $2}' | sort | uniq`
fi
[[ $labels == '' ]] && { echo -e "${c_br}$tbd 不是天翼平台的节点/IP退出...${c_e}"; exit 44; }
for label in $labels; do
# 根据后缀输出
# 后缀如果是 -- e/c/n/lvs则输出对应的资源池vipriplvs信息如果一个lvs对应多个边缘节点则全部输出
# 其他后缀则只输出对应节点的信息
postfix=`echo $label | awk -F'_' '{print $4}'`
if [[ $postfix =~ ^c[0-9]{0,2}$ ]]; then
center_name_en=$label
rip=`cat $data/ip.group | awk '$2=="'$center_name_en'"' | awk '{print $1}' | sort`
vip=`cat $data/ip.group | awk '$2=="'$center_name_en'"' | awk '{print $3}' | sort | uniq`
lvs_name=`cat $data/ip.group | awk '$2=="'$center_name_en'"' | awk '{print $8}' | sort | uniq | awk -F ',' '{print $1}'`
lvs=`cat $data/ip.group | awk '$2=="'$lvs_name'"' | awk '{print $1}' | sort`
resource=`cat $data/lakes | grep $center_name_en | awk '{print $6}' | sort | uniq`
center_name_cn=`cat $data/lakes | grep $center_name_en | awk '{print $11}' | sort | uniq`
echo -e "$c_bp[$center_name_en: ${c_bg}RIP]$c_e"
echo $rip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'"&&"'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
echo
echo -e "$c_bp[$center_name_en: ${c_bg}VIP]$c_e"
echo $vip | awk -F ',' '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
echo -e "$c_bp[$lvs_name: ${c_bg}IP]$c_e"
echo $lvs | awk '{for(i=1;i<=NF;i++) print " ", $i}'
echo
echo -e "$c_by[$center_name_cn($center_name_en)所属资源池]$c_e"
echo $resource | awk '{for(i=1;i<=NF;i++) print " ", $i} END{print ""}'
elif [[ $postfix =~ ^dns[0-9]{0,2}$ ]]; then
dns_label=$label
dnsip=`cat $data/ip.group | awk '$2=="'$dns_label'"' | awk '{print $1}' | sort`
dnsrip=`cat $data/ip.group | awk '$2=="'$dns_label'"' | awk '{print $1}' | sort`
dnsvip=`cat $data/ip.group | awk '$2=="'$dns_label'"' | awk '{print $3}' | sort | uniq`
echo -e "$c_bp[$dns_label: ${c_bg}RIP]$c_e"
echo $dnsip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
echo -e "$c_bp[$dns_label: ${c_bg}VIP]$c_e"
echo $dnsvip | awk -F ',' '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
elif [[ $postfix =~ ^e[0-9]{0,2}$ ]]; then
edge_name_en=$label
rip=`cat $data/ip.group | awk '$2=="'$edge_name_en'"' | awk '{print $1}' | sort`
vip=`cat $data/ip.group | awk '$2=="'$edge_name_en'"' | awk '{print $3}' | sort | uniq`
lvs_name=`cat $data/ip.group | awk '$2=="'$edge_name_en'"' | awk '{print $8}' | sort | uniq | awk -F ',' '{print $1}'`
lvs=`cat $data/ip.group | awk '$2=="'$lvs_name'"' | awk '{print $1}' | sort`
resource=`cat $data/lakes | grep $edge_name_en | awk '{print $6}' | sort | uniq`
edge_name_cn=`cat $data/lakes | grep $edge_name_en | awk '{print $11}' | sort | uniq`
echo -e "$c_bp[$edge_name_en: ${c_bg}RIP]$c_e"
echo $rip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
echo
echo -e "$c_bp[$edge_name_en: ${c_bg}VIP]$c_e"
echo $vip | awk -F ',' '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
echo -e "$c_bp[$lvs_name: ${c_bg}IP]$c_e"
echo $lvs | awk '{for(i=1;i<=NF;i++) print " ", $i}'
echo
echo -e "$c_by[$edge_name_cn($edge_name_en)所属资源池]$c_e"
echo $resource | awk '{for(i=1;i<=NF;i++) print " ", $i} END{print ""}'
elif [[ $postfix =~ ^lvs[0-9]{0,2}$ ]]; then
lvs_name=$label
lvs=`cat $data/ip.group | awk '$2=="'$lvs_name'"' | awk '{print $1}' | sort`
level_unknown=`cat $data/ip.group | awk '$8 ~ "'$lvs_name'"' | awk '{print $2}' | sort | uniq`
for unknown_en in $level_unknown; do
rip=`cat $data/ip.group | awk '$8 ~ "'$lvs_name'" && $2 == "'$unknown_en'"' | awk '{print $1}' | sort`
vip=`cat $data/ip.group | awk '$8 ~ "'$lvs_name'" && $2 == "'$unknown_en'"' | awk '{print $3}' | sort | uniq`
resource=`cat $data/lakes | grep $unknown_en | awk '{print $6}' | sort | uniq`
unknown_cn=`cat $data/lakes | grep $unknown_en | awk '{print $11}' | sort | uniq`
echo -e "$c_bp[$unknown_en: ${c_bg}RIP]$c_e"
echo $rip | awk '{for(i=1;i<=NF;i++) print " ", $i}'
echo
echo -e "$c_bp[$unknown_en: ${c_bg}VIP]$c_e"
echo $vip | awk -F ',' '{for(i=1;i<=NF;i++) print " ", $i}'
echo -e "$c_by[$unknown_cn($unknown_en)所属资源池]$c_e"
echo $resource | awk '{for(i=1;i<=NF;i++) print " ", $i} END{print ""}'
done
echo -e "$c_bp[$lvs_name: ${c_bg}IP]$c_e"
echo $lvs | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
echo
elif [[ $postfix =~ ^m[0-9]{0,2}$ ]]; then
mgt_label=$label
mgtip=`cat $data/ip.group | awk '$2=="'$mgt_label'"' | awk '{print $1}' | sort`
echo -e "$c_bp[$mgt_label: ${c_bg}IP]$c_e"
echo $mgtip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
echo
elif [[ $postfix =~ ^mysql[0-9]{0,2}$ ]]; then
mysql_label=$label
mysqlip=`cat $data/ip.group | awk '$2=="'$mysql_label'"' | awk '{print $1}' | sort`
echo -e "$c_bp[$mysql_label: ${c_bg}IP]$c_e"
echo $mysqlip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
elif [[ $postfix =~ ^n[0-9]{0,2}$ ]]; then
nation_name_en=$label
rip=`cat $data/ip.group | awk '$2=="'$nation_name_en'"' | awk '{print $1}' | sort`
vip=`cat $data/ip.group | awk '$2=="'$nation_name_en'"' | awk '{print $3}' | sort | uniq`
lvs_name=`cat $data/ip.group | awk '$2=="'$nation_name_en'"' | awk '{print $8}' | sort | uniq | awk -F ',' '{print $1}'`
lvs=`cat $data/ip.group | awk '$2=="'$lvs_name'"' | awk '{print $1}' | sort`
resource=`cat $data/lakes | grep $nation_name_en | awk '{print $6}' | sort | uniq`
nation_name_cn=`cat $data/lakes | grep $nation_name_en | awk '{print $11}' | sort | uniq`
echo -e "$c_bp[$nation_name_en: ${c_bg}RIP]$c_e"
echo $rip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
echo
echo -e "$c_bp[$nation_name_en: ${c_bg}VIP]$c_e"
echo $vip | awk -F ',' '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
echo -e "$c_bp[$lvs_name: ${c_bg}IP]$c_e"
echo $lvs | awk '{for(i=1;i<=NF;i++) print " ", $i}'
echo
echo -e "$c_by[$nation_name_cn($nation_name_en)所属资源池]$c_e"
echo $resource | awk '{for(i=1;i<=NF;i++) print " ", $i} END{print ""}'
elif [[ $postfix =~ ^prets[0-9]{0,2}$ ]]; then
prets_label=$label
pretsip=`cat $data/ip.group | awk '$2=="'$prets_label'"' | awk '{print $1}' | sort`
echo -e "$c_bp[$prets_label: ${c_bg}IP]$c_e"
echo $pretsip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
elif [[ $postfix =~ ^pretw[0-9]{0,2}$ ]]; then
pretw_label=$label
pretwip=`cat $data/ip.group | awk '$2=="'$pretw_label'"' | awk '{print $1}' | sort`
echo -e "$c_bp[$pretw_label: ${c_bg}IP]$c_e"
echo $pretwip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
elif [[ $postfix =~ ^redis[0-9]{0,2}$ ]]; then
redis_label=$label
redisip=`cat $data/ip.group | awk '$2=="'$redis_label'"' | awk '{print $1}' | sort`
echo -e "$c_bp[$redis_label: ${c_bg}IP]$c_e"
echo $redisip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
elif [[ $postfix =~ ^testts[0-9]{0,2}$ ]]; then
testts_label=$label
testtsip=`cat $data/ip.group | awk '$2=="'$testts_label'"' | awk '{print $1}' | sort`
echo -e "$c_bp[$testts_label: ${c_bg}IP]$c_e"
echo $testtsip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
elif [[ $postfix =~ ^ts[0-9]{0,2}$ ]]; then
ts_label=$label
tsip=`cat $data/ip.group | awk '$2=="'$ts_label'"' | awk '{print $1}' | sort`
echo -e "$c_bp[$ts_label: ${c_bg}IP]$c_e"
echo $tsip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
elif [[ $postfix =~ ^tw[0-9]{0,2}$ ]]; then
tw_label=$label
twip=`cat $data/ip.group | awk '$2=="'$tw_label'"' | awk '{print $1}' | sort`
echo -e "$c_bp[$tw_label: ${c_bg}IP]$c_e"
echo $twip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
elif [[ $postfix =~ ^uatts[0-9]{0,2}$ ]]; then
uatts_label=$label
uattsip=`cat $data/ip.group | awk '$2=="'$uatts_label'"' | awk '{print $1}' | sort`
echo -e "$c_bp[$uatts_label: ${c_bg}IP]$c_e"
echo $uattsip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
else
echo -e "${c_br}${ipvx}${c_e}不属于我司节点如有误判请联系fanmf11@chinatelecom.cn。\n"
exit 92
fi
done
}
function ip_search() {
# 判断如果是IPv4,再判断是否合法
if [[ $tbd =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
ip_1234=`echo $tbd | awk -F '.' '{print $1, $2, $3, $4}'`
for num in $ip_1234; do
if [[ $num -gt 255 ]]; then
echo -e "${c_br}非法请输入有效的IPv4地址。${c_e}"
usage
fi
done
isInner=`echo $ip_1234 | awk '{print $1}'`
if [[ $isInner == '192' ]]; then
echo -e "${c_br}$tbd是内网IP非法请输入有效的外网IPv4地址。${c_e}"
usage
fi
flg=0
ipvx_check $tbd $flg
# 判断如果是IPv6粗略的匹配规则最短11最长39包含数字大小写字母以及英文冒号
elif [[ $tbd =~ ^[0-9a-fA-F:]{11,39}$ ]]; then
flg=0
ipvx_check $tbd $flg
# 判断如果是节点英文标签格式
elif [[ $tbd =~ ^(ct|cu|cm|bgp|ctbgp|cmbgp|cubgp|as|eu|sa|na|cbn|cern)_[a-z]{2,3}_[a-z]{2,20}[0-9]{1,2}_(c|dns|e|lvs|m|mysql|n|prets|pretw|redis|testts|ts|tw|uatts1)[0-9]{0,2}$ ]]; then
anyip=`cat $data/ip.group | awk '$2=="'$tbd'"' | head -n 1 | awk '{print $1}'`
if [[ $anyip == '' ]]; then
echo -e "${c_br}${tbd}${c_e}不属于我司节点如有误判请联系fanmf11@chinatelecom.cn。"
usage
exit 90
fi
flg=1
ipvx_check $anyip $flg
# 剩余的情况一律归结为中文标签格式
else
# 一个中文标签可能会对应着多个不同的节点
label_multi=`cat $data/lakes | awk '$11=="'$tbd'" {print $1}' | sort | uniq`
if [[ $label_multi == '' ]]; then
echo -e "${c_br}${tbd}${c_e}不属于我司节点如有误判请联系fanmf11@chinatelecom.cn。"
usage
exit 91
fi
flg=2
for label_single in $label_multi; do
anyip=`cat $data/ip.group | awk '$2=="'$label_single'"' | head -n 1 | awk '{print $1}'`
if [[ $anyip != '' ]]; then
ipvx_check $anyip $flg
else
echo -e "${c_br}${label_single}节点存在但是无法找到其下IP可使用rip命令尝试再次查询。${c_e}\n"
fi
done
fi
}
function ip_inner() {
> res.log
let number=`cat ips.log | tr -d ' ' | grep -Ev ']' | sed 's/^$/#/g' | awk 'BEGIN{RS="#"} {print NR}' | tail -1`
cat ips.log | tr -d ' ' | grep -Ev ']' | sed 's/^$/#/g' | awk 'BEGIN{RS="#"} {for(i=1;i<=NR;i++) if(i==NR) print $0 > i}'
for i in `seq $number`; do
cat $i | fgrep -q "$tbd"
[[ $? -ne 0 ]] && continue
cat $i | grep -Eo "[0-9a-fA-F:]{11,39}" > ip$i
cat $i | grep -Eo "([0-9]{1,3}\.){3}[0-9]{1,3}" >> ip$i
# 将每一块的IP重新放回文件i并将结果追加到res.log
cat ip$i > $i && cat ip$i >> res.log
done
ip_list=`cat res.log | sort | uniq`
for ipy in $ip_list; do
echo $ipy | grep -Eq "[0-9a-fA-F:]{11,39}"
if [[ $? -eq 0 ]]; then
echo "跳板机无IPv6出口暂不支持获取IPv6主机内网IP..." > inner_$ipy.log
else
ssh -o ConnectTimeout=30 $ipy "hostname; /usr/sbin/ifconfig | grep 'inet 192'" > inner_$ipy.log 2>&1 &
fi
done
wait
echo '-----------------------------------------------------------------------------------------'
printf "${c_bic}%-25s%-20s%-50s\n${c_e}" "IP" "Inner IP" "Hostname"
for ipy in $ip_list; do
cat inner_$ipy.log |grep -iq 'timed out'
res1=$?
cat inner_$ipy.log |grep -iq 'closed by'
res2=$?
cat inner_$ipy.log |grep -iq 'IPv6'
res3=$?
if [[ $res1 -eq 0 ]]; then
printf "${c_bir}%-25s%-20s%-40s${c_e}\n" $ipy "===========>" "Connection timed out during banner exchange"
continue
elif [[ $res2 -eq 0 ]]; then
printf "${c_bir}%-25s%-20s%-40s${c_e}\n" $ipy "===========>" "ssh_exchange_identification: Connection closed by remote host"
continue
elif [[ $res3 -eq 0 ]]; then
printf "${c_bir}%-25s%-20s%-40s${c_e}\n" $ipy "===========>" "IPv6的机器没有加白暂不支持获取IPv6主机内网IP..."
continue
else
host=`cat inner_$ipy.log | fgrep 'in.ctcdn.cn'`
[[ $host == '' ]] && host='-'
inner_ip=`cat inner_$ipy.log | grep 'inet 192' | awk '{print $2}'`
[[ $inner_ip == '' ]] && inner_ip='-'
printf "%-25s%-20s%-40s\n" $ipy $inner_ip $host
fi
done
printf "${c_bic}%-25s%-20s%-50s\n${c_e}" "IP" "Inner IP" "Hostname"
echo '-----------------------------------------------------------------------------------------'
}
toolbox='/usr/local/script/fanmf11/'
data='/usr/local/script/fanmf11/data'
label_single=''
TS=`date +%s%N`
host=`whoami` # * 判断执行用户
trash="/usr/local/script/fanmf11/trash/$host/$TS" # * 每个用户的临时文件存放处
if [[ -d $trash ]]; then
echo -e "${c_br}对于同一个用户,同一时间只能运行一个实例,请重新运行...${c_e}"
exit 245
else
mkdir -p $trash
cd $trash && cd ..
docs=`ls`
for doc in $docs; do
[[ -f $doc ]] && rm -rf $doc
done
folders=`ls -t`
while [[ `echo $folders | awk '{print NF}'` -gt 29 ]]; do
folder=`ls -t | tail -1`
rm -rf $folder
folders=`ls -t`
done
cd $trash && touch ips
fi
# 参数个数必须是一个并把第一个参数赋值给tbd
if [[ $# -eq 1 ]]; then
tbd=$1
ip_search
elif [[ $# -eq 2 && $1 == '-m' ]]; then
tbd=$2
ip_search > ips.log 2>&1
[[ $? -ne 0 ]] && { cat ips.log; exit 211; }
# 判断如果是IPv4,再判断是否合法
if [[ $tbd =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
ip_inner
else
echo -e "${c_br}输入只能是IPv4不接受其他格式的内容。${c_e}\n"
exit 112
fi
else
usage
fi

BIN
old/ctc/jaydiff Executable file

Binary file not shown.

58
old/ctc/logcombo.awk Normal file
View File

@ -0,0 +1,58 @@
#!/usr/bin/awk -f
# lap : 1--overlap 0--non-overlap
# comp: 1--access 0--origin
# pos : 4 | 10 | 11
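# Example invocation (values are hypothetical; unset f* filters fall back to "match anything"):
#   awk -f logcombo.awk -v comp=1 -v atype=combo -v acc=www.example.com -v f00=200 access.log   # print the matching ratio
#   awk -f logcombo.awk -v comp=1 -v atype=logs  -v acc=www.example.com -v f00=200 access.log   # print the matching lines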
BEGIN {
if(f00=="") { t00=1000; } else { t00=4 } # 状态码
if(f01=="") { t01=6; f01s=0; f01e=1000; } else { t01=6 } # 边缘响应时间
if(f02=="") { t02=9; f02s=0; f02e=1000; } else { t02=9 } # 边缘首包
if(f03=="") { t03=1000; } else { t03=10 } # 边缘错误码
if(f04=="") { t04=1000; } else { t04=11 }
if(f05=="") { t05=1000; } else { t05=13 }
if(f06=="") { t06=1000; } else { t06=15 }
if(f07=="") { t07=1000; } else { t07=16 }
if(f08=="") { t08=1000; } else { t08=18 }
if(f09=="") { t09=1000; } else { t09=24 }
if(f10=="") { t10=26; f10s=0; f10e=1000; } else { t10=26 }
if(f11=="") { t11=1000; } else { t11=28 }
if(f13=="") { t13=1000; } else { t13=33 }
if(f14=="") { t14=1000; } else { t14=34 }
if(f28=="") { t28=1000; f28s=0; f28e=1000; } else { t28=5 }
if(f30=="") { t30=1000; } else { t30=7 }
if(f31=="") { t31=1000; } else { t31=8 }
if(f32=="") { t32=1000; } else { t32=10 }
if(f33=="") { t33=1000; } else { t33=11 }
if(f34=="") { t34=1000; } else { t34=46 }
if(f37=="") { t37=1000; f37s=0; f37e=1000; } else { t37=4 }
if(f38=="") { t38=1000; f38s=0; f38e=1000;} else { t38=6 }
if(comp==1) { idn=56 } else if(comp==0) { idn=50 }
number=0
}
{
if(comp==0) {
tt28 = $t28 / 1000
tt37 = $t37 / 1000
tt38 = $t38 / 1000
}
if(comp==1 && atype=="combo" && f00==$t00 && f01s<=$t01 && $t01<=f01e && f02s<=$t02 && $t02<=f02e && f03==$t03 && f04==$t04 && f05==$t05 && f06==$t06 && f07==$t07 && f08==$t08 && f09==$t09 && f10s<=$t10 && $t10<=f10e && f11==$t11 && f13==$t13 && f14==$t14 && acc==$idn) {
number++
} else if(comp==1 && atype=="logs" && f00==$t00 && f01s<=$t01 && $t01<=f01e && f02s<=$t02 && $t02<=f02e && f03==$t03 && f04==$t04 && f05==$t05 && f06==$t06 && f07==$t07 && f08==$t08 && f09==$t09 && f10s<=$t10 && $t10<=f10e && f11==$t11 && f13==$t13 && f14==$t14 && acc==$idn) {
print $0
} else if(comp==0 && atype=="combo" && f28s<=tt28 && tt28<=f28e && f30==$t30 && f31==$t31 && f32==$t32 && f33==$t33 && f34==$t34 && f37s<=tt37 && tt37<=f37e && f38s<=tt38 && tt38<=f38e && acc==$idn) {
number++
} else if(comp==0 && atype=="logs" && f28s<=tt28 && tt28<=f28e && f30==$t30 && f31==$t31 && f32==$t32 && f33==$t33 && f34==$t34 && f37s<=tt37 && tt37<=f37e && f38s<=tt38 && tt38<=f38e && acc==$idn) {
print $0
}
}
END {
if(atype=="combo")
printf "%-18s%-8s%-s\n", "符合上述条件的日志占比 -- ", number, number/NR*100"%"
}

25
old/ctc/logcommon.awk Normal file
View File

@ -0,0 +1,25 @@
#!/usr/bin/awk -f
# lap : 1--overlap 0--non-overlap
# comp: 1--access 0--origin
# code:
# non-blank -- specify status code
# blank -- not specify status code
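# Example (hypothetical values): distribution of field $pos over one domain's 404 access-log lines:
#   awk -f logcommon.awk -v comp=1 -v code=404 -v acc=www.example.com -v pos=10 access.log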
BEGIN {
if(code!="" && comp==1) { sc=4 }
if(code!="" && comp==0) { sc=11 }
if(code=="") { sc=1000 }
if(comp==1) { ac=56 }
if(comp==0) { ac=50 }
}
{
if(acc==$ac && code==$sc)
res[$pos]++
}
END {
for(i in res)
printf "%-12s%-8s%-s\n", res[i]/NR*100"%", res[i], i
}

27
old/ctc/logqps.awk Normal file
View File

@ -0,0 +1,27 @@
#!/usr/bin/awk -f
# lap : 1--overlap 0--non-overlap
# comp: 1--access 0--origin
# pos : 4 | 10 | 11
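# Example (hypothetical values):
#   awk -f logqps.awk -v comp=1 -v acc=www.example.com              access.log   # count every request of the domain
#   awk -f logqps.awk -v comp=1 -v acc=www.example.com -v code=502  access.log   # count only its 502 responses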
BEGIN {
number=0
if(code!="" && comp==1) { sc=4 }
if(code!="" && comp==0) { sc=11 }
if(code=="") { sc=1000 }
if(comp==1) { ac=56 }
if(comp==0) { ac=50 }
}
{
if(acc==$ac && code==$sc)
number++
}
END {
if(code != "")
# 如果百分比不是100%说明这个节点有重叠域名访问日志
printf "%-8s%-15s%-s\n", code, number, number/NR*100"%"
else
printf "%-8s%-15s%-s\n", "QPS", number, number/NR*100"%"
}

1569
old/ctc/logs.sh Normal file

File diff suppressed because it is too large Load Diff

20
old/ctc/logsc.awk Normal file
View File

@ -0,0 +1,20 @@
#!/usr/bin/awk -f
# lap : 1--overlap 0--non-overlap
# comp: 1--access 0--origin
# pos : 4 | 10 | 11
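# Example (hypothetical values): status-code breakdown of one domain's access logs (pos=4 per the mapping above):
#   awk -f logsc.awk -v comp=1 -v acc=www.example.com -v pos=4 access.log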
BEGIN {
if(comp==1) { ac=56 }
if(comp==0) { ac=50 }
}
{
if(acc==$ac)
res[$pos]++
}
END {
for(i in res)
printf "%-8s%-15s%-s\n", i, res[i], res[i]/NR*100"%"
}

66
old/ctc/logtime.awk Normal file
View File

@ -0,0 +1,66 @@
#!/usr/bin/awk -f
# lap : 1--overlap 0--non-overlap
# comp: 1--access 0--origin
# code:
# non-blank -- specify status code
# blank -- not specify status code
# index = 1 if time duration < 1
# index = 2 if time duration < 2
# index = 3 if time duration < 3
# index = 4 if time duration < 4
# index = 5 if time duration < 5
# index = 6 if time duration < 6
# index = 7 if time duration < 11
# index = 8 if time duration < 16
# index = 9 if time duration < 21
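# Example (hypothetical values): bucket the 200-response durations of one domain, e.g. pos=6 (edge response time per logcombo.awk);
# comp=1 treats the field as seconds, comp=0 divides it by 1000 (origin times in ms):
#   awk -f logtime.awk -v comp=1 -v code=200 -v acc=www.example.com -v pos=6 access.log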
function timeproc(dur, trans) {
if((dur / trans)<1)
res[1]++
else if((dur / trans)<2)
res[2]++
else if((dur / trans)<3)
res[3]++
else if((dur / trans)<4)
res[4]++
else if((dur / trans)<5)
res[5]++
else if((dur / trans)<6)
res[6]++
else if((dur / trans)<11)
res[7]++
else if((dur / trans)<16)
res[8]++
else if((dur / trans)<21)
res[9]++
else if((dur / trans)>=21)
res[10]++
}
BEGIN {
if(code!="" && comp==1) { sc=4 }
if(code!="" && comp==0) { sc=11 }
if(code=="") { sc=1000 }
if(comp==1) { ac=56; trans=1; }
if(comp==0) { ac=50; trans=1000; }
}
{
if(acc==$ac && code==$sc)
timeproc($pos, trans)
}
END {
for(i in res)
if(i==1||i==2||i==3||i==4||i==5||i==6)
printf "%-15s%-8s%-s\n", "time < "i"s", res[i], res[i]/NR*100"%"
else if(i==7)
printf "%-15s%-8s%-s\n", "time < 11s", res[i], res[i]/NR*100"%"
else if(i==8)
printf "%-15s%-8s%-s\n", "time < 16s", res[i], res[i]/NR*100"%"
else if(i==9)
printf "%-15s%-8s%-s\n", "time < 21s", res[i], res[i]/NR*100"%"
else if(i==10)
printf "%-15s%-8s%-s\n", "time >= 21s", res[i], res[i]/NR*100"%"
}

349
old/ctc/map.sh Normal file
View File

@ -0,0 +1,349 @@
#!/bin/bash
# Purpose    : given a domain, look up information about the resolution group (解析组) the domain belongs to
# Depends on : dna/
# Known issues:
#
# 自定义控制台颜色显示
c_br='\e[1;31m' # bold red
c_bg='\e[1;32m' # bold green
c_bc='\e[1;36m' # bold cyan
c_by='\e[1;33m' # bold yellow
c_bp='\e[1;35m' # bold purple
c_bir='\e[1;3;31m' # bold italic red
c_big='\e[1;3;32m' # bold italic green
c_biy='\e[1;3;33m' # bold italic yellow
c_bib='\e[1;3;34m' # bold italic blue
c_bip='\e[1;3;35m' # bold italic purple
c_bic='\e[1;3;36m' # bold italic cyan
c_biw='\e[1;3;30m' # bold italic gray
c_e='\e[0m' # reset
# 使用说明
function usage {
echo -e "${c_bib}Usage: ${c_e}"
echo -e "${c_bib} map -d domain vip # 从域名解析组中随机获取一个VIP ${c_e}"
echo -e "${c_bib} map -d domain rip # 从域名解析组中随机获取一个RIP ${c_e}"
echo -e "${c_bib} map -d domain ip # 验证一个IP是否属于域名解析组中的VIP或者RIP ${c_e}"
echo -e "${c_bib} map -d domain label # 验证一个节点中/英文标签名是否包含在域名解析组中 ${c_e}"
echo -e "${c_bib} map -d domain label # 打印域名解析组中所有的节点信息 ${c_e}"
echo -e "${c_bib} map -d domain cover # 输出域名边缘解析组节点资源覆盖情况,可指定区域查询 ${c_e}"
echo -e "${c_bib} map -d domain parent # 输出域名父解析组节点资源覆盖情况,可指定区域查询 ${c_e}\n"
echo -e "${c_bic}[MAP-100] 该脚本工具会根据指定的域名查询对于应域名所在解析组的相关信息其中vip/rip/pool/cover/parent均是字符串参数domain/ip是实际要输入真实值的参数label既可以是字符串参数也可以是实际节点中/英文标签名称。${c_e}\n"
exit 100
}
function onCtrlC () {
# while capture Ctrl+C, kill all background processes silently and exit
exec 3>&2 # 3 is now a copy of 2
exec 2> /dev/null # 2 now points to /dev/null
sleep 1 # sleep to wait for process to die
exec 2>&3 # restore stderr to saved
exec 3>&- # close saved version
echo
echo -e "${c_bir}[MAP-101] Ctrl+C is captured, exiting...\n${c_e}"
exit 101
}
# 随机获取域名对应解析组的一个VIP
function random_vip() {
# 其实支持的不仅如下,只要输入正确,其他网络运营商也是可以查询的
echo -ne "${c_bg}请输入ISP类型--【ct|cu|cm|bgp|ctbgp|cubgp|cmbgp】${c_e}"
read -t 60 isp
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-102] 60s内无任何输入退出...${c_e}\n"; exit 102; }
# do a check to see if isp is correct or not
[[ $isp == '' ]] && isp='ct'
# 从解析组VIP列过滤以运营商标识开头的节点和VIP
ip_list=`cat map.log | awk '{print $4, $5}' | grep "^${isp}_" | awk '{print $2}' | sort | uniq`
[[ $ip_list == '' ]] && { echo -e "${c_br}[MAP-103] 在解析组$map中,没有找到$isp的网络运营商,退出...${c_e}"; exit 103; }
# 匹配v6和v4的子集
v6_list=`echo $ip_list | grep -Eo "[0-9a-fA-F:]{11,39}"`
v4_list=`echo $ip_list | grep -Eo "([0-9]{1,3}\.){3}[0-9]{1,3}"`
# 计算分别有多少个
num_v6=`echo $v6_list | awk '{print NF}'`
num_v4=`echo $v4_list | awk '{print NF}'`
# 获取随机的IP注意用$RANDOM取模之后结果有可能是0最大值不超过总数所以需要 +1
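# e.g. with num_v4=3: $RANDOM % 3 yields 0..2, and awk then prints field $(rand4+1), i.e. one of fields 1..3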
[[ num_v6 -ne 0 ]] && rand6=$(( $RANDOM % $num_v6 )) && vip_v6=`echo $v6_list | awk -v v6=$rand6 '{print $(v6+1)}'`
[[ num_v4 -ne 0 ]] && rand4=$(( $RANDOM % $num_v4 )) && vip_v4=`echo $v4_list | awk -v v4=$rand4 '{print $(v4+1)}'`
echo "vip_v6: $vip_v6"
echo "vip_v4: $vip_v4"
echo -e "${c_bip}MAP: $map\n${c_e}"
}
# 随机获取域名对应解析组的一个RIP
function random_rip() {
# 其实支持的不仅如下,只要输入正确,其他网络运营商也是可以查询的
echo -ne "${c_bg}请输入ISP类型--【ct|cu|cm|bgp|ctbgp|cubgp|cmbgp】${c_e}"
read -t 60 isp
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-104] 60s内无任何输入退出...${c_e}\n"; exit 104; }
# do a check to see if isp is correct or not
[[ $isp == '' ]] && isp='ct'
# 从解析组VIP列过滤以运营商标识开头的节点和VIP
ip_list=`cat map.log | awk '{print $4, $5}' | grep "^${isp}_" | awk '{print $2}' | sort | uniq`
[[ $ip_list == '' ]] && { echo -e "${c_br}[MAP-106] 在解析组$map中,没有找到$isp的网络运营商,退出...${c_e}"; exit 105; }
# 匹配v6和v4的子集
v6_list=`echo $ip_list | grep -Eo "[0-9a-fA-F:]{11,39}"`
v4_list=`echo $ip_list | grep -Eo "([0-9]{1,3}\.){3}[0-9]{1,3}"`
# 计算分别有多少个
num_v6=`echo $v6_list | awk '{print NF}'`
num_v4=`echo $v4_list | awk '{print NF}'`
# 获取随机的IP注意用$RANDOM取模之后结果有可能是0最大值不超过总数所以需要 +1
[[ num_v6 -ne 0 ]] && rand6=$(( $RANDOM % $num_v6 )) && vip_v6=`echo $v6_list | awk -v v6=$rand6 '{print $(v6+1)}'`
[[ num_v4 -ne 0 ]] && rand4=$(( $RANDOM % $num_v4 )) && vip_v4=`echo $v4_list | awk -v v4=$rand4 '{print $(v4+1)}'`
# 拿到VPI之后使用`ips`获取RIP列表
[[ num_v6 -ne 0 ]] && v6_list=`ips $vip_v6 | sed -n '/RIP/, /VIP/ p' | grep -Eo '([0-9]{1,3}.){3}[0-9]{1,3}'`
[[ num_v4 -ne 0 ]] && v4_list=`ips $vip_v4 | sed -n '/RIP/, /VIP/ p' | grep -Eo '([0-9]{1,3}.){3}[0-9]{1,3}'`
# 计算分别有多少个
num_v6=`echo $v6_list | awk '{print NF}'`
num_v4=`echo $v4_list | awk '{print NF}'`
# 获取随机的IP注意用$RANDOM取模之后结果有可能是0最大值不超过总数所以需要 +1
[[ num_v6 -ne 0 ]] && rand6=$(( $RANDOM % $num_v6 )) && rip_v6=`echo $v6_list | awk -v v6=$rand6 '{print $(v6+1)}'`
[[ num_v4 -ne 0 ]] && rand4=$(( $RANDOM % $num_v4 )) && rip_v4=`echo $v4_list | awk -v v4=$rand4 '{print $(v4+1)}'`
echo "rip_v6: $rip_v6"
echo "rip_v4: $rip_v4"
echo -e "${c_bip}MAP: $map\n${c_e}"
}
# 判断一个IP是否归属域名的解析组可以是VIP也可以是RIP
function ip_inmap() {
# 使用`ips`判断IP是否在天翼平台
ips $item > ips.log 2>&1
[[ $? -ne 0 ]] && { cat ips.log; echo -e "${c_br}[MAP-106]${c_e}"; exit 106; }
# 判断IP是否是RIP
cat $data/ip.group | awk '{print $1}' | grep -wq $item
is_rip=$?
# 如果是RIP
if [[ $is_rip -eq 0 ]]; then
# 获取对应RIP的英文节点标签名并判断该节点是否在域名的解析组中
label=`cat ips.log | grep -Eo "(ct|cu|cm|bgp|ctbgp|cmbgp|cubgp|as|eu|sa|na|cbn|cern)_[a-z]{2,3}_[a-z]{2,20}[0-9]{1,2}_(c|e|m|n)[0-9]{0,2}" | head -n 1`
cat map.log | awk '{print $4}' | sort | uniq | grep -wq $label
if [[ $? -eq 0 ]]; then
echo -e "${c_big}$item是域名$domain对应解析组$map中的IP并且是一个RIP。\n${c_e}"
else
echo -e "${c_bir}[MAP-107] $item${c_biy}不是${c_bir}域名$domain对应解析组中的IP。\n${c_e}"
exit 107
fi
# 如果不是RIP那就是VIP
else
# 直接判断该IP是否在域名的解析组
cat map.log | awk '{print $5}' | sort | uniq | grep -wq $item
if [[ $? -eq 0 ]]; then
echo -e "${c_big}$item是域名$domain对应解析组$map中的IP并且是一个VIP。\n${c_e}"
else
echo -e "${c_bir}[MAP-108] $item${c_biy}不是${c_bir}域名$domain对应解析组中的IP。\n${c_e}"
exit 108
fi
fi
}
# 判断一个标签是否在域名的解析组中
function label_inmap() {
# 输出该解析组所有的节点中英文对应标签信息
cat map.log | awk '{print $14, $4}' | sed '1d' | sort | uniq | awk '{if(NR%3==0) print " | "$0" | "; else printf "%s", " | "$0;} END{print ""}' | column -t
# 查询节点中英文标签名称是否在域名的解析组中,在的话并输出相应信息
cat map.log | awk '{print $14, $4}' | sort | uniq | grep -wq $item
if [[ $? -eq 0 ]]; then
node=`cat map.log | awk '{print $14, $4}' | sort | uniq | grep -w $item`
echo -e "${c_big}$node${c_by} 是域名 $domain 对应解析组 $map 中的节点,如上是该解析组所有节点列表汇总,可参考。\n${c_e}"
else
echo -e "${c_bir}[MAP-109] $item${c_biy} 不是${c_bir}域名 $domain 对应解析组 $map 中的节点,如上是该解析组所有节点列表汇总,可参考。\n${c_e}"
exit 109
fi
}
# 打印域名对应解析组所有节点信息
function labels_inmap() {
cat map.log | awk '{print $14, $4}' | sed '1d' | sort | uniq | awk '{if(NR%3==0) print " | "$0" | "; else printf "%s", " | "$0;} END{print ""}' | column -t
echo -e "${c_big}如上是域名 $domain 对应解析组 $map 所有节点列表汇总,可参考。\n${c_e}"
}
# 输出域名对应解析组的覆盖情况,可指定地区
function cover() {
# 宽度提示
width=`tput cols`
if [[ $width -lt 170 ]]; then
echo -e "${c_biy}因该选项输出的每行数据比较多需要终端宽度大于170当前终端宽度为$width,输出的界面会不整齐,是否继续(Y/n)${c_e}"
read -t 60 YON
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-110] 60s内无任何输入退出...${c_e}\n"; exit 110; }
if [[ $YON != '' && $YON != 'y' && $YON != 'Y' && $YON != 'Yes' && $YON != 'yes' && $YON != 'YES' ]]; then
echo -e "${c_br}[MAP-111] 请调整终端宽度之后,重新运行,退出...${c_e}\n"
exit 111
fi
fi
# 将需要保留的字段过滤出来
cat map.log | sed '1d' | awk '{print $1, $2, $3, $4, $5, $14, $16, $22, $16-$22, $23, $24}' | sort -k 2 > res.map
echo -e "${c_bib}1. 省份维度31个省市自治区 + 电信|移动|联通,例如-内蒙古电信${c_e}"
echo -e "${c_bib}2. 大区维度:东北|华北|华东|华南|华中|西北|西南 + 电信|移动|联通|长宽|铁通|广电,例如-东北移动${c_e}"
echo -e "${c_bib}3. 全国维度:中国 + 电信|移动|联通|长宽|铁通|广电|其他/香港/澳门/台湾/教育网,例如-中国移动${c_e}"
echo -e "${c_bib}4. 国际维度:日本/韩国/蒙古国/北朝鲜/澳洲/欧洲/非洲/北美洲/南美洲/中亚/西亚/南亚/东南亚/亚洲其他等${c_e}"
echo -e "${c_bib}5. 其他维度:全球/其他/其他广电/其他电信/其他移动/其他联通/其他铁通/其他长宽${c_e}"
echo -ne "${c_bg}请按照如上规则,输入查询条件:${c_e}\n"
read -t 60 query
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-112] 60s内无任何输入退出...${c_e}\n"; exit 112; }
# 无任何输入,则默认打印所有资源
if [[ ${query} == '' ]]; then
python3 /usr/local/script/fanmf11/get_infos.py --format-global res.map
# python3 /home/fanmf11/fanmf11/get_infos.py --format-global res.map
# 否则打印指定地区的资源覆盖情况
else
cat $data/area | grep $query| awk '{print $1}' > view
python3 /usr/local/script/fanmf11/get_infos.py --format-partial $query view res.map $domain $map
# python3 /home/fanmf11/fanmf11/get_infos.py --format-partial $query view res.map $domain $map
fi
}
# 输出域名对应父解析组的覆盖情况,可指定地区
function parent() {
# 宽度提示
width=`tput cols`
if [[ $width -lt 170 ]]; then
echo -e "${c_biy}因该选项输出的每行数据比较多需要终端宽度大于170当前终端宽度为$width,输出的界面会不整齐,是否继续(Y/n)${c_e}"
read -t 60 YON
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-113] 60s内无任何输入退出...${c_e}\n"; exit 113; }
if [[ $YON != '' && $YON != 'y' && $YON != 'Y' && $YON != 'Yes' && $YON != 'yes' && $YON != 'YES' ]]; then
echo -e "${c_br}[MAP-114] 请调整终端宽度之后,重新运行,退出...${c_e}\n"
exit 114
fi
fi
# 获取域名的父解析组
# python3 /usr/local/script/fanmf11/get_infos.py --domain_config_accid map_info.log $domain
infos --parent $domain $TS
cat cmap && echo -e "${c_bg}请选择要查看的父方案序号(e.g. 1, 2, 3...)${c_e}"
read -t 60 index
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-115] 60s内无任何输入退出...${c_e}\n"; exit 115; }
cat cmap | grep -Eq "^$index\."
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-116] 请输入正确的序号,退出...${c_e}\n"; exit 116; }
cmap=`cat cmap | grep -E "^$index\." | awk '{print $2}'`
getlastcover $cmap > cmap.log 2>&1
# 将需要保留的字段过滤出来
cat cmap.log | sed '1d' | awk '{print $1, $2, $3, $4, $5, $14, $16, $22, $16-$22, $23, $24}' | sort -k 2 > res.map
echo -e "${c_bib}1. 省份维度31个省市自治区 + 电信|移动|联通,例如-内蒙古电信${c_e}"
echo -e "${c_bib}2. 大区维度:东北|华北|华东|华南|华中|西北|西南 + 电信|移动|联通|长宽|铁通|广电,例如-东北移动${c_e}"
echo -e "${c_bib}3. 全国维度:中国 + 电信|移动|联通|长宽|铁通|广电|其他/香港/澳门/台湾/教育网,例如-中国移动${c_e}"
echo -e "${c_bib}4. 国际维度:日本/韩国/蒙古国/北朝鲜/澳洲/欧洲/非洲/北美洲/南美洲/中亚/西亚/南亚/东南亚/亚洲其他等${c_e}"
echo -e "${c_bib}5. 其他维度:全球/其他/其他广电/其他电信/其他移动/其他联通/其他铁通/其他长宽${c_e}"
echo -ne "${c_bg}请按照如上规则,输入查询条件:${c_e}\n"
read -t 60 query
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-117] 60s内无任何输入退出...${c_e}\n"; exit 117; }
# 无任何输入,则默认打印所有资源
if [[ ${query} == '' ]]; then
python3 /usr/local/script/fanmf11/get_infos.py --format-global res.map
# python3 /home/fanmf11/fanmf11/get_infos.py --format-global res.map
# 否则打印指定地区的资源覆盖情况
else
cat $data/area | grep $query| awk '{print $1}' > view
python3 /usr/local/script/fanmf11/get_infos.py --format-partial $query view res.map $domain $map
# python3 /home/fanmf11/fanmf11/get_infos.py --format-partial $query view res.map $domain $map
fi
}
function logfile() {
if [[ -d $trash ]]; then
echo -e "${c_br}[MAP-118]对于同一个用户,同一时间只能运行一个实例,请重新运行...${c_e}"
exit 118
else
mkdir -p $trash
cd $trash && cd ..
docs=`ls`
for doc in $docs; do
[[ -f $doc ]] && rm -rf $doc
done
folders=`ls -t`
while [[ `echo $folders | awk '{print NF}'` -gt 29 ]]; do
folder=`ls -t | tail -1`
rm -rf $folder
folders=`ls -t`
done
cd $trash && touch map
fi
}
# --------------------------------------------------------------------------------
# set a trap for Ctrl+C
trap 'onCtrlC' INT
# --------------------------------------------------------------------------------
# 初始化设定
stty erase '^H' # allow backspace
data='/usr/local/script/fanmf11/data' # set data directory path
toolbox='/usr/local/script/fanmf11/' # set toolbox directory path
map=''
accid=''
TS=`date +%s%N` # document the start time of the script
host=`whoami` # who use this script
trash="/usr/local/script/fanmf11/trash/$host/$TS" # set trash directory path
# --------------------------------------------------------------------------------
# 入参正确性检测
let NumOP=$# # number of parameter
OP="prefix "$@ # do a prefix cause '-' char may damage echo command
dash_d=`echo $OP | awk '{print $2}'` # get first param -d
domain=`echo $OP | awk '{print $3}'` # get second param domain
item=`echo $OP | awk '{print $4}'` # get third param item, can be vip, rip, testip, pool, cover etc.
[[ $NumOP -ne 3 || $dash_d != '-d' ]] && usage || logfile
# --------------------------------------------------------------------------------
# 检查域名是否在平台注册
res=`cat $data/domain.list | grep -w "$domain"`
[[ $res == '' ]] && { echo -e "${c_br}[MAP-119] 该域名未在天翼平台配置,一个小时内新增的域名无法查询,退出...${c_e}"; exit 119; }
# --------------------------------------------------------------------------------
# 获取域名解析组信息
infos --map $domain $TS
[[ $? -eq 205 || $? -eq 231 ]] && { echo -e "${c_br}[MAP-120] exiting...${c_e}"; exit 120; }
# cd $trash && map=`cat map.log` && getlastcover $map > map.log
cd $trash
if [[ `cat map.log | wc -l` -eq 1 ]]; then
map=`cat map.log`
else
maps=`cat map.log | sort | uniq`
count=1 && > remap.log
for map in $maps; do
echo $count": "$map | tee -a remap.log
let count=count+1
done
echo -ne "${c_bg}存在分区域解析,需确定解析组名称(默认是1)${c_e}\n"
read -t 60 imap
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-121] 60s内无任何输入退出...${c_e}\n"; exit 121; }
# do a check to see if isp is correct or not
[[ $imap == '' ]] && let imap=1
map=`cat remap.log | awk -F ':' -v imap=$imap '$1==imap {print $2}'`
[[ $map == '' ]] && { echo -e "${c_br}[MAP-122] 请输入正确的序号,退出...${c_e}"; exit 122; }
fi
getlastcover $map > map.log
cat map.log | grep -q 'can not find sys_id'
[[ $? -eq 0 ]] && { echo -e "${c_br}[MAP-123] 该解析组未在平台配置,退出...${c_e}"; exit 123; }
# --------------------------------------------------------------------------------
# 随机获取VIP
if [[ $item == 'vip' ]]; then
random_vip
elif [[ $item == 'rip' ]]; then
random_rip
elif [[ $item =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ || $item =~ ^[0-9a-fA-F:]{11,39}$ ]]; then
ip_inmap
elif [[ $item =~ ^(ct|cu|cm|bgp|ctbgp|cmbgp|cubgp|as|eu|sa|na|cbn|cern)_[a-z]{2,3}_[a-z]{2,20}[0-9]{1,2}_(c|dns|e|lvs|m|mysql|n|prets|pretw|redis|testts|ts|tw|uatts1)[0-9]{0,2}$ ]]; then
label_inmap
elif [[ $item == 'label' ]]; then
labels_inmap
elif [[ $item == 'cover' ]]; then
cover
elif [[ $item == 'parent' ]]; then
parent
# 兜底是中文节点名的查询
else
label_inmap
fi

13
old/ctc/normalize.jq Normal file
View File

@ -0,0 +1,13 @@
# Apply f to composite entities recursively using keys[], and to atoms
def sorted_walk(f):
. as $in
| if type == "object" then
reduce keys[] as $key
( {}; . + { ($key): ($in[$key] | sorted_walk(f)) } ) | f
elif type == "array" then map( sorted_walk(f) ) | f
else f
end;
def normalize: sorted_walk(if type == "array" then sort else . end);
normalize
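# Example (illustrative input): sort every array and emit object keys in a stable order, e.g.
#   echo '{"b":[2,1],"a":[0]}' | jq -f normalize.jq      # -> {"a":[0],"b":[1,2]}
# which can be used to normalize two JSON configs before diffing them.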

31
old/ctc/reformat.awk Normal file
View File

@ -0,0 +1,31 @@
#!/usr/bin/awk -f
BEGIN{
start1="\"-----BEGINCERTIFICATE-----";
start2="-----BEGINCERTIFICATE-----";
end1="-----ENDCERTIFICATE-----";
end2="-----ENDCERTIFICATE-----\",";
}
{
if($0~"https_public_content") {
printf "%s", $1;
for(i=2;i<=NF;i++) {
if($i==start1)
printf "%s", "\"-----BEGIN CERTIFICATE-----\\n";
else if($i==start2)
printf "%s", "-----BEGIN CERTIFICATE-----\\n";
else if($i==end1)
printf "%s", "-----END CERTIFICATE-----\\n";
else if($i==end2)
printf "%s", "-----END CERTIFICATE-----\",";
else if($i=="")
continue
else
printf "%s", $i"\\n"
}
}
else print $0
}

92
old/ctc/tasks.sh Normal file
View File

@ -0,0 +1,92 @@
#!/bin/bash
function isAlarm()
{
alarmDescrption=$1
alarmFile=$2
alarmDate=`date`
curl 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=ddea3f5f-fbfc-4c21-994a-71e9fc50e4ef' \
-H 'Content-Type: application/json' \
-d '
{
"msgtype": "markdown",
"markdown": {
"content": "**'"$alarmDescrption"'**\n
> 错误文件:<font color=\"warning\">'"$alarmFile"'生成错误,请立即查看</font>
> 告警时间:<font color=\"warning\">'"$alarmDate"'</font>"
}
} ' > /dev/null 2>&1
}
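# Usage: isAlarm '<alarm title>' '<file / detail>', e.g. the call used below:
#   isAlarm '【严重】获取全量域名信息失败' 'domain.list'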
function domain_list() {
# ----------------------------------------------------------------------------------------
# 获取平台全量域名信息
let count=0
while [[ $count -lt 3 ]]; do
curl 'https://bs.ctcdn.cn/api/v3/manageDomain/export' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjI1NH0.9Zw66R0R85avd92qzO-3KQ4DJ2zYXL4ght5bH41iTfA' -H 'content-type: application/json;charset=UTF-8' -vo $data/domain.list > $data/domain.list.response 2>&1
# 判断响应是否200
line_number=`cat $data/domain.list | wc -l`
cat $data/domain.list.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
if [[ $? -ne 0 || $line_number -lt 20000 ]]; then
count=$((count+1))
else
exit
fi
done
isAlarm '【严重】获取全量域名信息失败' 'domain.list'
exit 248
}
function renew_backup() {
cd $data
lakes_bak > $data/lakes
curl -so $data/ip.group "http://150.223.254.77:5044/download/ip.group"
# backups
cd $toolbox
[[ -d '/home/fanmf11/.backups/' ]] && rm -rf /home/fanmf11/.backups/*.tgz || mkdir '/home/fanmf11/.backups/'
cd $toolbox && bt=$(date +%Y%m%d%H%M%S)
# cp $data/lakes $data/lakes-$(date +%d)
# cp $data/ip.group $data/ip.group-$(date +%d)
tar -czf /home/fanmf11/.backups/toolbox-${bt}.tgz ./*
[[ ! -s $data/lakes ]] && isAlarm '【严重】基础文件生成错误告警' 'lakes'
[[ ! -s $data/ip.group ]] && isAlarm '【严重】基础文件生成错误告警' 'ip.group'
[[ ! -s $backups/toolbox-${bt}.tgz ]] && isAlarm '备份失败告警' "toolbox-${bt}.tgz"
}
function view_check() {
maps=`cat $data/maps`
> $data/area.new
for map in $maps; do
getlastcover $map > $map
cat $map | awk '{print $3}' | sed '1d' | sort | uniq >> $data/area.new
rm $map
done
news=`cat $data/area.new | sort | uniq`
olds=`cat $data/area | awk '{print $1}' | sort | uniq`
> $data/area.new
> $data/area.diff
for new in $news; do
[[ $new == 'find' ]] && continue
echo $new >> $data/area.new
echo $olds | grep -wq $new
[[ $? -ne 0 ]] && { isAlarm '有新的View需要添加' "$new"; echo $new >> $data/area.diff; sleep 1; }
done
}
data='/usr/local/script/fanmf11/data'
host=`whoami`
toolbox='/usr/local/script/fanmf11'
backups='/home/fanmf11/.backups'
[[ $1 == '--renew_backup' ]] && renew_backup
[[ $1 == '--domain_list' ]] && domain_list
[[ $1 == '--new_area' ]] && view_check

134
old/ctc/utool Normal file
View File

@ -0,0 +1,134 @@
#!/bin/bash
# User specific aliases and functions
alias cls='clear && ls'
# alias trash='cd /usr/local/script/fanmf11/trash/fanmf11 && ls'
alias fanmf11='cd /usr/local/script/fanmf11 && ls'
alias ..='cd ../ && ls'
alias ...='cd ../.. && ls'
alias l='ls -alh'
alias common='cat /usr/local/script/fanmf11/data/cmds'
c_br='\e[1;31m' # bold red
c_bg='\e[1;32m' # bold green
c_by='\e[1;33m' # bold yellow
c_bb='\e[1;34m' # bold blue
c_bp='\e[1;35m' # bold purple
c_bc='\e[1;36m' # bold cyan
c_bir='\e[1;3;31m' # * bold italic red
c_big='\e[1;3;32m' # bold italic green
c_bib='\e[1;3;34m' # * bold italic blue
c_bip='\e[1;3;35m' # bold italic purple
c_bic='\e[1;3;36m' # bold italic cyan
c_e='\e[0m' # reset
trash='/usr/local/script/fanmf11/trash'
function utool() {
if [[ $1 == '-a' ]]; then
# set -x
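# utool -a [user] : list, per tool (ips/ids/map/infos/logs/config), the timestamped trash
#                   directories that user has left behind (user defaults to fanmf11)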
[[ $# -lt 1 ]] && { echo -e "${c_bir}[UTOOL-100] Need at least one parameter, exiting...${c_e}"; return 100; }
[[ $# -eq 1 ]] && place='fanmf11' || place="$2"
ls $trash | grep -wq $place
[[ $? -ne 0 ]] && { echo -e "${c_br}[UTOOL-101] $place用户不存在或该用户从未使用过相关工具退出...${c_e}"; return 101; }
> $trash/fanmf11/record.log
items="ips ids map infos logs config"
for item in $items; do
date_lists=`find $trash/$place -name "$item" -type f | sort | uniq | awk -F '/' '{print $8}' | sort | uniq`
for date_list in $date_lists; do
let number=${date_list:0:10}
date_fmt=`date -d@$number +'%Y-%m-%d %H:%M:%S'`
echo "$date_fmt $date_list $item" >> $trash/fanmf11/record.log
done
done
cat $trash/fanmf11/record.log | sort -nk3 | awk '{printf "%-11s%-14s%-25s%-s\n", $1, $2, $3, $4}'
# set +x
elif [[ $1 == '-b' ]]; then
cat /usr/local/script/fanmf11/data/cmds
elif [[ $1 == '-c' ]]; then
:
elif [[ $1 == '-d' ]]; then
:
elif [[ $1 == '-e' ]]; then
:
elif [[ $1 == '-f' ]]; then
:
elif [[ $1 == '-g' ]]; then
:
elif [[ $1 == '-h' ]]; then
usage
elif [[ $1 == '-i' ]]; then
:
elif [[ $1 == '-j' ]]; then
:
elif [[ $1 == '-k' ]]; then
:
elif [[ $1 == '-l' ]]; then
:
elif [[ $1 == '-m' ]]; then
:
elif [[ $1 == '-n' ]]; then
:
elif [[ $1 == '-o' ]]; then
:
elif [[ $1 == '-p' ]]; then
:
elif [[ $1 == '-q' ]]; then
:
elif [[ $1 == '-r' ]]; then
:
elif [[ $1 == '-s' ]]; then
:
elif [[ $1 == '-t' ]]; then
:
elif [[ $1 == '-u' ]]; then
:
elif [[ $1 == '-v' ]]; then
:
elif [[ $1 == '-w' ]]; then
:
elif [[ $1 == '-x' ]]; then
:
elif [[ $1 == '-y' ]]; then
:
elif [[ $1 == '-z' ]]; then
echo -e "${c_bg}直播120.39.248.231"
echo -e "全站222.187.236.6"
echo -e "全站222.187.236.7"
echo -e "点播113.62.113.33${c_e}"
else
:
fi
}
function usage() {
let col=`tput cols`
if [[ $col -lt 120 ]]; then
echo -e "\e[1;3;31mYour screen width is too small to show the usage info neatly. So make the display window maximized.\e[0m"
read -p "Press any key to continue..."
echo ''
fi
echo -e "\e[1;32mDESCRIPTION:\e[0m"
echo -e "\e[3;32mutool -- a self-defined command line interface, which is used to facilitate operating the system, supports the following options. In the description part, where there is a leading asterisk signifies that this option must take an argument.\e[0m"
echo -e "\e[1;4m \e[0m"
echo -e "\e[37;40m|\e[0m\e[1;4;37;40mOption| Description |Option| Description \e[0m\e[37;40m|\e[0m"
echo -e "\e[37;40m| -a |*find dirs of specified item in trash | -n | |\e[0m"
echo -e "\e[37;40m| -b | show some often used commands | -o | |\e[0m"
echo -e "\e[37;40m| -c | | -p | |\e[0m"
echo -e "\e[37;40m| -d | | -q | |\e[0m"
echo -e "\e[37;40m| -e | | -r | |\e[0m"
echo -e "\e[37;40m| -f | | -s | |\e[0m"
echo -e "\e[37;40m| -g | | -t | |\e[0m"
echo -e "\e[37;40m| -h | show usage info | -u | |\e[0m"
echo -e "\e[37;40m| -i | | -v | |\e[0m"
echo -e "\e[37;40m| -j | | -w | |\e[0m"
echo -e "\e[37;40m| -k | | -x | |\e[0m"
echo -e "\e[37;40m| -l | | -y | |\e[0m"
echo -e "\e[37;40m|\e[0m\e[4;37;40m -m | | -z | \e[0m\e[37;40m|\e[0m\n"
}

41
old/github_update.sh Normal file
View File

@ -0,0 +1,41 @@
#!/bin/bash
#===================================================================
# Filename : update_github.sh
# Function :
# Usage :
# Author : Manford Fan
# Date : 2022-03-26 18:46
# Version : Version 0.1
# Disclaimer : The author is NOT responsible for any loss caused
# by the user's own operations.
# And More : If you find there are some bugs in this scripts
# Or you have better ideas, please do contact me
# via E-mail -- mffan0922@163.com
#===================================================================
t=`date +%Y%m%d%H%M%S`
echo $t >> /opt/logs/github_update.log
# first try
echo -n "1-blog: "
cd /opt/source-code/blog && git pull --rebase
echo -n "1-wiki: "
cd /opt/websites/wiki && git pull --rebase
echo -n "1-nav: "
cd /opt/websites/nav && git pull --rebase
echo -n "1-homepage: "
cd /opt/websites/homepage && git pull --rebase
# check if done
echo -n "2-blog: "
cd /opt/source-code/blog && git pull --rebase
echo -n "2-wiki: "
cd /opt/websites/wiki && git pull --rebase
echo -n "2-nav: "
cd /opt/websites/nav && git pull --rebase
echo -n "2-homepage: "
cd /opt/websites/homepage && git pull --rebase
echo -e "-----------------------------------------------------------\n"

View File

@ -0,0 +1,41 @@
#!/bin/bash
#===================================================================
# Filename : update_github.sh
# Function :
# Usage :
# Author : Manford Fan
# Date : 2022-03-26 18:46
# Version : Version 0.1
# Disclaimer : The author is NOT responsible for any loss caused
# by the user's own operations.
# And More : If you find there are some bugs in this scripts
# Or you have better ideas, please do contact me
# via E-mail -- mffan0922@163.com
#===================================================================
t=`date +%Y%m%d%H%M%S`
echo $t >> /opt/logs/github_update.log
# first try
# echo -n "1-blog: "
# cd /opt/source-code/blog && git pull --rebase
# echo -n "1-wiki: "
# cd /opt/websites/wiki && git pull --rebase
# echo -n "1-nav: "
# cd /opt/websites/nav && git pull --rebase
echo -n "1-homepage: "
cd /opt/websites/homepage && git pull --rebase
# check if done
# echo -n "2-blog: "
# cd /opt/source-code/blog && git pull --rebase
# echo -n "2-wiki: "
# cd /opt/websites/wiki && git pull --rebase
# echo -n "2-nav: "
# cd /opt/websites/nav && git pull --rebase
echo -n "2-homepage: "
cd /opt/websites/homepage && git pull --rebase
echo -e "-----------------------------------------------------------\n"

17
old/jekyll_blog_update.sh Normal file
View File

@ -0,0 +1,17 @@
#!/bin/bash
inotifywait -mrq -e create,delete,move,close_write /opt/source-code/blog --exclude '^.*/avatar.jpg|^.*/\.git' | while read directory action filename; do
echo ====================================================
echo `date`
echo $directory$filename $action
rm -rf /opt/websites/blog
let numOfAvatar=`ls /opt/websites/nav/assets/images/logos/ | wc -l`
let randNumber=$RANDOM%$numOfAvatar
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/websites/homepage/assets/img/logo.jpg -rf
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/websites/nav/assets/images/logos/avatar.jpg -rf
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/source-code/blog/img/avatar.jpg -rf
jekyll b -s /opt/source-code/blog/ -d /opt/websites/blog/
echo -e '\n'
done

View File

@ -0,0 +1,13 @@
#!/bin/bash
inotifywait -mrq -e create,delete,move,close_write /opt/source-code/document/python | while read directory action filename; do
echo ====================================================
echo `date`
echo $directory$filename $action
rm -rf /opt/websites/just-the-docs/python
jekyll b -s /opt/source-code/document/python -d /opt/websites/just-the-docs/python
echo -e '\n'
done

36
old/koel_update.sh Normal file
View File

@ -0,0 +1,36 @@
#!/bin/bash
#===================================================================
# Filename : koel_update.sh
# Function :
# Usage :
# Author : Manford Fan
# Date : 2022-10-15 23:34
# Version : Version 0.1
# Disclaimer : The author is NOT responsible for any loss caused
# by the user's own operations.
# And More : If you find there are some bugs in this script
# Or you have better ideas, please do contact me
# via E-mail -- mffan0922@163.com
#===================================================================
[[ ! -e /tmp/files_now ]] && touch /tmp/files_now
[[ ! -e /tmp/files_pre_60s ]] && touch /tmp/files_pre_60s
success_flg=1
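# success_flg: 1 = koel:sync not (yet) confirmed successful, 0 = sync succeeded (see the retry loop below)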
ls -aR /opt/media/Music | grep -E "\.(mp3|flac|opus|aac|ogg|m4a)$" | sort > /tmp/files_now
diff /tmp/files_now /tmp/files_pre_60s >> /opt/logs/koel_update.log
if [[ $? -ne 0 ]]; then
chown -R www-data:www-data /opt/media/Music
for i in `seq 10`; do
php /opt/source-code/koel/artisan koel:sync > /dev/null
if [[ $? -eq 0 ]]; then
php /opt/source-code/koel/artisan koel:sync >> /opt/logs/koel_update.log
success_flg=0
break
fi
sleep 2
done
[[ success_flg -eq 1 ]] && echo "Happening @ $(date) Failed scanning the media dir, need processing that by hand." >> /opt/logs/koel_update.log
[[ success_flg -eq 0 ]] && echo -e "Happening @ $(date) Sync koel music successfully." >> /opt/logs/koel_update.log
fi
cp /tmp/files_now /tmp/files_pre_60s

14
old/nav_jpg.sh Normal file
View File

@ -0,0 +1,14 @@
#!/bin/bash
html='/opt/websites/nav/index.html'
jpg_max_num=`cat $html | grep -oE "[0-9]+\.jpg" | awk -F '.' '{print $1}' | sort -n | tail -n 1`
line_num=`cat $html | grep -oE "[0-9]+\.jpg" | awk -F '.' '{print $1}' | sort -n | uniq -c | wc -l`
jpg_all_num=`ls /opt/websites/nav/assets/images/logos/ | wc -l`
if [[ $((jpg_max_num+1)) -ne $line_num ]]; then
echo -e "\e[1;31mThere must be duplicated jpg files, plz check!\e[0m"
exit 2
fi
echo "Now: $jpg_max_num | MAX: $jpg_all_num | AVAILABLE: $((jpg_all_num-jpg_max_num)) | NEXT: $((jpg_max_num+1))"

View File

@ -0,0 +1,116 @@
#!/bin/bash
#===================================================================
# Filename : rclone_alist_automount.sh
# Function :
# Usage :
# Author : Manford Fan
# Date : 2022-10-19 14:05
# Version : Version 0.1
# Disclaimer : The author is NOT responsible for any loss caused
# by the user's own operations.
# And More : If you find there are some bugs in this script
# Or you have better ideas, please do contact me
# via E-mail -- mffan0922@163.com
#===================================================================
function rclone_alist_reset() {
systemctl restart alist.service
for i in `seq 3`; do
fusermount -uzq /opt/webdav/alist > /dev/null 2>&1
umount /opt/webdav/alist > /dev/null 2>&1
sleep 2
done
ps -ef | grep 'rclone mount Alist' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_alist_pids
for rclone_alist_pid in `cat /tmp/rclone/rclone_alist_pids`; do
kill -9 $rclone_alist_pid;
done
nohup /usr/bin/rclone mount Alist:/ /opt/webdav/alist \
--allow-other --vfs-cache-mode full --vfs-cache-max-size 10G --vfs-read-ahead 100M --dir-cache-time 2m --poll-interval 0 \
--vfs-cache-max-age 4h --cache-dir /tmp/vfs-cache --bwlimit-file 20M --bwlimit 100M --no-update-modtime --contimeout 30m \
--log-file /opt/logs/rclone/rclone_alist.log --log-level NOTICE --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 500M \
--buffer-size 200M --attr-timeout 5m --transfers=6 --multi-thread-streams=6 --fast-list --timeout 30m \
--allow-non-empty --no-modtime --max-duration 3h --vfs-read-wait 2s --vfs-write-wait 5s > /dev/null 2>&1 &
}
alist_log='/opt/logs/rclone/rclone_alist.log'
pid_self=$$
# get all kinds of states for later decision
num=`cat /proc/mounts | grep /opt/webdav/alist | wc -l`
[[ $num -eq 0 ]] && loaded=0
[[ $num -eq 1 ]] && loaded=1
[[ $num -gt 1 ]] && loaded=2
ps -ef | grep '/usr/bin/rclone mount Alist' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_alist_pids
num=`cat /tmp/rclone/rclone_alist_pids | wc -l`
[[ $num -eq 0 ]] && rclone_running=0
[[ $num -eq 1 ]] && rclone_running=1
[[ $num -gt 1 ]] && rclone_running=2
sleep 2
ps -ef | grep '/usr/bin/bash /opt/scripts/rclone/rclone_alist_automount.sh' | grep -v 'grep' > /tmp/rclone/rclone_alist_automount_pids
let num=`cat /tmp/rclone/rclone_alist_automount_pids | sed -e '/^$/d' | wc -l`
[[ $num -eq 1 ]] && script_running=1
[[ $num -gt 1 ]] && script_running=2
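# state summary derived from the counts above:
#   loaded         : 0 = alist not mounted, 1 = mounted once, 2 = mounted more than once
#   rclone_running : 0 = no 'rclone mount Alist' process, 1 = exactly one, 2 = several
#   script_running : 1 = only this instance of the script, 2 = another instance is already running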
# print the states for debug
echo `date` >> $alist_log
echo loaded = $loaded >> $alist_log
echo rclone_running = $rclone_running >> $alist_log
echo script_running = $script_running >> $alist_log
# exit 5
# decide if `rclone` command function normally
if [[ $1 == '-f' ]]; then
echo -e "Happening @ $(date) [Alist] Executing BY Hands.\n" >> $alist_log
if [[ $script_running -eq 1 ]]; then
rclone_alist_reset
elif [[ script_running -eq 2 ]]; then
echo "Happening @ $(date) [Alist] Script rclone_alist_automount.sh -f has already been executing..." | tee -a $alist_log
echo "Happening @ $(date) [Alist] Alist RESET will be done with -f option" | tee -a $alist_log
for rclone_alist_automount_pid in `cat /tmp/rclone/rclone_alist_automount_pids`; do
[[ $rclone_alist_automount_pid != $pid_self ]] && kill -9 $rclone_alist_automount_pid > /dev/null 2>&1
done
rclone_alist_reset
else
echo "Happening @ $(date) [Alist] In general, this -f case will NOT happen" >> $alist_log
fi
elif [[ $1 == '-c' ]]; then
echo -e "Happening @ $(date) [Alist] Executing BY Cron Service.\n" >> $alist_log
if [[ $script_running -eq 1 ]]; then
rclone_alist_reset
elif [[ script_running -eq 2 ]]; then
echo "Happening @ $(date) [Alist] Script rclone_alist_automount.sh -c has already been executing..." | tee -a $alist_log
echo "Happening @ $(date) [Alist] Alist RESET will be done on CRON condition." | tee -a $alist_log
for rclone_alist_automount_pid in `cat /tmp/rclone/rclone_alist_automount_pids`; do
[[ $rclone_alist_automount_pid != $pid_self ]] && kill -9 $rclone_alist_automount_pid > /dev/null 2>&1
done
rclone_alist_reset
else
echo "Happening @ $(date) [Alist] In general, this -c case will NOT happen" >> $alist_log
fi
elif [[ $1 == '' ]]; then
sleep 10
if [[ script_running -eq 1 ]]; then
if [[ $loaded -ne 1 || $rclone_running -ne 1 ]]; then
echo "Happening @ $(date) [Alist] Executing automatically." >> $alist_log
rclone_alist_reset
fi
elif [[ $script_running -eq 2 ]]; then
echo "Happening @ $(date) [Alist] Script rclone_alist_automount.sh auto has already been executing..." | tee -a $alist_log
echo "Happening @ $(date) [Alist] Nothing will be done at this auto-situation" | tee -a $alist_log
# for rclone_alist_automount_pid in `cat /tmp/rclone_alist_automount_pids`; do
# [[ $rclone_alist_automount_pid != $pid_self ]] && kill -9 $rclone_alist_automount_pid > /dev/null 2>&1
# done
# rclone_alist_reset
else
echo "Happening @ $(date) [Alist] In general, this auto case will NOT happen" >> $alist_log
fi
else
echo "Happening @ $(date) [Alist] Wrong usage of script/tool, only accept -f | -c or nothing as argument." >> $alist_log
fi

View File

@ -0,0 +1,342 @@
#!/bin/bash
#===================================================================
# Filename : rclone_bash_completion.sh
# Function :
# Usage :
# Author : Manford Fan
# Date : 2022-10-27 10:04
# Version : Version 0.1
# Disclaimer : The author is NOT responsible for any loss caused
# by the user's own operations.
# And More : If you find there are some bugs in this script
# Or you have better ideas, please do contact me
# via E-mail -- mffan0922@163.com
#===================================================================
# bash completion V2 for rclone -*- shell-script -*-
__rclone_debug()
{
if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
fi
}
# Macs have bash3 for which the bash-completion package doesn't include
# _init_completion. This is a minimal version of that function.
__rclone_init_completion()
{
COMPREPLY=()
_get_comp_words_by_ref "$@" cur prev words cword
}
# This function calls the rclone program to obtain the completion
# results and the directive. It fills the 'out' and 'directive' vars.
__rclone_get_completion_results() {
local requestComp lastParam lastChar args
# Prepare the command to request completions for the program.
# Calling ${words[0]} instead of directly rclone allows to handle aliases
args=("${words[@]:1}")
requestComp="${words[0]} __complete ${args[*]}"
lastParam=${words[$((${#words[@]}-1))]}
lastChar=${lastParam:$((${#lastParam}-1)):1}
__rclone_debug "lastParam ${lastParam}, lastChar ${lastChar}"
if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go method.
__rclone_debug "Adding extra empty parameter"
requestComp="${requestComp} ''"
fi
# When completing a flag with an = (e.g., rclone -n=<TAB>)
# bash focuses on the part after the =, so we need to remove
# the flag part from $cur
if [[ "${cur}" == -*=* ]]; then
cur="${cur#*=}"
fi
__rclone_debug "Calling ${requestComp}"
# Use eval to handle any environment variables and such
out=$(eval "${requestComp}" 2>/dev/null)
# Extract the directive integer at the very end of the output following a colon (:)
directive=${out##*:}
# Remove the directive
out=${out%:*}
if [ "${directive}" = "${out}" ]; then
# There is not directive specified
directive=0
fi
__rclone_debug "The completion directive is: ${directive}"
__rclone_debug "The completions are: ${out}"
}
__rclone_process_completion_results() {
local shellCompDirectiveError=1
local shellCompDirectiveNoSpace=2
local shellCompDirectiveNoFileComp=4
local shellCompDirectiveFilterFileExt=8
local shellCompDirectiveFilterDirs=16
if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
# Error code. No completion.
__rclone_debug "Received error from custom completion go code"
return
else
if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
if [[ $(type -t compopt) = "builtin" ]]; then
__rclone_debug "Activating no space"
compopt -o nospace
else
__rclone_debug "No space directive not supported in this version of bash"
fi
fi
if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
if [[ $(type -t compopt) = "builtin" ]]; then
__rclone_debug "Activating no file completion"
compopt +o default
else
__rclone_debug "No file completion directive not supported in this version of bash"
fi
fi
fi
# Separate activeHelp from normal completions
local completions=()
local activeHelp=()
__rclone_extract_activeHelp
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local fullFilter filter filteringCmd
# Do not use quotes around the $completions variable or else newline
# characters will be kept.
for filter in ${completions[*]}; do
fullFilter+="$filter|"
done
filteringCmd="_filedir $fullFilter"
__rclone_debug "File filtering command: $filteringCmd"
$filteringCmd
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
# File completion for directories only
# Use printf to strip any trailing newline
local subdir
subdir=$(printf "%s" "${completions[0]}")
if [ -n "$subdir" ]; then
__rclone_debug "Listing directories in $subdir"
pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
else
__rclone_debug "Listing directories in ."
_filedir -d
fi
else
__rclone_handle_completion_types
fi
__rclone_handle_special_char "$cur" :
__rclone_handle_special_char "$cur" =
# Print the activeHelp statements before we finish
if [ ${#activeHelp} -ne 0 ]; then
printf "\n";
printf "%s\n" "${activeHelp[@]}"
printf "\n"
# The prompt format is only available from bash 4.4.
# We test if it is available before using it.
if (x=${PS1@P}) 2> /dev/null; then
printf "%s" "${PS1@P}${COMP_LINE[@]}"
else
# Can't print the prompt. Just print the
# text the user had typed, it is workable enough.
printf "%s" "${COMP_LINE[@]}"
fi
fi
}
# Separate activeHelp lines from real completions.
# Fills the $activeHelp and $completions arrays.
__rclone_extract_activeHelp() {
local activeHelpMarker="_activeHelp_ "
local endIndex=${#activeHelpMarker}
while IFS='' read -r comp; do
if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then
comp=${comp:endIndex}
__rclone_debug "ActiveHelp found: $comp"
if [ -n "$comp" ]; then
activeHelp+=("$comp")
fi
else
# Not an activeHelp line but a normal completion
completions+=("$comp")
fi
done < <(printf "%s\n" "${out}")
}
__rclone_handle_completion_types() {
__rclone_debug "__rclone_handle_completion_types: COMP_TYPE is $COMP_TYPE"
case $COMP_TYPE in
37|42)
# Type: menu-complete/menu-complete-backward and insert-completions
# If the user requested inserting one completion at a time, or all
# completions at once on the command-line we must remove the descriptions.
# https://github.com/spf13/cobra/issues/1508
local tab=$'\t' comp
while IFS='' read -r comp; do
[[ -z $comp ]] && continue
# Strip any description
comp=${comp%%$tab*}
# Only consider the completions that match
if [[ $comp == "$cur"* ]]; then
COMPREPLY+=("$comp")
fi
done < <(printf "%s\n" "${completions[@]}")
;;
*)
# Type: complete (normal completion)
__rclone_handle_standard_completion_case
;;
esac
}
__rclone_handle_standard_completion_case() {
local tab=$'\t' comp
# Short circuit to optimize if we don't have descriptions
if [[ "${completions[*]}" != *$tab* ]]; then
IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
return 0
fi
local longest=0
local compline
# Look for the longest completion so that we can format things nicely
while IFS='' read -r compline; do
[[ -z $compline ]] && continue
# Strip any description before checking the length
comp=${compline%%$tab*}
# Only consider the completions that match
[[ $comp == "$cur"* ]] || continue
COMPREPLY+=("$compline")
if ((${#comp}>longest)); then
longest=${#comp}
fi
done < <(printf "%s\n" "${completions[@]}")
# If there is a single completion left, remove the description text
if [ ${#COMPREPLY[*]} -eq 1 ]; then
__rclone_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
comp="${COMPREPLY[0]%%$tab*}"
__rclone_debug "Removed description from single completion, which is now: ${comp}"
COMPREPLY[0]=$comp
else # Format the descriptions
__rclone_format_comp_descriptions $longest
fi
}
__rclone_handle_special_char()
{
local comp="$1"
local char=$2
if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
local word=${comp%"${comp##*${char}}"}
local idx=${#COMPREPLY[*]}
while [[ $((--idx)) -ge 0 ]]; do
COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"}
done
fi
}
__rclone_format_comp_descriptions()
{
local tab=$'\t'
local comp desc maxdesclength
local longest=$1
local i ci
for ci in ${!COMPREPLY[*]}; do
comp=${COMPREPLY[ci]}
# Properly format the description string which follows a tab character if there is one
if [[ "$comp" == *$tab* ]]; then
__rclone_debug "Original comp: $comp"
desc=${comp#*$tab}
comp=${comp%%$tab*}
# $COLUMNS stores the current shell width.
# Remove an extra 4 because we add 2 spaces and 2 parentheses.
maxdesclength=$(( COLUMNS - longest - 4 ))
# Make sure we can fit a description of at least 8 characters
# if we are to align the descriptions.
if [[ $maxdesclength -gt 8 ]]; then
# Add the proper number of spaces to align the descriptions
for ((i = ${#comp} ; i < longest ; i++)); do
comp+=" "
done
else
# Don't pad the descriptions so we can fit more text after the completion
maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
fi
# If there is enough space for any description text,
# truncate the descriptions that are too long for the shell width
if [ $maxdesclength -gt 0 ]; then
if [ ${#desc} -gt $maxdesclength ]; then
desc=${desc:0:$(( maxdesclength - 1 ))}
desc+="…"
fi
comp+=" ($desc)"
fi
COMPREPLY[ci]=$comp
__rclone_debug "Final comp: $comp"
fi
done
}
__start_rclone()
{
local cur prev words cword split
COMPREPLY=()
# Call _init_completion from the bash-completion package
# to prepare the arguments properly
if declare -F _init_completion >/dev/null 2>&1; then
_init_completion -n "=:" || return
else
__rclone_init_completion -n "=:" || return
fi
__rclone_debug
__rclone_debug "========= starting completion logic =========="
__rclone_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword"
# The user could have moved the cursor backwards on the command-line.
# We need to trigger completion from the $cword location, so we need
# to truncate the command-line ($words) up to the $cword location.
words=("${words[@]:0:$cword+1}")
__rclone_debug "Truncated words[*]: ${words[*]},"
local out directive
__rclone_get_completion_results
__rclone_process_completion_results
}
if [[ $(type -t compopt) = "builtin" ]]; then
complete -o default -F __start_rclone rclone
else
complete -o default -o nospace -F __start_rclone rclone
fi
# ex: ts=4 sw=4 et filetype=sh
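#
# A minimal usage sketch (assumes the bash-completion package is installed; the file path below is
# illustrative, not taken from this repo):
#   source /path/to/this/completion/file                            # enable for the current shell only
#   cp /path/to/this/completion/file /etc/bash_completion.d/rclone  # enable for future shells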

View File

@ -0,0 +1,119 @@
#!/bin/bash
#===================================================================
# Filename : rclone_cloudreve_automount.sh
# Function :
# Usage :
# Author : Manford Fan
# Date : 2022-10-19 14:05
# Version : Version 0.1
# Disclaimer : The author is NOT responsible for any loss caused
# by the user's own operations.
# And More : If you find there are some bugs in this script
# Or you have better ideas, please do contact me
# via E-mail -- mffan0922@163.com
#===================================================================
function rclone_cloudreve_reset() {
for i in `seq 3`; do
fusermount -uzq /opt/webdav/cloudreve > /dev/null 2>&1
umount /opt/webdav/cloudreve > /dev/null 2>&1
sleep 2
done
ps -ef | grep 'rclone mount Cloudreve' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_cloudreve_pids
for rclone_cloudreve_pid in `cat /tmp/rclone/rclone_cloudreve_pids`; do
kill -9 $rclone_cloudreve_pid;
done
nohup /usr/bin/rclone mount Cloudreve:/ /opt/webdav/cloudreve \
        --allow-other --vfs-cache-mode full --vfs-cache-max-size 10G --vfs-read-ahead 100M --dir-cache-time 2m \
--vfs-cache-max-age 4h --cache-dir /tmp/vfs-cache --bwlimit-file 20M --bwlimit 100M --no-update-modtime \
--log-file /opt/logs/rclone/rclone_cloudreve.log --log-level NOTICE --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 500M \
--buffer-size 200M --attr-timeout 5m --transfers=6 --multi-thread-streams=6 --fast-list \
--allow-non-empty --no-modtime --max-duration 3h --vfs-read-wait 2s --vfs-write-wait 5s > /dev/null 2>&1 &
}
cloudreve_log='/opt/logs/rclone/rclone_cloudreve.log'
pid_self=$$
# get all kinds of states for later decision
num=`cat /proc/mounts | grep /opt/webdav/cloudreve | wc -l`
[[ $num -eq 0 ]] && loaded=0
[[ $num -eq 1 ]] && loaded=1
[[ $num -gt 1 ]] && loaded=2
ps -ef | grep '/usr/bin/rclone mount Cloudreve' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_cloudreve_pids
num=`cat /tmp/rclone/rclone_cloudreve_pids | wc -l`
[[ $num -eq 0 ]] && rclone_running=0
[[ $num -eq 1 ]] && rclone_running=1
[[ $num -gt 1 ]] && rclone_running=2
sleep 2
ps -ef | grep '/usr/bin/bash /opt/scripts/rclone/rclone_cloudreve_automount.sh' | grep -v 'grep' > /tmp/rclone/rclone_cloudreve_automount_pids
let num=`cat /tmp/rclone/rclone_cloudreve_automount_pids | sed -e '/^$/d' | wc -l`
[[ $num -eq 1 ]] && script_running=1
if [[ $num -gt 1 ]]; then
script_running=2
echo `date` >> /tmp/rclone/rclone_cloudreve_abnormal.log
ps -ef | grep '/usr/bin/bash /opt/scripts/rclone/rclone_cloudreve_automount.sh' | grep -v 'grep' >> /tmp/rclone/rclone_cloudreve_abnormal.log
fi
# print the states for debug
echo `date` >> $cloudreve_log
echo loaded = $loaded >> $cloudreve_log
echo rclone_running = $rclone_running >> $cloudreve_log
echo script_running = $script_running >> $cloudreve_log
# exit 5
# decide whether the `rclone` mount is functioning normally
if [[ $1 == '-f' ]]; then
echo -e "Happening @ $(date) [Cloudreve] Executing BY Hands.\n" >> $cloudreve_log
if [[ $script_running -eq 1 ]]; then
rclone_cloudreve_reset
    elif [[ $script_running -eq 2 ]]; then
echo "Happening @ $(date) [Cloudreve] Script rclone_cloudreve_automount.sh -f has already been executing..." | tee -a $cloudreve_log
echo "Happening @ $(date) [Cloudreve] Cloudreve RESET will be done with -f option" | tee -a $cloudreve_log
for rclone_cloudreve_automount_pid in `cat /tmp/rclone/rclone_cloudreve_automount_pids`; do
[[ $rclone_cloudreve_automount_pid != $pid_self ]] && kill -9 $rclone_cloudreve_automount_pid > /dev/null 2>&1
done
rclone_cloudreve_reset
else
echo "Happening @ $(date) [Cloudreve] In general, this case will NOT happen" >> $cloudreve_log
fi
elif [[ $1 == '-c' ]]; then
echo -e "Happening @ $(date) [Cloudreve] Executing BY Cron Service.\n" >> $cloudreve_log
if [[ $script_running -eq 1 ]]; then
rclone_cloudreve_reset
    elif [[ $script_running -eq 2 ]]; then
echo "Happening @ $(date) [Cloudreve] Script rclone_cloudreve_automount.sh -c has already been executing..." | tee -a $cloudreve_log
echo "Happening @ $(date) [Cloudreve] Cloudreve RESET will be done on CRON condition." | tee -a $cloudreve_log
for rclone_cloudreve_automount_pid in `cat /tmp/rclone/rclone_cloudreve_automount_pids`; do
[[ $rclone_cloudreve_automount_pid != $pid_self ]] && kill -9 $rclone_cloudreve_automount_pid > /dev/null 2>&1
done
rclone_cloudreve_reset
else
echo "Happening @ $(date) [Cloudreve] In general, this case will NOT happen" >> $cloudreve_log
fi
elif [[ $1 == '' ]]; then
sleep 10
    if [[ $script_running -eq 1 ]]; then
if [[ $loaded -ne 1 || $rclone_running -ne 1 ]]; then
echo "Happening @ $(date) [Cloudreve] Executing automatically." >> $cloudreve_log
rclone_cloudreve_reset
fi
elif [[ $script_running -eq 2 ]]; then
echo "Happening @ $(date) [Cloudreve] Script rclone_cloudreve_automount.sh auto has already been executing..." | tee -a $cloudreve_log
echo "Happening @ $(date) [Cloudreve] Nothing will be done at this auto-situation" | tee -a $cloudreve_log
# for rclone_cloudreve_automount_pid in `cat /tmp/rclone_cloudreve_automount_pids`; do
# [[ $rclone_cloudreve_automount_pid != $pid_self ]] && kill -9 $rclone_cloudreve_automount_pid > /dev/null 2>&1
# done
# rclone_cloudreve_reset
else
echo "Happening @ $(date) [Cloudreve] In general, this case will NOT happen" >> $cloudreve_log
fi
else
echo "Happening @ $(date) [Cloudreve] Wrong usage of script/tool, only accept -f | -c or nothing as argument." >> $cloudreve_log
fi
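#
# Usage sketch (the cron schedule below is an assumption, not taken from this repo;
# the script path matches the one this script greps for):
#   crontab entry:  */10 * * * * /usr/bin/bash /opt/scripts/rclone/rclone_cloudreve_automount.sh -c
#   by hand:        /usr/bin/bash /opt/scripts/rclone/rclone_cloudreve_automount.sh -f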

View File

@ -0,0 +1,116 @@
#!/bin/bash
#===================================================================
# Filename : rclone_onedrive_automount.sh
# Function :
# Usage :
# Author : Manford Fan
# Date : 2022-10-19 14:05
# Version : Version 0.1
# Disclaimer : The author is NOT responsible for any loss caused
# by the user's own operations.
# And More : If you find there are some bugs in this script
# Or you have better ideas, please do contact me
# via E-mail -- mffan0922@163.com
#===================================================================
function rclone_onedrive_reset() {
for i in `seq 3`; do
fusermount -uzq /opt/webdav/onedrive > /dev/null 2>&1
umount /opt/webdav/onedrive > /dev/null 2>&1
sleep 2
done
ps -ef | grep 'rclone mount Onedrive' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_onedrive_pids
for rclone_onedrive_pid in `cat /tmp/rclone/rclone_onedrive_pids`; do
kill -9 $rclone_onedrive_pid;
done
nohup /usr/bin/rclone mount Onedrive:/ /opt/webdav/onedrive \
--allow-other --vfs-cache-mode full --vfs-cache-max-size 10G --vfs-read-ahead 100M --dir-cache-time 2m --poll-interval 0 \
--vfs-cache-max-age 4h --cache-dir /tmp/vfs-cache --bwlimit-file 20M --bwlimit 100M --no-update-modtime --contimeout 30m \
--log-file /opt/logs/rclone/rclone_onedrive.log --log-level NOTICE --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 500M \
--buffer-size 200M --attr-timeout 5m --transfers=6 --multi-thread-streams=6 --fast-list --timeout 30m \
--allow-non-empty --no-modtime --max-duration 3h --vfs-read-wait 2s --vfs-write-wait 5s > /dev/null 2>&1 &
}
onedrive_log='/opt/logs/rclone/rclone_onedrive.log'
pid_self=$$
# get all kinds of states for later decision
num=`cat /proc/mounts | grep /opt/webdav/onedrive | wc -l`
[[ $num -eq 0 ]] && loaded=0
[[ $num -eq 1 ]] && loaded=1
[[ $num -gt 1 ]] && loaded=2
ps -ef | grep '/usr/bin/rclone mount Onedrive' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_onedrive_pids
num=`cat /tmp/rclone/rclone_onedrive_pids | wc -l`
[[ $num -eq 0 ]] && rclone_running=0
[[ $num -eq 1 ]] && rclone_running=1
[[ $num -gt 1 ]] && rclone_running=2
sleep 2
ps -ef | grep '/usr/bin/bash /opt/scripts/rclone/rclone_onedrive_automount.sh' | grep -v 'grep' > /tmp/rclone/rclone_onedrive_automount_pids
let num=`cat /tmp/rclone/rclone_onedrive_automount_pids | sed -e '/^$/d' | wc -l`
[[ $num -eq 1 ]] && script_running=1
[[ $num -gt 1 ]] && script_running=2
# print the states for debug
echo `date` >> $onedrive_log
echo loaded = $loaded >> $onedrive_log
echo rclone_running = $rclone_running >> $onedrive_log
echo script_running = $script_running >> $onedrive_log
# exit 5
# decide whether the `rclone` mount is functioning normally
if [[ $1 == '-f' ]]; then
echo -e "Happening @ $(date) [Onedrive] Executing BY Hands.\n" >> $onedrive_log
if [[ $script_running -eq 1 ]]; then
rclone_onedrive_reset
    elif [[ $script_running -eq 2 ]]; then
echo "Happening @ $(date) [Onedrive] Script rclone_onedrive_automount.sh -f has already been executing..." | tee -a $onedrive_log
echo "Happening @ $(date) [Onedrive] Onedrive RESET will be done with -f option" | tee -a $onedrive_log
for rclone_onedrive_automount_pid in `cat /tmp/rclone/rclone_onedrive_automount_pids`; do
[[ $rclone_onedrive_automount_pid != $pid_self ]] && kill -9 $rclone_onedrive_automount_pid > /dev/null 2>&1
done
rclone_onedrive_reset
else
echo "Happening @ $(date) [Onedrive] In general, this case will NOT happen" >> $onedrive_log
fi
elif [[ $1 == '-c' ]]; then
echo -e "Happening @ $(date) [Onedrive] Executing BY Cron Service.\n" >> $onedrive_log
if [[ $script_running -eq 1 ]]; then
rclone_onedrive_reset
    elif [[ $script_running -eq 2 ]]; then
echo "Happening @ $(date) [Onedrive] Script rclone_onedrive_automount.sh -c has already been executing..." | tee -a $onedrive_log
echo "Happening @ $(date) [Onedrive] Onedrive RESET will be done on CRON condition." | tee -a $onedrive_log
for rclone_onedrive_automount_pid in `cat /tmp/rclone/rclone_onedrive_automount_pids`; do
[[ $rclone_onedrive_automount_pid != $pid_self ]] && kill -9 $rclone_onedrive_automount_pid > /dev/null 2>&1
done
rclone_onedrive_reset
else
echo "Happening @ $(date) [Onedrive] In general, this case will NOT happen" >> $onedrive_log
fi
elif [[ $1 == '' ]]; then
sleep 10
    if [[ $script_running -eq 1 ]]; then
if [[ $loaded -ne 1 || $rclone_running -ne 1 ]]; then
echo "Happening @ $(date) [Onedrive] Executing automatically." >> $onedrive_log
rclone_onedrive_reset
fi
elif [[ $script_running -eq 2 ]]; then
echo "Happening @ $(date) [Onedrive] script rclone_onedrive_automount.sh auto has already been executing..." | tee -a $onedrive_log
echo "Happening @ $(date) [Onedrive] Nothing will be done at this auto-situation" | tee -a $onedrive_log
echo "Nothing will be done at this situation" | tee -a $onedrive_log
# for rclone_onedrive_automount_pid in `cat /tmp/rclone_onedrive_automount_pids`; do
# [[ $rclone_onedrive_automount_pid != $pid_self ]] && kill -9 $rclone_onedrive_automount_pid > /dev/null 2>&1
# done
# rclone_onedrive_reset
else
echo "Happening @ $(date) [Onedrive] In general, this case will NOT happen" >> $onedrive_log
fi
else
echo "Happening @ $(date) [Onedrive] Wrong usage of script/tool, only accept -f | -c or nothing as argument." >> $onedrive_log
fi

21
old/rclone/rclone_sync.sh Normal file
View File

@ -0,0 +1,21 @@
#!/bin/bash
#===================================================================
# Filename    : rclone_sync.sh
# Function :
# Usage :
# Author : Manford Fan
# Date : 2022-04-12 09:50
# Version : Version 0.1
# Disclaimer : The author is NOT responsible for any loss caused
# by the user's own operations.
# And More : If you find there are some bugs in this script
# Or you have better ideas, please do contact me
# via E-mail -- mffan0922@163.com
#===================================================================
rclone sync -P /opt/media/Kindle/ Onedrive:/A-Book/Kindle/
rclone sync -P /opt/media/Music/ Onedrive:/B-Media/Music/Koel/
rclone sync -P Onedrive:/ /opt/webdav/wd/72-Backups/Onedrive/ --exclude=/E-Github/**
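#
# A dry run can be previewed first with rclone's --dry-run flag (sketch, same paths as above):
# rclone sync -P --dry-run /opt/media/Kindle/ Onedrive:/A-Book/Kindle/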

330
old/restore.sh Normal file
View File

@ -0,0 +1,330 @@
#!/bin/bash
# ===========================================================================
# This script must be executed with root privileges
if [[ $(id -u) -ne 0 ]]; then
echo -e "\e[1;31mThis script MUST be executed with root privilege.\e[0m\n"
exit 1
fi
# ===========================================================================
# Double-check that you really want to run this script
echo -e "\e[1;2;31m[VPS USE ONLY] - Are you sure you want to run this script to re-configure your system???\e[0m"
read -p "Yes/No: " YON
[[ $YON != 'Yes' && $YON != 'YES' && $YON != 'yes' && $YON != 'y' && $YON != 'Y' ]] && exit 2
echo -e "\e[1;2;33m[VPS USE ONLY] - AGAIN, are you sure you want to run this script to re-configure you system???\e[0m"
read -p "Yes/No: " YON
[[ $YON != 'Yes' && $YON != 'YES' && $YON != 'yes' && $YON != 'y' && $YON != 'Y' ]] && exit 3
# ===========================================================================
# extract backup files
vps=`find . -name 'vps*.tar.xz'`
if [[ ! -f flg && $vps != '' ]]; then
echo -e "\n\e[1;34mExtracting backups to current dir...\e[0m\n"
tar -I pixz -xmf vps*.xz
touch flg
elif [[ -f flg ]]; then
echo -e "\n\e[1;32mAlready extracted, doing nothing.\e[0m\n"
else
echo -e "\n\e[1;31mThere is no backup file right here, plz check.\e[0m\n"
exit 4
fi
# ===========================================================================
# some prerequisites: set up required paths & content
echo -e "\n\e[1;34mPreparing initial env...\e[0m\n"
rm -rf /opt/*
mkdir -p /opt/logs
mkdir -p /opt/logs/rclone
mkdir -p /opt/temp
mkdir -p /opt/webdav/{alist,onedrive,wd}
mkdir -p /root/.pip
cp -rf configs scripts source-code websites /opt/
cp /opt/configs/pip.conf /root/.pip
# ===========================================================================
# set hostname
echo -e "\n\e[1;34mConfig hostname...\e[0m\n"
echo -ne "\e[1;34mPlz specify hostname: \e[0m"
read -t 600 host
hostnamectl set-hostname $host
name=`hostname`
cat /etc/hosts | grep -q $name
[[ $? -ne 0 ]] && sed -i "/^127/ s|$| $name|g" /etc/hosts
# ===========================================================================
# config self-defined environment variable and function
echo -e "\n\e[1;34mconfig self-defined environment variable and function...\e[0m\n"
cat /root/.bashrc | grep -q 'toolbox'
[[ $? -ne 0 ]] && echo 'source /opt/scripts/utool/toolbox.sh' >> /root/.bashrc
source /root/.bashrc
chmod +x /opt/scripts/utool/utool.py
rm -rf /usr/local/bin/utool
ln -s /opt/scripts/utool/utool.py /usr/local/bin/utool
# ===========================================================================
# set apt sources
echo -e "\n\e[1;34mConfig apt source list...\e[0m\n"
cat > /etc/apt/sources.list << EOF
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc) main contrib non-free
deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc) main contrib non-free
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc)-updates main contrib non-free
deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc)-updates main contrib non-free
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc)-backports main contrib non-free
deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc)-backports main contrib non-free
deb https://mirrors.tuna.tsinghua.edu.cn/debian-security $(lsb_release -sc)-security main contrib non-free
deb-src https://mirrors.tuna.tsinghua.edu.cn/debian-security $(lsb_release -sc)-security main contrib non-free
EOF
echo -e "\n\e[1;34mUpdating system...\e[0m\n"
apt update && apt upgrade -y
# ===========================================================================
# install some frequently used software
echo -e "\n\e[1;34mInstalling some tools...\e[0m\n"
apt install lrzsz unzip vim gcc g++ make automake curl wget gnupg2 aria2 jq apt-transport-https \
ca-certificates lsb-release debian-archive-keyring oathtool ufw ruby ruby-dev qbittorrent-nox\
git shc tmux htop pwgen imagemagick bash-completion dnsutils ghostscript nethogs ffmpeg iftop \
python3-pip python3-dev golang net-tools ethtool tcpflow lshw rsync parallel rclone pigz pbzip2 \
pixz neofetch mlocate ncdu dstat fzf tldr nscd inotify-hookable inotify-tools vsftpd mtr bridge-utils -y
# ===========================================================================
# update pip3 setuptools and install jupyter lab
echo -e "\n\e[1;34mupdate pip3 setuptools and install jupyter lab...\e[0m\n"
pip3 install --upgrade setuptools -i https://pypi.tuna.tsinghua.edu.cn/simple
pip3 install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple
pip3 install ipython -i https://pypi.tuna.tsinghua.edu.cn/simple
pip3 install jupyterlab -i https://pypi.tuna.tsinghua.edu.cn/simple
# cp /root/.jupyter/jupyter_lab_config.py /root/.jupyter/jupyter_lab_config_origin.py
# cp /opt/configs/jupyter/jupyter_lab_config.py /root/.jupyter/
# nohup jupyter lab --allow-root > /dev/null 2>&1 &
# ===========================================================================
# configure vim
echo -e "\n\e[1;34mConfig vim editor...\e[0m\n"
cd /opt/configs/tools/
[[ -d vim ]] && rm -rf vim
unzip -q vimConfig.zip
cd vim && bash install.sh
cd .. && rm -rf vim
# ===========================================================================
# config ssh git ufw and aria2
echo -e "\n\e[1;34mConfig publickey ssh && git && ufw && aria2...\e[0m\n"
cd /opt/configs/rsa/
cp -f VPS* Github* config /root/.ssh/
cat VPS.pub > /root/.ssh/authorized_keys
echo '' >> /root/.ssh/authorized_keys
chmod 600 /root/.ssh/*
git config --global user.name 'mffan0922'
git config --global user.email 'mffan0922@163.com'
# ufw allow 22
# ufw allow 80
# ufw allow 443
ufw disable
cp -rf /opt/configs/aria2/ /etc/
> /etc/aria2/aria2.session
# ===========================================================================
# install nginx
echo -e "\n\e[1;34mInstalling nginx...\e[0m\n"
apt install libpcre3 libpcre3-dev openssl libssl-dev zlib1g-dev libgeoip-dev -y
cd /opt/source-code/nginx-1.22.0/
./configure --prefix=/usr/local/nginx \
--with-select_module \
--with-poll_module \
--with-threads \
--with-file-aio \
--with-http_ssl_module \
--with-http_v2_module \
--with-http_realip_module \
--with-http_addition_module \
--with-http_geoip_module \
--with-http_sub_module \
--with-http_dav_module \
--with-http_flv_module \
--with-http_mp4_module \
--with-http_gunzip_module \
--with-http_gzip_static_module \
--with-http_auth_request_module \
--with-http_random_index_module \
--with-http_secure_link_module \
--with-http_degradation_module \
--with-http_slice_module \
--with-http_stub_status_module \
--with-mail \
--with-mail_ssl_module \
--with-stream \
--with-stream_ssl_module \
--with-stream_realip_module \
--with-stream_geoip_module \
--with-stream_ssl_preread_module \
--user=www-data \
--group=www-data \
--add-module=/opt/source-code/nginx-1.22.0/modules/headers-more-nginx-module
make -j 4 && make install
[[ -f /usr/sbin/nginx ]] && rm -rf /usr/sbin/nginx
ln -s /usr/local/nginx/sbin/nginx /usr/sbin/nginx
cp -rf /opt/configs/nginx/nginx.conf /usr/local/nginx/conf/
cp -rf /opt/configs/nginx/nginx.service /lib/systemd/system/
systemctl enable nginx.service
systemctl start nginx.service
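# Optional sanity check before moving on (standard nginx/systemd commands):
# nginx -t && systemctl status nginx.service --no-pager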
# ===========================================================================
# get https certificates
echo -e "\n\e[1;34mSetting https...\e[0m\n"
cd /root/ && git clone git@github.com:acmesh-official/acme.sh.git
cd acme.sh && ./acme.sh --install -m mffan0922@163.com
alias acme.sh=~/.acme.sh/acme.sh
./acme.sh --issue --dns dns_ali -d rustle.cc -d *.rustle.cc
cp /root/.acme.sh/rustle.cc/fullchain.cer /opt/configs/certs/rustle.cc.cer
cp /root/.acme.sh/rustle.cc/rustle.cc.key /opt/configs/certs/
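# A hedged alternative: let acme.sh install the cert and reload nginx on renewal
# (--install-cert and its flags are part of acme.sh; the reload command is an assumption for this setup):
# ./acme.sh --install-cert -d rustle.cc \
#     --fullchain-file /opt/configs/certs/rustle.cc.cer \
#     --key-file /opt/configs/certs/rustle.cc.key \
#     --reloadcmd "systemctl reload nginx.service"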
# ===========================================================================
# install jekyll
echo -e "\n\e[1;34mInstall jekyll blog env...\e[0m\n"
gem install jekyll jekyll-paginate
# ===========================================================================
echo -e "\n\e[1;34mInstalling mysql server...\e[0m\n"
cd /opt/configs/mysql/ && dpkg -i mysql-apt-config_0.8.23-1_all.deb
apt update && apt upgrade -y
apt install mysql-server -y
# cp -f /opt/configs/mysql/mysql.cnf /etc/mysql/conf.d/
systemctl restart mysql.service
# ===========================================================================
# install php8.0 for nextcloud
echo -e "\n\e[1;34mInstall php8.0...\e[0m\n"
wget -O /usr/share/keyrings/php-archive-keyring.gpg https://packages.sury.org/php/apt.gpg
echo "deb [signed-by=/usr/share/keyrings/php-archive-keyring.gpg] https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list
apt update && apt upgrade -y
apt install php8.0-fpm php8.0-cli php8.0-mysql php8.0-curl php8.0-gd \
php8.0-mbstring php8.0-xml php8.0-zip php8.0-imap php8.0-opcache \
php8.0-soap php8.0-gmp php8.0-bcmath php8.0-intl php8.0-imagick -y
# ===========================================================================
# configure nextcloud
echo -e "\n\e[1;34mRestore nextcloud env...\e[0m\n"
apt install php8.0-memcache* memcached php8.0-apcu libmagickcore-6.q16-6-extra -y
echo -e "\n\e[1;34mbackup origin php data and restore previous php data...\e[0m\n"
cp -rf /etc/php/ /tmp/
cp -rf /opt/configs/php/8.0/fpm/pool.d/www.conf /etc/php/8.0/fpm/pool.d/www.conf
cp -rf /opt/configs/php/8.0/mods-available/apcu.ini /etc/php/8.0/mods-available/apcu.ini
cp -rf /opt/configs/php/8.0/cli/php.ini /etc/php/8.0/cli/php.ini
cp -rf /opt/configs/php/8.0/fpm/php.ini /etc/php/8.0/fpm/php.ini
# ===========================================================================
# restore mysql data
echo -e "\n\e[1;34mrestore mysql data...\e[0m\n"
cp /opt/configs/mysql/*.gz /root
cd /root && gzip -d sql-*.gz
mysql -uroot < sql-*.sql
rm sql*
# ===========================================================================
# configure frpc
echo -e "\n\e[1;34mRestore frpc env...\e[0m\n"
cp /opt/source-code/frpc/frpc.service /lib/systemd/system/
cp /opt/source-code/frpc/frpc-free.service /lib/systemd/system/
systemctl enable frpc.service
systemctl enable frpc-free.service
systemctl start frpc.service
systemctl start frpc-free.service
# ===========================================================================
# configure cloudreve
echo -e "\n\e[1;34mRestore cloudreve env...\e[0m\n"
cp /opt/source-code/cloudreve/cloudreve.service /lib/systemd/system/
systemctl enable cloudreve.service
systemctl start cloudreve.service
# ===========================================================================
# configure navidrome
echo -e "\n\e[1;34mRestore navidrome env...\e[0m\n"
cp /opt/source-code/navidrome/navidrome.service /lib/systemd/system/
systemctl enable navidrome.service
systemctl start navidrome.service
# ===========================================================================
# configure calibre
echo -e "\n\e[1;34mStarting calibre...\e[0m\n"
nohup /usr/bin/python3 /opt/source-code/calibre-web/cps.py > /dev/null 2>&1 &
# ===========================================================================
# configure blog
echo -e "\n\e[1;34mStarting blog...\e[0m\n"
nohup /usr/bin/ruby2.7 /usr/local/bin/jekyll b -s /opt/source-code/blog/ -d /opt/websites/blog/ --trace --watch --incremental > /dev/null 2>&1 &
# ===========================================================================
# configure alist
echo -e "\n\e[1;34mConfig alist...\e[0m\n"
cp /opt/source-code/alist/alist.service /lib/systemd/system/
systemctl enable alist.service
systemctl start alist.service
# ===========================================================================
# configure rclone
echo -e "\n\e[1;34mConfig rclone...\e[0m\n"
cp -rf /opt/configs/rclone /root/.config/
# ===========================================================================
# install php-8.1 & nodejs 16x
echo -e "\n\e[1;34mInstall php-8.1 & nodejs 16x for monica...\e[0m\n"
apt install -y php8.1 php8.1-bcmath php8.1-curl php8.1-gd php8.1-gmp php8.1-intl \
php8.1-mbstring php8.1-mysql php8.1-redis php8.1-xml php8.1-zip
curl -sSL https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin/ --filename=composer
curl -fsSL https://deb.nodesource.com/setup_16.x | bash -
apt-get install -y nodejs
npm install --global yarn
cd /opt/source-code/monica/
composer install --no-interaction --no-dev
yarn install
yarn run production
php artisan key:generate
php artisan setup:production -v
# ===========================================================================
echo -e "\n\e[1;34mRemove no longer required packages...\e[0m\n"
apt autoremove -y
# ===========================================================================
echo -e "\n\e[1;34mRestart nginx mariadb php and cloudreve services...\e[0m\n"
systemctl restart nginx.service
systemctl restart mysql.service
systemctl restart cloudreve.service
# ===========================================================================
echo -e "\n\e[1;34mimprove nextcloud performance...\e[0m\n"
chown -R www-data:www-data /opt/websites/
# cd /opt/websites/nextcloud/
# sudo -u www-data php8.0 occ config:app:set files max_chunk_size --value 0
# sudo -u www-data php8.0 occ files:scan --all
# ===========================================================================
echo -e "\n\e[1;34mConfig crontabs and set correct timezone...\e[0m\n"
cp -f /opt/configs/crontabs/* /var/spool/cron/crontabs/
timedatectl set-timezone Asia/Shanghai
# ===========================================================================
echo -e "\n\e[1;31m基本环境已经安装完成还需要手动配置如下\e[0m\n"
echo " 1. 查看Homepage/Wiki/Nav站点是否可以正常访问"
echo " 2. 查看blog生成日志是否正常"
echo " 3. 访问nextcloud/cloudreve站点是否可以正常登陆并手动优化"
echo " 4. 手动配置Jupyter Lab"
echo " 5. 检查Navidrome是否能正常播放音乐"
echo " 6. 手动运行一次qbittorrent-nox并配置相关选项"
echo " 7. 需要手动配置koel并运行"
echo " 8. 检查frp的运行状态"
echo " 9. 直接访问Calibre Web看是否可以正常访问"
echo " 10. 需要手动配置monica并运行"
echo " 11. 手动安装jellyfin因为可能安装包无法下载需要去腾讯云主机下载传过来再安装"
echo " 12. 访问Alist主页看是否可以正常访问"
echo " 13. 访问uptime status看是否可以正常访问"
echo " 14. 手动安装bashit"
echo " 15. 重启系统"

7
old/sql_backup.sh Normal file
View File

@ -0,0 +1,7 @@
#!/bin/bash
rm -rf /opt/configs/mysql/sql*.gz
filename='sql-'`date +%Y%m%d%H%M`'.sql.gz'
mysql -e "show databases;" -uroot | grep -Ev "Database|information_schema|performance_schema" | xargs mysqldump --skip-lock-tables -uroot --databases | gzip > $filename
mv sql-*.gz /opt/configs/mysql/
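# Restore sketch (standard gzip/mysql usage; assumes a single matching backup file):
# gunzip -c /opt/configs/mysql/sql-*.sql.gz | mysql -uroot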

66
old/v2ray.sh Normal file
View File

@ -0,0 +1,66 @@
#!/bin/bash
env_file='/opt/source-code/v2ray-4.34.0/envfile'
if [[ $1 == 'start' ]]; then
cat $env_file | grep -q 'https_proxy'
if [[ $? -ne 0 ]]; then
echo 'export http_proxy="http://127.0.0.1:10808"' >> $env_file
echo 'export https_proxy="http://127.0.0.1:10809"' >> $env_file
echo 'export all_proxy="socks://127.0.0.1:10809"' >> $env_file
source $env_file
else
echo -e "\e[1;3;31mv2ray has \e[1;3;32mALREADY\e[1;3;31m been started, do nothing...\e[0m"
exit 11
fi
/opt/source-code/v2ray-4.34.0/v2ray -config /opt/source-code/v2ray-4.34.0/config.json > /dev/null 2>&1 &
echo -e "\e[1;33mNow you can surfing around~\e[0m"
elif [[ $1 == 'stop' ]]; then
v2ray_pid=`ps -ef | grep '/opt/source-code/v2ray-4.34.0/v2ray' | grep -v grep | awk '{print $2}'`
> $env_file
source $env_file
if [[ $v2ray_pid != '' ]]; then
for vpid in $v2ray_pid; do
kill -9 $vpid > /dev/null 2>&1
done
echo -e "\e[1;33mWelcome to the real world~\e[0m"
else
echo -e "\e[1;3;31mv2ray has \e[1;3;32mALREADY\e[1;3;31m been stopped, do nothing...\e[0m"
fi
elif [[ $1 == 'renew' ]]; then
read -t 60 -p "Please input valid oversea IP: " ip
sed -i '69s/.*/ "address": "'$ip'",/' /opt/source-code/v2ray-4.34.0/config.json > /dev/null 2>&1 &
sed -i '/azure/{n;s/.*/ Hostname '$ip'/g}' /root/.ssh/config
elif [[ $1 == 'status' ]]; then
cat $env_file | grep -q 'https_proxy'
is_empty=$?
v2ray_pid=`ps -ef | grep '/opt/source-code/v2ray-4.34.0/v2ray' | grep -v grep | awk '{print $2}'`
if [[ $v2ray_pid == '' && $is_empty -ne 0 ]]; then
echo -e "\e[1;36mService is NOT running~\e[0m"
elif [[ $v2ray_pid == '' && $is_empty -eq 0 ]]; then
echo -e "\e[1;35mService is NOT running, BUT need check /opt/source-code/v2ray-4.34.0/envfile content, should be EMPTY\e[0m"
elif [[ $v2ray_pid != '' && $is_empty -eq 0 ]]; then
echo -e "\e[1;32mService is running~\e[0m"
elif [[ $v2ray_pid != '' && $is_empty -ne 0 ]]; then
echo -e "\e[1;35mService is running, BUT need check /opt/source-code/v2ray-4.34.0/envfile content, should NOT be empty~\e[0m"
fi
elif [[ $1 == 'restart' ]]; then
> $env_file
echo 'export http_proxy="http://127.0.0.1:10808"' >> $env_file
echo 'export https_proxy="http://127.0.0.1:10809"' >> $env_file
echo 'export all_proxy="socks://127.0.0.1:10809"' >> $env_file
v2ray_pid=`ps -ef | grep '/opt/source-code/v2ray-4.34.0/v2ray' | grep -v grep | awk '{print $2}'`
if [[ $v2ray_pid == '' ]]; then
/opt/source-code/v2ray-4.34.0/v2ray -config /opt/source-code/v2ray-4.34.0/config.json > /dev/null 2>&1 &
else
:
fi
source $env_file
echo -e "\e[1;35mService restarted, dive deeper~\e[0m"
else
echo -e "\e[1;3;31mOnly accept start|stop|renew as parameter.\e[0m"
exit 1
fi
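#
# Usage sketch (assumes this file is saved and run as a script, e.g. v2ray.sh; the name is an assumption):
#   bash v2ray.sh start|stop|restart|status|renew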