[init] initial commit
commit 66b1dd4d70
BIN
alert/__pycache__/calendar.cpython-39.pyc
Normal file
Binary file not shown.
72
alert/calendar_tips.py
Normal file
@@ -0,0 +1,72 @@
import json
import time
import datetime
import requests


def send_msg_tip(msg_tip):
    # timestamp used when logging a failed send
    alert_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # Enterprise WeChat bot webhook and the message body to send
    hook_url = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=ddea3f5f-fbfc-4c21-994a-71e9fc50e4ef'
    body = {
        "msgtype": "text",
        "text": {
            "content": msg_tip
        }
    }

    # call the webhook API
    res = requests.post(hook_url, data=json.dumps(body, ensure_ascii=False).encode('utf-8'))

    # on failure, log the message to /opt/logs/alert.log
    if res.status_code != 200:
        with open('/opt/logs/alert.log', 'a', encoding='utf-8') as alert_log:
            alert_log.write(alert_datetime + ' >>>> ')
            alert_log.write('Failed sending message: ')
            alert_log.write(msg_tip + '\n')


# query today's holiday information
def holiday_today(app_id, app_secret):
    week_index = {1: '一', 2: '二', 3: '三', 4: '四', 5: '五', 6: '六', 7: '天', }
    today = datetime.date.today()
    today_fmt = str(today).replace('-', '')
    api_url = f'https://www.mxnzp.com/api/holiday/single/{today_fmt}?ignoreHoliday=false&app_id={app_id}&app_secret={app_secret}'
    res = requests.get(api_url)

    res_http_code = res.status_code
    res_text = json.loads(res.text)
    res_code = res_text['code']
    res_msg = res_text['msg']
    if res_http_code != 200 or res_code == 0:
        msg_tip = res_msg
    else:
        res_weekday = res_text['data']['weekDay']
        res_yeartips = res_text['data']['yearTips']
        res_chinesezodiac = res_text['data']['chineseZodiac']
        res_typedes = res_text['data']['typeDes']
        res_type = res_text['data']['type']
        res_dayofyear = res_text['data']['dayOfYear']
        res_weekofyear = res_text['data']['weekOfYear']
        res_constellation = res_text['data']['constellation']
        msg_tip = f'{today},{res_yeartips}{res_chinesezodiac}年,星期{week_index[res_weekday]},{res_constellation}。本周是今年的第{res_weekofyear}周,今天是今年的第{res_dayofyear}天,是{res_typedes},'
        if res_type == 2 or res_type == 1:
            msg_tip += f"请好好休息,享用美好的一天。"
        else:
            msg_tip += f"请努力工作,保持良好的心态。"

    send_msg_tip(msg_tip)


def main():
    app_id = "nrsngdkvknqkrwko"
    app_secret = "SFFmQWo2dnNBRjdNYkVSclZxa2ZvUT09"
    holiday_today(app_id, app_secret)


if __name__ == '__main__':
    main()
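For reference, the POST in send_msg_tip() can also be written with the json= parameter of requests, which serializes the body and sets the Content-Type header itself; a minimal sketch, assuming the webhook key is replaced with a real one and that the bot answers with the usual errcode/errmsg envelope (the helper name below is made up for illustration):

import requests

def post_text_to_wechat_bot(hook_url: str, content: str) -> bool:
    # Sketch: send a plain-text message to an Enterprise WeChat group bot.
    body = {"msgtype": "text", "text": {"content": content}}
    # requests serializes `body` to JSON and sets Content-Type: application/json
    res = requests.post(hook_url, json=body, timeout=10)
    if res.status_code != 200:
        return False
    # the webhook normally answers with {"errcode": 0, "errmsg": "ok"} on success
    return res.json().get("errcode") == 0

if __name__ == '__main__':
    hook = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=YOUR-KEY'  # placeholder key
    print(post_text_to_wechat_bot(hook, 'hello from calendar_tips'))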
12
alert/docker_alarm.sh
Normal file
@@ -0,0 +1,12 @@
#!/bin/bash

docker ps -a --format "table {{.Names}}\t{{.Status}}" > /opt/logs/docker_status.log
while read line; do
    echo $line | grep -q 'Exited'
    if [[ $? -eq 0 ]]; then
        name=`echo $line | awk '{print $1}'`
        alarm="Docker Alarm - $name:\nContainer $name has gone offline, please check ASAP."
        bash /opt/scripts/alert/sendmsg.sh "$alarm"
    fi
done < /opt/logs/docker_status.log
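The same exited-container check, sketched in Python with subprocess instead of a temporary log file plus grep/awk; this assumes the docker CLI is on PATH and only prints the alarm text instead of calling sendmsg.sh:

import subprocess

def exited_containers():
    # Sketch: names of containers whose status column starts with 'Exited'.
    out = subprocess.run(
        ['docker', 'ps', '-a', '--format', '{{.Names}}\t{{.Status}}'],
        capture_output=True, text=True, check=True).stdout
    return [name for name, status in
            (line.split('\t', 1) for line in out.splitlines())
            if status.startswith('Exited')]

if __name__ == '__main__':
    for name in exited_containers():
        print(f'Docker Alarm - {name}: container has gone offline, please check ASAP.')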
64
alert/love_words.py
Normal file
@@ -0,0 +1,64 @@
#!/usr/bin/env python3
import json
import time
import datetime
import requests


def send_msg_tip(msg_tip):
    # timestamp used when logging a failed send
    alert_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # Enterprise WeChat bot webhook and the message body to send
    hook_url = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=ddea3f5f-fbfc-4c21-994a-71e9fc50e4ef'
    body = {
        "msgtype": "text",
        "text": {
            "content": msg_tip
        }
    }

    # call the webhook API
    res = requests.post(hook_url, data=json.dumps(body, ensure_ascii=False).encode('utf-8'))

    # on failure, log the message to /opt/logs/alert.log
    if res.status_code != 200:
        with open('/opt/logs/alert.log', 'a', encoding='utf-8') as alert_log:
            alert_log.write(alert_datetime + ' >>>> ')
            alert_log.write('Failed sending message: ')
            alert_log.write(msg_tip + '\n')


# fetch today's recommended daily sentences
def love_sentence(app_id, app_secret):

    api_url = f'https://www.mxnzp.com/api/daily_word/recommend?count=10&app_id={app_id}&app_secret={app_secret}'
    res = requests.get(api_url)

    res_http_code = res.status_code
    res_text = json.loads(res.text)
    res_code = res_text['code']
    res_msg = res_text['msg']
    if res_http_code != 200 or res_code == 0:
        msg_tip = res_msg
    else:
        res_data = res_text['data']
        msg_tip = ''
        for item in res_data:
            msg_tip += f'{item["content"]}\n'

    # print('*' * 20)
    # print(msg_tip)

    send_msg_tip(msg_tip)


def main():
    app_id = "nrsngdkvknqkrwko"
    app_secret = "SFFmQWo2dnNBRjdNYkVSclZxa2ZvUT09"
    love_sentence(app_id, app_secret)


if __name__ == '__main__':
    main()
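calendar_tips.py and love_words.py repeat the same handling of the mxnzp.com envelope (HTTP status plus a code/msg/data wrapper). A small shared helper could factor that out; a sketch that treats code == 0 as a failed call, the same way the scripts above do (the function name mxnzp_get is hypothetical):

import requests

def mxnzp_get(api_url: str):
    # Sketch: fetch an mxnzp.com endpoint and unwrap its code/msg/data envelope.
    # Returns (data, None) on success, or (None, error_message) on failure.
    res = requests.get(api_url, timeout=10)
    if res.status_code != 200:
        return None, f'HTTP {res.status_code}'
    payload = res.json()
    if payload.get('code') == 0:        # the scripts above treat code == 0 as failure
        return None, payload.get('msg', 'unknown error')
    return payload.get('data'), None

With such a helper, love_sentence() reduces to joining the "content" fields of the returned list, and holiday_today() to formatting the returned dict.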
120
alert/poem_send.py
Normal file
@@ -0,0 +1,120 @@
import requests
import json
import time
import sys


def send_alert_msg(alert_msg):
    """
    Send a message via the Enterprise WeChat bot, with a poem from the Jinrishici API as content.
    今日诗词:<famous line>

    【title】-【author】-【dynasty】
    <full poem>
    :param alert_msg: content in the format shown above
    :return: None
    """

    # timestamp used when logging a failed send
    alert_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # Enterprise WeChat bot webhook and the message body to send
    hook_url = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=ddea3f5f-fbfc-4c21-994a-71e9fc50e4ef'
    body = {
        "msgtype": "text",
        "text": {
            "content": alert_msg
        }
    }

    # call the webhook API
    res = requests.post(hook_url, data=json.dumps(body, ensure_ascii=False).encode('utf-8'))

    # on failure, log the message to /opt/logs/alert.log
    if res.status_code != 200:
        with open('/opt/logs/alert.log', 'a', encoding='utf-8') as alert_log:
            alert_log.write(alert_datetime + ' >>>> ')
            alert_log.write('Failed sending message: ')
            alert_log.write(alert_msg + '\n')


def poem():
    """
    Get a poem from the Jinrishici API and send it.
    :return: None
    """
    # specify the token in the headers
    headers = {'X-User-Token': 'dNigXSFtjhLbP5nf49piUPzmD7NoNHVz'}
    api_url = 'https://v2.jinrishici.com/sentence'
    # try up to 3 times, in case a request for the poem fails
    for i in range(3):
        res = requests.get(api_url, headers=headers)
        # parse the JSON response into a dict
        dict_substance = json.loads(res.content)

        # if the content could not be fetched, try again
        if res.status_code != 200 or dict_substance['status'] != 'success':
            continue

        # pull the needed fields into dedicated variables
        # print(dict_substance)
        poem_content = dict_substance['data']['content']
        poem_title = dict_substance['data']['origin']['title']
        poem_dynasty = dict_substance['data']['origin']['dynasty']
        poem_author = dict_substance['data']['origin']['author']
        poem_content_all = dict_substance['data']['origin']['content']
        poem_translation = dict_substance['data']['origin']['translate']
        # write the translation to /opt/scripts/alert/poem_trans.txt, if there is one
        with open('/opt/scripts/alert/poem_trans.txt', 'w', encoding='utf-8') as obj_poem_trans:
            if poem_translation:
                for item in poem_translation:
                    obj_poem_trans.write(item + '\n')
            # if there is no translation, write an empty string to the file
            else:
                obj_poem_trans.write('')

        # build the message to be sent via the EWB, aka Enterprise WeChat Bot
        alert_msg = f"今日诗词:{poem_content}\n\n"
        alert_msg += f"【{poem_title}】-【{poem_author}】-【{poem_dynasty}】\n"
        for line in poem_content_all:
            alert_msg += f"{line}\n"

        # once the needed content has been fetched, leave the loop
        break
    # if the content still cannot be fetched after 3 tries, send the following warning instead
    else:
        alert_msg = '当前无法获取今日诗词,请手动检查如下请求返回是否正确!\n'
        alert_msg += 'curl "https://v2.jinrishici.com/sentence" -H "X-User-Token:dNigXSFtjhLbP5nf49piUPzmD7NoNHVz"'

    # send it
    send_alert_msg(alert_msg)


def trans():
    """
    Send the translation of the poem shown this morning, if there is one.
    :return: None
    """
    with open('/opt/scripts/alert/poem_trans.txt', 'r', encoding='utf-8') as obj_poem_trans:
        alert_msg = obj_poem_trans.read()
    # print(alert_msg)
    if alert_msg:
        alert_msg = f'今日诗词的译文:\n{alert_msg}'
        send_alert_msg(alert_msg)


def main():
    """
    Perform the action selected by the command-line argument.
    :return: None
    """
    if sys.argv[1] == 'poem':
        poem()
    elif sys.argv[1] == 'trans':
        trans()
    else:
        pass


if __name__ == '__main__':
    main()
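The retry loop in poem() relies on Python's for/else: the else branch runs only when the loop finishes without hitting break, i.e. when all three attempts failed. A self-contained illustration of that control flow (retry_fetch and the fake fetcher are hypothetical, not part of this commit):

def retry_fetch(fetch, attempts=3):
    # Sketch of the for/else retry pattern used in poem().
    for _ in range(attempts):
        result = fetch()
        if result is None:   # failed attempt -> try the next one
            continue
        break                # success -> the else branch is skipped
    else:
        result = 'fallback message after all attempts failed'
    return result

if __name__ == '__main__':
    calls = iter([None, None, 'a poem'])
    print(retry_fetch(lambda: next(calls)))   # third attempt succeeds -> 'a poem'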
0
alert/poem_trans.txt
Normal file
16
alert/sendmsg.sh
Normal file
@@ -0,0 +1,16 @@
#!/bin/bash

alarm="$1"

curl 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=ddea3f5f-fbfc-4c21-994a-71e9fc50e4ef' \
    -H 'Content-Type: application/json' \
    -d '
    {
        "msgtype": "text",
        "text": {
            "content": "'"$alarm"'"
        }
    }' > /dev/null 2>&1
95
alert/todo_alert.py
Normal file
@@ -0,0 +1,95 @@
import requests
import json
import time
import os


def send_alert_msg(alert_msg):
    """
    Send warning messages to the phone via the Enterprise WeChat bot API.
    :param alert_msg: message to be sent
    :return: None
    """

    # timestamp used when logging a failed send
    alert_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # Enterprise WeChat bot webhook and the message body to send
    hook_url = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=ddea3f5f-fbfc-4c21-994a-71e9fc50e4ef'
    body = {
        "msgtype": "text",
        "text": {
            "content": alert_msg
        }
    }

    # call the webhook API
    res = requests.post(hook_url, data=json.dumps(body, ensure_ascii=False).encode('utf-8'))

    # on failure, log the message to /opt/logs/alert.log
    if res.status_code != 200:
        with open('/opt/logs/alert.log', 'a', encoding='utf-8') as alert_log:
            alert_log.write(alert_datetime + ' >>>> ')
            alert_log.write('Failed sending message: ')
            alert_log.write(alert_msg + '\n')


def main():
    """
    Process /opt/logs/TODO/todo.txt and collect the items that are due within 2 months.
    :return: None
    """

    # run "tt ls" beforehand to regenerate an up-to-date todo.txt
    os.system("bash /opt/scripts/todo/todo.sh ls > /dev/null")
    # initialize alert_msg with an empty string
    alert_msg = ''
    # the range of remaining days that should trigger an alert
    alert_day = list(range(61))
    # pretty index for the output
    alert_index = {1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e', 6: 'f'}
    # prepare a dict keyed by remaining days to receive the matching items
    alert_tasks = dict().fromkeys(alert_day, None)
    for key in alert_tasks.keys():
        alert_tasks[key] = []

    with open('/opt/logs/TODO/todo.txt', mode='r', encoding='utf-8') as todo_txt:
        # process every line of the file and get the time left for the task
        for line in todo_txt.readlines():
            items = line.strip().split('|')
            time_left = int(float(items[2].strip().split(':')[1]))
            content = items[1].strip() + ' | ' + items[3].strip()

            # if the task is due within 2 months, put it into the dict
            if time_left in alert_day:
                alert_tasks[time_left].append(content)

    # check each day for outstanding tasks
    for time_left, task in alert_tasks.items():
        # skip days that have no tasks
        if task == []:
            continue

        # pick grammatically correct word forms
        sp_day = 'days' if time_left > 1 else 'day'
        sp_task = 'tasks' if len(task) > 1 else 'task'

        # different wording depending on how urgent the tasks are
        if time_left == 0:
            alert_msg += f'Today, you NEED to finish the following {sp_task}:\n'
        else:
            alert_msg += f'{time_left} {sp_day} left to finish the following {sp_task}:\n'

        # for each day, list all tasks that need to be done
        count = 1
        for assignment in task:
            alert_msg += f' {alert_index[count]}. {assignment}\n'
            count += 1
        alert_msg += '\n'

    alert_msg += 'So, hurry up!! Go get things done!!'
    send_alert_msg(alert_msg)


if __name__ == '__main__':
    main()
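main() assumes each line of /opt/logs/TODO/todo.txt is pipe-separated, with the remaining days in the third field after a colon. The exact format written by todo.sh is not part of this commit, so the sample line below is an assumption, but it shows how the parsing above works:

# hypothetical todo.txt line in the shape todo_alert.py expects
line = '3 | renew the TLS certificate | left: 12.0 | 2021-09-30'

items = line.strip().split('|')
time_left = int(float(items[2].strip().split(':')[1]))    # -> 12
content = items[1].strip() + ' | ' + items[3].strip()      # -> 'renew the TLS certificate | 2021-09-30'
print(time_left, content)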
98
alert/weather_tips.py
Normal file
@@ -0,0 +1,98 @@
import sys
import json
import datetime
import requests
import time


def send_msg_tip(msg_tip):
    # timestamp used when logging a failed send
    alert_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # Enterprise WeChat bot webhook and the message body to send
    hook_url = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=ddea3f5f-fbfc-4c21-994a-71e9fc50e4ef'
    body = {
        "msgtype": "text",
        "text": {
            "content": msg_tip
        }
    }

    # call the webhook API
    res = requests.post(hook_url, data=json.dumps(body, ensure_ascii=False).encode('utf-8'))

    # on failure, log the message to /opt/logs/alert.log
    if res.status_code != 200:
        with open('/opt/logs/alert.log', 'a', encoding='utf-8') as alert_log:
            alert_log.write(alert_datetime + ' >>>> ')
            alert_log.write('Failed sending message: ')
            alert_log.write(msg_tip + '\n')


def weather_forecast(app_id, app_secret, city):
    api_url = f"https://www.mxnzp.com/api/weather/forecast/{city}?app_id={app_id}&app_secret={app_secret}"
    res = requests.get(api_url)
    res_http_code = res.status_code
    res_text = json.loads(res.text)
    res_code = res_text['code']
    res_msg = res_text['msg']
    # print(res.text)
    if res_http_code != 200 or res_code == 0:
        print(f"接口查询失败:{res_msg}")
    else:
        # print(res_text['data']['forecasts'])
        today = datetime.date.today()
        hour = datetime.datetime.now().hour
        msg_tip = f"{today} {hour}时,查询到{city}最近四天的天气情况如下:\n"
        msg_tip += ('*' * 30 + '\n')
        when = {1: '今天', 2: '明天', 3: '后天', 4: '大后天', }
        week_index = {1: '一', 2: '二', 3: '三', 4: '四', 5: '五', 6: '六', 7: '天', }
        count = 1
        for item in res_text['data']['forecasts']:
            item_date = item['date']
            item_dayofweek = week_index[int(item['dayOfWeek'])]
            if item['dayWeather'] == item['nightWeather']:
                msg_tip += f"{when[count]}({item_date} 星期{item_dayofweek})全天天气是{item['dayWeather']},"
            else:
                msg_tip += f"{when[count]}({item_date} 星期{item_dayofweek})白天天气是{item['dayWeather']},夜间会转为{item['nightWeather']},"

            msg_tip += f"最高温{item['dayTemp']},最低温{item['nightTemp']},"

            difftemp = int(item['dayTemp'].removesuffix('℃')) - int(item['nightTemp'].removesuffix('℃'))
            if difftemp > 10:
                msg_tip += f"昼夜温差{difftemp}℃,请注意增减衣物,切勿感冒;"

            if item['dayWindDirection'] == item['nightWindDirection']:
                msg_tip += f"{when[count]}全天是{item['dayWindDirection']}风,"
                if item['dayWindPower'] == item['nightWindPower']:
                    msg_tip += f"风力为{item['dayWindPower']}。\n"
                else:
                    msg_tip += f"白天风力为{item['dayWindPower']},夜间风力为{item['nightWindPower']}。\n"
            else:
                msg_tip += f"{when[count]}白天是{item['dayWindDirection']}风,夜间会转为{item['nightWindDirection']}风,"
                if item['dayWindPower'] == item['nightWindPower']:
                    msg_tip += f"风力为{item['dayWindPower']}。\n"
                else:
                    msg_tip += f"白天风力为{item['dayWindPower']},夜间风力为{item['nightWindPower']}。\n"
            count += 1
        msg_tip += ('*' * 30 + '\n')

        # print(msg_tip)
        send_msg_tip(msg_tip)


def main():
    app_id = "nrsngdkvknqkrwko"
    app_secret = "SFFmQWo2dnNBRjdNYkVSclZxa2ZvUT09"
    # holiday_today(app_id, app_secret)
    # ip_self(app_id, app_secret)
    try:
        city = sys.argv[1]
    except Exception as Err:
        print(f"Error Desc: {Err}. Maybe you need to supply a correct city next time.")
        exit(2)

    weather_forecast(app_id, app_secret, city)


if __name__ == '__main__':
    main()
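difftemp is computed with str.removesuffix, which only exists on Python 3.9+ (consistent with the cpython-39 bytecode checked in above). A version-independent equivalent, assuming the API returns temperatures such as '28℃':

def temp_to_int(value: str) -> int:
    # Sketch: turn a temperature string such as '28℃' into an int.
    suffix = '℃'
    if value.endswith(suffix):
        value = value[:-len(suffix)]
    return int(value)

if __name__ == '__main__':
    print(temp_to_int('28℃') - temp_to_int('17℃'))   # -> 11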
36
old/blog_update.sh
Normal file
@@ -0,0 +1,36 @@
#!/bin/bash
#===================================================================
# Filename   :  jekyll.sh
# Function   :
# Usage      :
# Author     :  Manford Fan
# Date       :  2021-08-29 13:10
# Version    :  Version 0.1
# Disclaimer :  The author is NOT responsible for any loss caused
#               by the user's own operations.
# And More   :  If you find there are some bugs in this script
#               or you have better ideas, please do contact me
#               via E-mail -- mffan0922@163.com
#===================================================================

# update blog
echo `date`
rm -rf /opt/websites/blog
let numOfAvatar=`ls /opt/websites/nav/assets/images/logos/ | wc -l`
let randNumber=$RANDOM%$numOfAvatar

cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/websites/homepage/assets/img/logo.jpg -rf
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/source-code/blog/img/avatar.jpg -rf
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/websites/nav/assets/images/logos/avatar.jpg -rf
jekyll b -s /opt/source-code/blog/ -d /opt/websites/blog/

# update bash
rm -rf /opt/websites/just-the-docs/bash
jekyll b -s /opt/source-code/document/bash -d /opt/websites/just-the-docs/bash

# update python
rm -rf /opt/websites/just-the-docs/python
jekyll b -s /opt/source-code/document/python -d /opt/websites/just-the-docs/python

chown -R www-data:www-data /opt/websites
13
old/color.sh
Normal file
@@ -0,0 +1,13 @@
#!/bin/bash

nums=(0 1 2 3 4 5 7 8)
for i in ${nums[@]}
do
    for j in `seq 30 37`
    do
        for k in `seq 40 47`
        do
            echo -e "$i;$j;${k}m -- \e[$i;$j;${k}mHello echo!\e[0m"
        done
    done
done
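color.sh enumerates ANSI SGR combinations of the form ESC[<style>;<fg>;<bg>m. The same table can be printed from Python when that is handier for testing a terminal; a minimal sketch using the same style codes:

styles = (0, 1, 2, 3, 4, 5, 7, 8)          # same values as nums in color.sh
for i in styles:
    for j in range(30, 38):                 # foreground colours 30-37
        for k in range(40, 48):             # background colours 40-47
            print(f"{i};{j};{k}m -- \033[{i};{j};{k}mHello echo!\033[0m")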
BIN
old/ctc/cdnlog_search_logic.jpg
Normal file
Binary file not shown. (Size: 162 KiB)
167
old/ctc/config.sh
Normal file
@@ -0,0 +1,167 @@
#!/bin/bash
# set -e
# bug-1: cannot find the configuration of a live-streaming domain
# usage
|
||||
function usage {
|
||||
echo -e "${c_bc}获取域名详细配置或者对比两个域名的配置异同:${c_e}"
|
||||
echo -e " config -c domain"
|
||||
echo -e " config -d domain_1 domain_2\n"
|
||||
exit 100
|
||||
}
|
||||
|
||||
function onCtrlC () {
|
||||
# while capture Ctrl+C, kill all background processes silently and exit
|
||||
exec 3>&2 # 3 is now a copy of 2
|
||||
exec 2> /dev/null # 2 now points to /dev/null
|
||||
kill ${bg_pids} ${progress_pid} >/dev/null 2>&1
|
||||
sleep 1 # sleep to wait for process to die
|
||||
exec 2>&3 # restore stderr to saved
|
||||
exec 3>&- # close saved version
|
||||
echo
|
||||
echo -e "${c_bir}Ctrl+C is captured, exiting...\n${c_e}"
|
||||
exit 1
|
||||
}
|
||||
|
||||
function get_config {
|
||||
# 判断要查询的域名是否在平台,domain.list文件每小时更新一次 -- task.sh
|
||||
res=`cat $data/domain.list | grep -w "$domain"`
|
||||
if [[ $res == '' ]]; then
|
||||
echo -e "${c_br}该域名[$domain]未在天翼平台配置,一个小时内新增的域名无法查询,退出...${c_e}"
|
||||
echo -e "${c_br}只接收一个参数,请输入要查询的域名\ne.g. infos www.ctyun.cn\n${c_e}"
|
||||
exit 247
|
||||
fi
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 1
|
||||
curl "https://dcp.ctcdn.cn/traffic-control-api/v2/access/list?access_version_id=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjYzfQ.XZ_nNtRWoBRcPBM-bHAG_ciohkNh69n4AVHwV755r6Q' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_1.log > domain_info_1.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_1.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 1失败,退出...${c_e}"; exit 246; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --map_info domain_info_1.log $domain
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 1信息失败,退出...${c_e}"; exit 242; }
|
||||
|
||||
accid=`cat info.log | awk -F ':' '$1==3 {print $2}'`
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 -- CDN
|
||||
curl "https://confmanager.ctcdn.cn/v1/domain/conf/query_union?precise_query=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjQxNn0.KEFh_yg3J1y8sL7s3X_8jIR8GGq88A89b7J5YhVYVf8' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_4.log > domain_info_4.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_4.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 4失败,退出...${c_e}"; exit 243; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_config_cdn domain_info_4.log $accid $domain
|
||||
r_code=$?
|
||||
if [[ $r_code -eq 204 ]]; then
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - live
|
||||
curl "https://lasc-new.ctcdn.cn/v1/domain/conf/op_query_union?page_size=20&page=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_5.log > domain_info_5.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_5.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 5失败,退出...${c_e}"; exit 235; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_5 domain_info_5.log $accid
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 5信息失败,退出...${c_e}"; exit 237; }
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 6
|
||||
domain_id=`cat info.log | awk -F ':' '$1==4 {print $2}'`
|
||||
curl "https://lasc-new.ctcdn.cn/v1/domain/internal/domain_config?domain_id=$domain_id" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_6.log > domain_info_6.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_6.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 6失败,退出...${c_e}"; exit 238; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_config_live domain_info_6.log $domain
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 6信息失败,退出...${c_e}"; exit 236; }
|
||||
|
||||
elif [[ $r_code -ne 0 ]]; then
|
||||
echo -e "${c_br}处理域名-part 4信息失败,退出...${c_e}"
|
||||
exit 239
|
||||
else
|
||||
:
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# Self defined color shortcut
|
||||
c_br='\e[1;31m' # bold red
|
||||
c_bg='\e[1;32m' # bold green
|
||||
c_by='\e[1;33m' # bold yellow
|
||||
c_bb='\e[1;34m' # bold blue
|
||||
c_bp='\e[1;35m' # bold purple
|
||||
c_bc='\e[1;36m' # bold cyan
|
||||
c_bir='\e[1;3;31m' # bold italic red
|
||||
c_bib='\e[1;3;34m' # bold italic blue
|
||||
c_bic='\e[1;3;36m' # bold italic cyan
|
||||
c_e='\e[0m' # reset
|
||||
# some initialization
|
||||
stty erase '^H' # allow backspace
|
||||
data='/usr/local/script/fanmf11/data'
|
||||
toolbox='/usr/local/script/fanmf11/'
|
||||
OP="prefix "$@
|
||||
dash=`echo $OP | awk '{print $2}'`
|
||||
first=`echo $OP | awk '{print $3}'`
|
||||
second=`echo $OP | awk '{print $4}'`
|
||||
flg=1 # signify if rip is acquired successfully or not, 0 - OK and 1 -NG
|
||||
TS=`date +%s%N`
|
||||
host=`whoami`
|
||||
trash="/usr/local/script/fanmf11/trash/$host/$TS"
|
||||
|
||||
if [[ -d $trash ]]; then
|
||||
echo -e "${c_br}对于同一个用户,同一时间只能运行一个实例,请重新运行...${c_e}"
|
||||
exit 245
|
||||
else
|
||||
mkdir -p $trash
|
||||
cd $trash && cd ..
|
||||
docs=`ls`
|
||||
for doc in $docs; do
|
||||
[[ -f $doc ]] && rm -rf $doc
|
||||
done
|
||||
folders=`ls -t`
|
||||
while [[ `echo $folders | awk '{print NF}'` -gt 29 ]]; do
|
||||
folder=`ls -t | tail -1`
|
||||
rm -rf $folder
|
||||
folders=`ls -t`
|
||||
done
|
||||
cd $trash && touch config
|
||||
fi
|
||||
# set a trap for Ctrl+C
|
||||
trap 'onCtrlC' INT
|
||||
|
||||
|
||||
if [[ $# -eq 2 && $dash == '-c' ]]; then
|
||||
domain=$first
|
||||
get_config
|
||||
exec 3>&2 && exec 2> log.json
|
||||
cat $domain | tr -d ' ' | sed 's/"{/{/g' | sed 's/}"/}/g' | sed 's/\\"/"/g' | sed 's/\\n/ /g' | sed 's/\\t//g'| jq -r . | awk -f $toolbox/reformat.awk | jq -r . > log.json 2>&1
|
||||
cat log.json | grep -q 'parse error'
|
||||
[[ $? -eq 0 ]] && { cat $domain | jq -r .; } || { cat log.json | jq -r .; }
|
||||
exec 2>&3 && exec 3>&-
|
||||
|
||||
elif [[ $# -eq 3 && $dash == '-d' ]]; then
|
||||
domain=$first
|
||||
get_config
|
||||
exec 3>&2 && exec 2> log.json
|
||||
cat $domain | tr -d ' ' | sed 's/"{/{/g' | sed 's/}"/}/g' | sed 's/\\"/"/g' | sed 's/\\n/ /g' | sed 's/\\t//g'| jq -S . | awk -f $toolbox/reformat.awk | jq -S . > log.json 2>&1
|
||||
cat log.json | grep -q 'parse error'
|
||||
[[ $? -eq 0 ]] && { cat $domain | jq -S -f $toolbox/normalize.jq > first.json; }
|
||||
jq -S -f $toolbox/normalize.jq log.json > first.json
|
||||
exec 2>&3 && exec 3>&-
|
||||
domain=$second
|
||||
get_config
|
||||
exec 3>&2 && exec 2> log.json
|
||||
cat $domain | tr -d ' ' | sed 's/"{/{/g' | sed 's/}"/}/g' | sed 's/\\"/"/g' | sed 's/\\n/ /g' | sed 's/\\t//g'| jq -S . | awk -f $toolbox/reformat.awk | jq -S . > log.json 2>&1
|
||||
cat log.json | grep -q 'parse error'
|
||||
[[ $? -eq 0 ]] && { cat $domain | jq -S -f $toolbox/normalize.jq > second.json; }
|
||||
jq -S -f $toolbox/normalize.jq log.json > second.json
|
||||
exec 2>&3 && exec 3>&-
|
||||
jaydiff --json --indent=' ' --slice-myers first.json second.json
|
||||
echo -e "${c_bic}此对比结果仅供参考,由于不同结构的JSON数据语义可能是相同的。${c_by}可以仔细对比下显示不同的部分,有可能是因为结构不同造成的。${c_bic}另外,可以用JSON在线对比工具做进一步检查如下文件${c_e}"
|
||||
echo -e "${c_bib} `pwd`/first.json${c_e}"
|
||||
echo -e "${c_bib} `pwd`/second.json${c_e}\n"
|
||||
else
|
||||
usage
|
||||
fi
|
||||
|
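The -d branch of config.sh warns that two JSON documents can differ in structure while being semantically equal, and leaves first.json/second.json behind for manual comparison. A small sketch of the usual normalize-then-diff step in Python, assuming both files exist in the current directory; key order is normalized via sort_keys, while list order is left alone because it can be significant:

import difflib
import json

def normalized(path):
    # Sketch: re-serialize a JSON file with sorted keys and fixed indentation.
    with open(path, encoding='utf-8') as fh:
        data = json.load(fh)
    return json.dumps(data, sort_keys=True, indent=2, ensure_ascii=False).splitlines(keepends=True)

if __name__ == '__main__':
    diff = difflib.unified_diff(normalized('first.json'), normalized('second.json'),
                                fromfile='first.json', tofile='second.json')
    print(''.join(diff), end='')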
3
old/ctc/dist/get_infos.py
vendored
Normal file
File diff suppressed because one or more lines are too long
483
old/ctc/dist/pytransform/__init__.py
vendored
Normal file
@@ -0,0 +1,483 @@
|
||||
# These modules are also used by the protection code, so that the protection
# code needn't import anything
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
import struct
|
||||
|
||||
# Because ctypes is new from Python 2.5, so pytransform doesn't work
|
||||
# before Python 2.5
|
||||
#
|
||||
from ctypes import cdll, c_char, c_char_p, c_int, c_void_p, \
|
||||
pythonapi, py_object, PYFUNCTYPE, CFUNCTYPE
|
||||
from fnmatch import fnmatch
|
||||
|
||||
#
|
||||
# Support Platforms
|
||||
#
|
||||
plat_path = 'platforms'
|
||||
|
||||
plat_table = (
|
||||
('windows', ('windows', 'cygwin*')),
|
||||
('darwin', ('darwin',)),
|
||||
('ios', ('ios',)),
|
||||
('linux', ('linux*',)),
|
||||
('freebsd', ('freebsd*', 'openbsd*', 'isilon onefs')),
|
||||
('poky', ('poky',)),
|
||||
)
|
||||
|
||||
arch_table = (
|
||||
('x86', ('i?86', )),
|
||||
('x86_64', ('x64', 'x86_64', 'amd64', 'intel')),
|
||||
('arm', ('armv5',)),
|
||||
('armv6', ('armv6l',)),
|
||||
('armv7', ('armv7l',)),
|
||||
('ppc64', ('ppc64le',)),
|
||||
('mips32', ('mips',)),
|
||||
('aarch32', ('aarch32',)),
|
||||
('aarch64', ('aarch64', 'arm64'))
|
||||
)
|
||||
|
||||
#
|
||||
# Hardware type
|
||||
#
|
||||
HT_HARDDISK, HT_IFMAC, HT_IPV4, HT_IPV6, HT_DOMAIN = range(5)
|
||||
|
||||
#
|
||||
# Global
|
||||
#
|
||||
_pytransform = None
|
||||
|
||||
|
||||
class PytransformError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def dllmethod(func):
|
||||
def wrap(*args, **kwargs):
|
||||
return func(*args, **kwargs)
|
||||
return wrap
|
||||
|
||||
|
||||
@dllmethod
|
||||
def version_info():
|
||||
prototype = PYFUNCTYPE(py_object)
|
||||
dlfunc = prototype(('version_info', _pytransform))
|
||||
return dlfunc()
|
||||
|
||||
|
||||
@dllmethod
|
||||
def init_pytransform():
|
||||
major, minor = sys.version_info[0:2]
|
||||
# Python2.5 no sys.maxsize but sys.maxint
|
||||
# bitness = 64 if sys.maxsize > 2**32 else 32
|
||||
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_void_p)
|
||||
init_module = prototype(('init_module', _pytransform))
|
||||
ret = init_module(major, minor, pythonapi._handle)
|
||||
if (ret & 0xF000) == 0x1000:
|
||||
raise PytransformError('Initialize python wrapper failed (%d)'
|
||||
% (ret & 0xFFF))
|
||||
return ret
|
||||
|
||||
|
||||
@dllmethod
|
||||
def init_runtime():
|
||||
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
|
||||
_init_runtime = prototype(('init_runtime', _pytransform))
|
||||
return _init_runtime(0, 0, 0, 0)
|
||||
|
||||
|
||||
@dllmethod
|
||||
def encrypt_code_object(pubkey, co, flags, suffix=''):
|
||||
_pytransform.set_option(6, suffix.encode())
|
||||
prototype = PYFUNCTYPE(py_object, py_object, py_object, c_int)
|
||||
dlfunc = prototype(('encrypt_code_object', _pytransform))
|
||||
return dlfunc(pubkey, co, flags)
|
||||
|
||||
|
||||
@dllmethod
|
||||
def generate_license_key(prikey, keysize, rcode):
|
||||
prototype = PYFUNCTYPE(py_object, c_char_p, c_int, c_char_p)
|
||||
dlfunc = prototype(('generate_license_key', _pytransform))
|
||||
return dlfunc(prikey, keysize, rcode) if sys.version_info[0] == 2 \
|
||||
else dlfunc(prikey, keysize, rcode.encode())
|
||||
|
||||
|
||||
@dllmethod
|
||||
def get_registration_code():
|
||||
prototype = PYFUNCTYPE(py_object)
|
||||
dlfunc = prototype(('get_registration_code', _pytransform))
|
||||
return dlfunc()
|
||||
|
||||
|
||||
@dllmethod
|
||||
def get_expired_days():
|
||||
prototype = PYFUNCTYPE(py_object)
|
||||
dlfunc = prototype(('get_expired_days', _pytransform))
|
||||
return dlfunc()
|
||||
|
||||
|
||||
@dllmethod
|
||||
def clean_obj(obj, kind):
|
||||
prototype = PYFUNCTYPE(c_int, py_object, c_int)
|
||||
dlfunc = prototype(('clean_obj', _pytransform))
|
||||
return dlfunc(obj, kind)
|
||||
|
||||
|
||||
def clean_str(*args):
|
||||
tdict = {
|
||||
'str': 0,
|
||||
'bytearray': 1,
|
||||
'unicode': 2
|
||||
}
|
||||
for obj in args:
|
||||
k = tdict.get(type(obj).__name__)
|
||||
if k is None:
|
||||
raise RuntimeError('Can not clean object: %s' % obj)
|
||||
clean_obj(obj, k)
|
||||
|
||||
|
||||
def get_hd_info(hdtype, name=None):
|
||||
if hdtype not in range(HT_DOMAIN + 1):
|
||||
raise RuntimeError('Invalid parameter hdtype: %s' % hdtype)
|
||||
size = 256
|
||||
t_buf = c_char * size
|
||||
buf = t_buf()
|
||||
cname = c_char_p(0 if name is None
|
||||
else name.encode('utf-8') if hasattr('name', 'encode')
|
||||
else name)
|
||||
if (_pytransform.get_hd_info(hdtype, buf, size, cname) == -1):
|
||||
raise PytransformError('Get hardware information failed')
|
||||
return buf.value.decode()
|
||||
|
||||
|
||||
def show_hd_info():
|
||||
return _pytransform.show_hd_info()
|
||||
|
||||
|
||||
def assert_armored(*names):
|
||||
prototype = PYFUNCTYPE(py_object, py_object)
|
||||
dlfunc = prototype(('assert_armored', _pytransform))
|
||||
|
||||
def wrapper(func):
|
||||
def wrap_execute(*args, **kwargs):
|
||||
dlfunc(names)
|
||||
return func(*args, **kwargs)
|
||||
return wrap_execute
|
||||
return wrapper
|
||||
|
||||
|
||||
def check_armored(*names):
|
||||
try:
|
||||
prototype = PYFUNCTYPE(py_object, py_object)
|
||||
prototype(('assert_armored', _pytransform))(names)
|
||||
return True
|
||||
except RuntimeError:
|
||||
return False
|
||||
|
||||
|
||||
def get_license_info():
|
||||
info = {
|
||||
'ISSUER': None,
|
||||
'EXPIRED': None,
|
||||
'HARDDISK': None,
|
||||
'IFMAC': None,
|
||||
'IFIPV4': None,
|
||||
'DOMAIN': None,
|
||||
'DATA': None,
|
||||
'CODE': None,
|
||||
}
|
||||
rcode = get_registration_code().decode()
|
||||
if rcode.startswith('*VERSION:'):
|
||||
index = rcode.find('\n')
|
||||
info['ISSUER'] = rcode[9:index].split('.')[0].replace('-sn-1.txt', '')
|
||||
rcode = rcode[index+1:]
|
||||
|
||||
index = 0
|
||||
if rcode.startswith('*TIME:'):
|
||||
from time import ctime
|
||||
index = rcode.find('\n')
|
||||
info['EXPIRED'] = ctime(float(rcode[6:index]))
|
||||
index += 1
|
||||
|
||||
if rcode[index:].startswith('*FLAGS:'):
|
||||
index += len('*FLAGS:') + 1
|
||||
info['FLAGS'] = ord(rcode[index - 1])
|
||||
|
||||
prev = None
|
||||
start = index
|
||||
for k in ['HARDDISK', 'IFMAC', 'IFIPV4', 'DOMAIN', 'FIXKEY', 'CODE']:
|
||||
index = rcode.find('*%s:' % k)
|
||||
if index > -1:
|
||||
if prev is not None:
|
||||
info[prev] = rcode[start:index]
|
||||
prev = k
|
||||
start = index + len(k) + 2
|
||||
info['CODE'] = rcode[start:]
|
||||
i = info['CODE'].find(';')
|
||||
if i > 0:
|
||||
info['DATA'] = info['CODE'][i+1:]
|
||||
info['CODE'] = info['CODE'][:i]
|
||||
return info
|
||||
|
||||
|
||||
def get_license_code():
|
||||
return get_license_info()['CODE']
|
||||
|
||||
|
||||
def get_user_data():
|
||||
return get_license_info()['DATA']
|
||||
|
||||
|
||||
def _match_features(patterns, s):
|
||||
for pat in patterns:
|
||||
if fnmatch(s, pat):
|
||||
return True
|
||||
|
||||
|
||||
def _gnu_get_libc_version():
|
||||
try:
|
||||
prototype = CFUNCTYPE(c_char_p)
|
||||
ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))()
|
||||
return ver.decode().split('.')
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def format_platform(platid=None):
|
||||
if platid:
|
||||
return os.path.normpath(platid)
|
||||
|
||||
plat = platform.system().lower()
|
||||
mach = platform.machine().lower()
|
||||
|
||||
for alias, platlist in plat_table:
|
||||
if _match_features(platlist, plat):
|
||||
plat = alias
|
||||
break
|
||||
|
||||
if plat == 'linux':
|
||||
cname, cver = platform.libc_ver()
|
||||
if cname == 'musl':
|
||||
plat = 'musl'
|
||||
elif cname == 'libc':
|
||||
plat = 'android'
|
||||
elif cname == 'glibc':
|
||||
v = _gnu_get_libc_version()
|
||||
if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214:
|
||||
plat = 'centos6'
|
||||
|
||||
for alias, archlist in arch_table:
|
||||
if _match_features(archlist, mach):
|
||||
mach = alias
|
||||
break
|
||||
|
||||
if plat == 'windows' and mach == 'x86_64':
|
||||
bitness = struct.calcsize('P'.encode()) * 8
|
||||
if bitness == 32:
|
||||
mach = 'x86'
|
||||
|
||||
return os.path.join(plat, mach)
|
||||
|
||||
|
||||
# Load _pytransform library
|
||||
def _load_library(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
|
||||
path = os.path.dirname(__file__) if path is None \
|
||||
else os.path.normpath(path)
|
||||
|
||||
plat = platform.system().lower()
|
||||
for alias, platlist in plat_table:
|
||||
if _match_features(platlist, plat):
|
||||
plat = alias
|
||||
break
|
||||
|
||||
name = '_pytransform' + suffix
|
||||
if plat == 'linux':
|
||||
filename = os.path.abspath(os.path.join(path, name + '.so'))
|
||||
elif plat in ('darwin', 'ios'):
|
||||
filename = os.path.join(path, name + '.dylib')
|
||||
elif plat == 'windows':
|
||||
filename = os.path.join(path, name + '.dll')
|
||||
elif plat in ('freebsd', 'poky'):
|
||||
filename = os.path.join(path, name + '.so')
|
||||
else:
|
||||
filename = None
|
||||
|
||||
if platid is not None and os.path.isfile(platid):
|
||||
filename = platid
|
||||
elif platid is not None or not os.path.exists(filename) or not is_runtime:
|
||||
libpath = platid if platid is not None and os.path.isabs(platid) else \
|
||||
os.path.join(path, plat_path, format_platform(platid))
|
||||
filename = os.path.join(libpath, os.path.basename(filename))
|
||||
|
||||
if filename is None:
|
||||
raise PytransformError('Platform %s not supported' % plat)
|
||||
|
||||
if not os.path.exists(filename):
|
||||
raise PytransformError('Could not find "%s"' % filename)
|
||||
|
||||
try:
|
||||
m = cdll.LoadLibrary(filename)
|
||||
except Exception as e:
|
||||
if sys.flags.debug:
|
||||
print('Load %s failed:\n%s' % (filename, e))
|
||||
raise
|
||||
|
||||
# Removed from v4.6.1
|
||||
# if plat == 'linux':
|
||||
# m.set_option(-1, find_library('c').encode())
|
||||
|
||||
if not os.path.abspath('.') == os.path.abspath(path):
|
||||
m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
|
||||
elif (not is_runtime) and sys.platform.startswith('cygwin'):
|
||||
path = os.environ['PYARMOR_CYGHOME']
|
||||
m.set_option(1, path.encode() if sys.version_info[0] == 3 else path)
|
||||
|
||||
# Required from Python3.6
|
||||
m.set_option(2, sys.byteorder.encode())
|
||||
|
||||
if sys.flags.debug:
|
||||
m.set_option(3, c_char_p(1))
|
||||
m.set_option(4, c_char_p(not is_runtime))
|
||||
|
||||
# Disable advanced mode by default
|
||||
m.set_option(5, c_char_p(not advanced))
|
||||
|
||||
# Set suffix for private package
|
||||
if suffix:
|
||||
m.set_option(6, suffix.encode())
|
||||
|
||||
return m
|
||||
|
||||
|
||||
def pyarmor_init(path=None, is_runtime=0, platid=None, suffix='', advanced=0):
|
||||
global _pytransform
|
||||
_pytransform = _load_library(path, is_runtime, platid, suffix, advanced)
|
||||
return init_pytransform()
|
||||
|
||||
|
||||
def pyarmor_runtime(path=None, suffix='', advanced=0):
|
||||
if _pytransform is not None:
|
||||
return
|
||||
|
||||
try:
|
||||
pyarmor_init(path, is_runtime=1, suffix=suffix, advanced=advanced)
|
||||
init_runtime()
|
||||
except Exception as e:
|
||||
if sys.flags.debug or hasattr(sys, '_catch_pyarmor'):
|
||||
raise
|
||||
sys.stderr.write("%s\n" % str(e))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# ----------------------------------------------------------
|
||||
# End of pytransform
|
||||
# ----------------------------------------------------------
|
||||
|
||||
#
|
||||
# Unused
|
||||
#
|
||||
|
||||
|
||||
@dllmethod
|
||||
def generate_license_file(filename, priname, rcode, start=-1, count=1):
|
||||
prototype = PYFUNCTYPE(c_int, c_char_p, c_char_p, c_char_p, c_int, c_int)
|
||||
dlfunc = prototype(('generate_project_license_files', _pytransform))
|
||||
return dlfunc(filename.encode(), priname.encode(), rcode.encode(),
|
||||
start, count) if sys.version_info[0] == 3 \
|
||||
else dlfunc(filename, priname, rcode, start, count)
|
||||
|
||||
#
|
||||
# Not available from v5.6
|
||||
#
|
||||
|
||||
|
||||
def generate_capsule(licfile):
|
||||
prikey, pubkey, prolic = _generate_project_capsule()
|
||||
capkey, newkey = _generate_pytransform_key(licfile, pubkey)
|
||||
return prikey, pubkey, capkey, newkey, prolic
|
||||
|
||||
|
||||
@dllmethod
|
||||
def _generate_project_capsule():
|
||||
prototype = PYFUNCTYPE(py_object)
|
||||
dlfunc = prototype(('generate_project_capsule', _pytransform))
|
||||
return dlfunc()
|
||||
|
||||
|
||||
@dllmethod
|
||||
def _generate_pytransform_key(licfile, pubkey):
|
||||
prototype = PYFUNCTYPE(py_object, c_char_p, py_object)
|
||||
dlfunc = prototype(('generate_pytransform_key', _pytransform))
|
||||
return dlfunc(licfile.encode() if sys.version_info[0] == 3 else licfile,
|
||||
pubkey)
|
||||
|
||||
|
||||
#
|
||||
# Deprecated functions from v5.1
|
||||
#
|
||||
|
||||
|
||||
@dllmethod
|
||||
def encrypt_project_files(proname, filelist, mode=0):
|
||||
prototype = PYFUNCTYPE(c_int, c_char_p, py_object, c_int)
|
||||
dlfunc = prototype(('encrypt_project_files', _pytransform))
|
||||
return dlfunc(proname.encode(), filelist, mode)
|
||||
|
||||
|
||||
def generate_project_capsule(licfile):
|
||||
prikey, pubkey, prolic = _generate_project_capsule()
|
||||
capkey = _encode_capsule_key_file(licfile)
|
||||
return prikey, pubkey, capkey, prolic
|
||||
|
||||
|
||||
@dllmethod
|
||||
def _encode_capsule_key_file(licfile):
|
||||
prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
|
||||
dlfunc = prototype(('encode_capsule_key_file', _pytransform))
|
||||
return dlfunc(licfile.encode(), None)
|
||||
|
||||
|
||||
@dllmethod
|
||||
def encrypt_files(key, filelist, mode=0):
|
||||
t_key = c_char * 32
|
||||
prototype = PYFUNCTYPE(c_int, t_key, py_object, c_int)
|
||||
dlfunc = prototype(('encrypt_files', _pytransform))
|
||||
return dlfunc(t_key(*key), filelist, mode)
|
||||
|
||||
|
||||
@dllmethod
|
||||
def generate_module_key(pubname, key):
|
||||
t_key = c_char * 32
|
||||
prototype = PYFUNCTYPE(py_object, c_char_p, t_key, c_char_p)
|
||||
dlfunc = prototype(('generate_module_key', _pytransform))
|
||||
return dlfunc(pubname.encode(), t_key(*key), None)
|
||||
|
||||
#
|
||||
# Compatible for PyArmor v3.0
|
||||
#
|
||||
|
||||
|
||||
@dllmethod
|
||||
def old_init_runtime(systrace=0, sysprofile=1, threadtrace=0, threadprofile=1):
|
||||
'''Only for old version, before PyArmor 3'''
|
||||
pyarmor_init(is_runtime=1)
|
||||
prototype = PYFUNCTYPE(c_int, c_int, c_int, c_int, c_int)
|
||||
_init_runtime = prototype(('init_runtime', _pytransform))
|
||||
return _init_runtime(systrace, sysprofile, threadtrace, threadprofile)
|
||||
|
||||
|
||||
@dllmethod
|
||||
def import_module(modname, filename):
|
||||
'''Only for old version, before PyArmor 3'''
|
||||
prototype = PYFUNCTYPE(py_object, c_char_p, c_char_p)
|
||||
_import_module = prototype(('import_module', _pytransform))
|
||||
return _import_module(modname.encode(), filename.encode())
|
||||
|
||||
|
||||
@dllmethod
|
||||
def exec_file(filename):
|
||||
'''Only for old version, before PyArmor 3'''
|
||||
prototype = PYFUNCTYPE(c_int, c_char_p)
|
||||
_exec_file = prototype(('exec_file', _pytransform))
|
||||
return _exec_file(filename.encode())
|
BIN
old/ctc/dist/pytransform/_pytransform.so
vendored
Executable file
Binary file not shown.
603
old/ctc/get_infos.py
Normal file
@@ -0,0 +1,603 @@
|
||||
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
|
||||
import json
|
||||
import sys
|
||||
import signal
|
||||
import readline
|
||||
import os
|
||||
|
||||
|
||||
c_title = '\033[1;4;31;42m' # title color
|
||||
c_br = '\033[1;31m' # bold red
|
||||
c_bg = '\033[1;32m' # bold green
|
||||
c_by = '\033[1;33m' # bold yellow
|
||||
c_bb = '\033[1;34m' # bold blue
|
||||
c_bp = '\033[1;35m' # bold purple
|
||||
c_bc = '\033[1;36m' # bold cyan
|
||||
c_bir= '\033[1;3;31m' # * bold italic red
|
||||
c_bib = '\033[1;3;34m' # * bold italic cyan
|
||||
c_bic = '\033[1;3;36m' # bold italic cyan
|
||||
c_e = '\033[0m' # reset
|
||||
|
||||
|
||||
def get_parent(parent_log, inp_parent_id):
|
||||
parent_all = {"dyn_first_parent": "动态一层父", "dyn_first_parent_all": "动态一层父所有", "dyn_first_parent_backups": "动态一层备父", "dyn_second_parent": "动态二层父", "first_parent": "一层父", "first_parent_backups": "一层备父", "pre_first_parent": "预部署一层父", "pre_first_parent_backups": "预部署一层备父", "pre_second_parent": "预部署二层父", "pre_second_parent_backups": "预部署二层备父", "second_parent": "二层父", "second_parent_backups": "二层备父"}
|
||||
parent_related = {}
|
||||
with open(parent_log) as obj_parent:
|
||||
parents=json.loads(obj_parent.read())
|
||||
for parent in parents['result']:
|
||||
if parent['parent_id'] == inp_parent_id:
|
||||
parent_name = parent['parent_name']
|
||||
print(f"父方案: {parent_name}")
|
||||
for parent_en, parent_cn in parent_all.items():
|
||||
if parent[parent_en] != '':
|
||||
parent_related[parent[parent_en]] = parent_cn
|
||||
for parent_en, parent_cn in parent_related.items():
|
||||
print(f"{parent_cn}: {parent_en}")
|
||||
break
|
||||
|
||||
|
||||
def get_respool(respool_log, inp_template_id, pool_type):
|
||||
with open(respool_log) as obj_respool:
|
||||
respools=json.loads(obj_respool.read())
|
||||
for respool in respools['result']:
|
||||
if int(respool['template_id']) == int(inp_template_id):
|
||||
# print(f"{pool_type}: {respool['template_name']}")
|
||||
return (f"{pool_type}: {respool['template_name']}")
|
||||
|
||||
|
||||
def domain_info_1(domain_info_log, inp_domain):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_infos=json.loads(obj_domain_info.read())
|
||||
# 判断是否是重叠域名
|
||||
multi = len(domain_infos['data'])
|
||||
if multi == 0:
|
||||
print(f"{c_br}未找到该域名相关信息,可以登录网页系统查看是否有配置解析组,退出...{c_e}")
|
||||
sys.exit(205)
|
||||
overlap = "是" if multi > 1 else "否"
|
||||
inp_index = 1
|
||||
if multi > 1:
|
||||
print(f"{c_bp}该域名是重叠域名,请确认要查询域名的归属账号: {c_e}")
|
||||
index = 1
|
||||
flag = 0
|
||||
# 遍历重叠域名的账号邮箱,需要输入确定的序号
|
||||
for domain_info in domain_infos['data']:
|
||||
print(f"账号{index} - ", end="")
|
||||
for find_it in domain_info['domains']:
|
||||
if find_it['domain'] == inp_domain:
|
||||
pretty_print3(f"账户: {find_it['account_name']}", f"邮箱: {find_it['email']}", f"accid: {find_it['account_id']}")
|
||||
flag = 0
|
||||
break
|
||||
flag = 1
|
||||
|
||||
if flag == 1:
|
||||
print()
|
||||
flag = 0
|
||||
|
||||
index += 1
|
||||
|
||||
print(f"{c_by}请输入要查询域名归属账号的序号(e.g. 1, 2, 3...): {c_e}")
|
||||
# 验证index是合法输入的逻辑
|
||||
inp_index = input()
|
||||
if inp_index.isdigit() and 1 <= int(inp_index) and int(inp_index) < index:
|
||||
inp_index = int(inp_index)
|
||||
else:
|
||||
print(f"{c_br}请输入正确的序号,{c_e}", end="")
|
||||
sys.exit(200)
|
||||
inp_index -= 1
|
||||
inp_index = inp_index if inp_index != 0 else 0
|
||||
common_cname = len(domain_infos['data'][inp_index]['domains'])
|
||||
for find_it in range(common_cname):
|
||||
if domain_infos['data'][inp_index]['domains'][find_it]['domain'] == inp_domain:
|
||||
break
|
||||
|
||||
common_cname = '是' if common_cname > 1 else '否'
|
||||
common_cnames = []
|
||||
for domain in domain_infos['data'][inp_index]['domains']:
|
||||
common_cnames.append(domain['domain'])
|
||||
|
||||
account = domain_infos['data'][inp_index]['domains'][find_it]['account_name']
|
||||
account_id = domain_infos['data'][inp_index]['domains'][find_it]['account_id']
|
||||
access_id = domain_infos['data'][inp_index]['domains'][find_it]['access_id']
|
||||
email = domain_infos['data'][inp_index]['domains'][find_it]['email']
|
||||
cname = domain_infos['data'][inp_index]['cname']
|
||||
cname_vendor = domain_infos['data'][inp_index]['access_vendor_cname']
|
||||
parse_group = domain_infos['data'][inp_index]['parse_group_name']
|
||||
|
||||
with open("info.log", 'w', encoding='utf-8') as obj_info:
|
||||
obj_info.write(f"1:{account}\n")
|
||||
obj_info.write(f"2:{email}\n")
|
||||
obj_info.write(f"3:{account_id}\n")
|
||||
obj_info.write(f"4:{access_id}\n")
|
||||
|
||||
pretty_print3(f"账户: {account}", f"邮箱: {email}", f"accId: {account_id}")
|
||||
pretty_print3(f"Map: {parse_group}", f"accessId: {access_id}", f"重叠域名: {overlap}")
|
||||
pretty_print3(f"合作方: {cname_vendor}", f"CNAME: {cname}", f"是否共享CNAME缓存: {common_cname}")
|
||||
if common_cname == '是':
|
||||
print(f"共享CNAME缓存域名列表: {common_cnames}")
|
||||
if parse_group == '':
|
||||
sys.exit(201)
|
||||
|
||||
|
||||
def domain_info_2(domain_info_log, inp_accid):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_infos=json.loads(obj_domain_info.read())
|
||||
# 遍历账号名称相同的客户,使用request id确定唯一的信息
|
||||
for acc_info in domain_infos['results']['items']:
|
||||
if acc_info['accountId'] == inp_accid:
|
||||
managerArea = acc_info['managerArea']
|
||||
platformVipLevel = acc_info['platformVipLevel']
|
||||
businessLevel = acc_info['businessLevel']
|
||||
ctYunVipLevel = acc_info['ctYunVipLevel']
|
||||
clientId = acc_info['clientId']
|
||||
accountType = acc_info['accountType']
|
||||
clientInsideName = acc_info['clientInsideName']
|
||||
maintainAfterName = acc_info['maintainAfterName']
|
||||
maintainAfterPhone = acc_info['maintainAfterPhone']
|
||||
maintainAfterEmail = acc_info['maintainAfterEmail']
|
||||
managerVendor = acc_info['managerVendor']
|
||||
|
||||
pretty_print3(f"售后姓名: {maintainAfterName}", f"售后电话: {maintainAfterPhone}", f"售后邮箱: {maintainAfterEmail}")
|
||||
pretty_print3(f"天翼云VIP等级: {ctYunVipLevel}", f"平台VIP等级: {platformVipLevel}", f"客户VIP等级: {businessLevel}")
|
||||
pretty_print3(f"clientId: {clientId}", f"客户内部名称: {clientInsideName}", f"商务渠道: {managerArea}")
|
||||
pretty_print2(f"承载平台: {managerVendor}", f"客户类型: {accountType}")
|
||||
break
|
||||
|
||||
|
||||
def domain_info_3(domain_info_log, inp_accid):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_infos=json.loads(obj_domain_info.read())
|
||||
# 遍历重叠域名,使用request id确定唯一的信息
|
||||
for domain_info in domain_infos['data']['results']:
|
||||
if domain_info['accountId'] == inp_accid:
|
||||
statusName = domain_info['statusName']
|
||||
ipv6Switch = domain_info['ipv6Switch']
|
||||
productName = domain_info['productName']
|
||||
innerTestDomain = domain_info['innerTestDomain']
|
||||
ipv6Switch = '是' if ipv6Switch == 1 else '否'
|
||||
innerTestDomain = '是' if innerTestDomain == 1 else '否'
|
||||
|
||||
pretty_print2(f"域名状态: {statusName}", f"是否开启IPv6: {ipv6Switch}")
|
||||
pretty_print2(f"是否内部测试域名: {innerTestDomain}", f"产品类型: {productName}")
|
||||
break
|
||||
|
||||
|
||||
def domain_info_4(domain_info_log, inp_accid):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_infos=json.loads(obj_domain_info.read())
|
||||
# 遍历账号名称相同的客户,使用request id确定唯一的信息
|
||||
if len(domain_infos['result']) == 0:
|
||||
sys.exit(204)
|
||||
else:
|
||||
for domain_info in domain_infos['result']:
|
||||
if domain_info['account_id'] == inp_accid:
|
||||
# 1. 回源地址
|
||||
origin = []
|
||||
for ori in domain_info['origin']:
|
||||
origin.append(ori['role'] + ':' + ori['origin'])
|
||||
# 2. 访问协议 + 端口
|
||||
http_visit = domain_info['basic_conf']['http_server_port'] if domain_info['http_status'] == 'on' else 'X'
|
||||
https_visit = domain_info['basic_conf']['https_server_port'] if domain_info['https_status'] == 'on' else 'X'
|
||||
url_visit = str(http_visit) + '/' + str(https_visit)
|
||||
# 3. 回源协议 + 端口
|
||||
https_origin = str(domain_info['basic_conf']['https_origin_port'])
|
||||
http_origin = str(domain_info['basic_conf']['http_origin_port'])
|
||||
if domain_info['backorigin_protocol'] == 'follow_request':
|
||||
url_origin = http_origin + '/' + https_origin
|
||||
elif domain_info['backorigin_protocol'] == 'http':
|
||||
url_origin = http_origin + '/X'
|
||||
elif domain_info['backorigin_protocol'] == 'https':
|
||||
url_origin = 'X/' + https_origin
|
||||
else:
|
||||
print("回源协议除了http/https/follow_request之外,还有第四种方式,请补充...")
|
||||
sys.exit(201)
|
||||
# 4. 证书备注名
|
||||
cert_name = domain_info['cert_name']
|
||||
|
||||
|
||||
# 6. 预部署资源池
|
||||
pre_node_list = domain_info['pre_node_list']
|
||||
off_pool = get_respool("respool.log", pre_node_list, '预部署资源池')
|
||||
# 7. 全局资源池
|
||||
node_list = domain_info['node_list']
|
||||
on_pool = get_respool("respool.log", node_list, '全局资源池')
|
||||
# 8. 是否热加载
|
||||
conf_order_id = domain_info['conf_order_id']
|
||||
conf_order_id = '否' if conf_order_id == -1 else '是'
|
||||
|
||||
pretty_print2(f"证书备注名: {cert_name}", f"热加载: {conf_order_id}")
|
||||
pretty_print2(off_pool, on_pool)
|
||||
print(f"回源地址: {origin}")
|
||||
print(f"http/https访问: {url_visit}")
|
||||
print(f"http/https回源: {url_origin}")
|
||||
|
||||
# 5. 父方案 parent_id
|
||||
parent_id = domain_info['parent_id']
|
||||
get_parent("parent.log", parent_id)
|
||||
break
|
||||
|
||||
|
||||
def domain_info_5(domain_info_log, inp_accid):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_infos=json.loads(obj_domain_info.read())
|
||||
# 遍历账号名称相同的客户,使用request id确定唯一的信息
|
||||
for domain_info in domain_infos['result']:
|
||||
if domain_info['account_id'] == inp_accid:
|
||||
with open("info.log", 'w', encoding='utf-8') as obj_info:
|
||||
obj_info.write(f"4:{domain_info['domain_id']}\n")
|
||||
break
|
||||
|
||||
# 如下accid没用到
|
||||
def domain_info_6(domain_info_log, inp_accid):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_info=json.loads(obj_domain_info.read())['result']
|
||||
|
||||
# 推拉流模式
|
||||
push_stream_domain = ''
|
||||
pull_stream_mode = domain_info['base_conf']['pull_stream_mode']
|
||||
if pull_stream_mode == 0:
|
||||
pull_stream_mode = "直播拉流(推拉流)"
|
||||
push_stream_domain = domain_info['base_conf']['push_stream_domain']
|
||||
elif pull_stream_mode == 1:
|
||||
pull_stream_mode = "直播拉流(回源拉流)"
|
||||
else:
|
||||
pull_stream_mode = "直播推流"
|
||||
|
||||
|
||||
# 证书备注名
|
||||
if domain_info['protocol_control']['https_switch'] == 1:
|
||||
cert_name = domain_info['protocol_control']['cert_name']
|
||||
else:
|
||||
cert_name = '无绑定证书'
|
||||
|
||||
pretty_print3(f"推拉流模式: {pull_stream_mode}", f"推流域名: {push_stream_domain}", f"证书备注名: {cert_name}")
|
||||
|
||||
# 预部署资源池
|
||||
pre_node_list = domain_info['pre_resouce_id']
|
||||
off_pool = get_respool("respool.log", pre_node_list, '预部署资源池')
|
||||
# 全局资源池
|
||||
node_list = domain_info['resouce_id']
|
||||
on_pool = get_respool("respool.log", node_list, '全局资源池')
|
||||
|
||||
pretty_print2(off_pool, on_pool)
|
||||
|
||||
# 回源模式
|
||||
origin_mode = domain_info['base_conf']['origin_mode']
|
||||
for mode in origin_mode:
|
||||
print(f"回源模式: {mode}")
|
||||
mode_desc = domain_info['base_conf'][f'{mode}_origin']
|
||||
for ori in mode_desc:
|
||||
for k, v in ori.items():
|
||||
if v != '':
|
||||
print(f"{k}: {v}")
|
||||
# 父方案 parent_id
|
||||
parent_id = domain_info['parent_id']
|
||||
get_parent("parent.log", parent_id)
|
||||
|
||||
|
||||
def domain_map_info(domain_map_log, flg):
|
||||
with open(domain_map_log) as obj_domain_map_log:
|
||||
map_info=json.loads(obj_domain_map_log.read())
|
||||
# 判断是否是重叠域名
|
||||
parse_detail=map_info['parse_detail']
|
||||
if int(flg) == 0:
|
||||
print('------------------------------分区域解析------------------------------')
|
||||
for item in parse_detail:
|
||||
pretty_print3(item['area_cnname'], item['type'], item['value'], 1)
|
||||
# write to file here
|
||||
|
||||
print('----------------------------------------------------------------------')
|
||||
else:
|
||||
with open('map.log', 'w') as obj_map_log:
|
||||
for item in parse_detail:
|
||||
obj_map_log.write(f"{item['value']}\n")
|
||||
|
||||
|
||||
def map_info(map_info_log, inp_domain):
|
||||
with open(map_info_log) as obj_map_info:
|
||||
map_infos=json.loads(obj_map_info.read())
|
||||
# 判断是否是重叠域名
|
||||
multi = len(map_infos['data'])
|
||||
if multi == 0:
|
||||
print(f"{c_br}未找到该域名相关信息,可以登录网页系统查看是否有配置解析组,退出...{c_e}")
|
||||
sys.exit(205)
|
||||
|
||||
inp_index = 1
|
||||
if multi > 1:
|
||||
print(f"{c_bp}该域名是重叠域名,请确认要查询域名的归属账号: {c_e}")
|
||||
index = 1
|
||||
flag = 0
|
||||
# 遍历重叠域名的账号邮箱,需要输入确定的序号
|
||||
for map_info in map_infos['data']:
|
||||
print(f"账号{index} - ", end="")
|
||||
for find_it in map_info['domains']:
|
||||
if find_it['domain'] == inp_domain:
|
||||
pretty_print3(f"账户: {find_it['account_name']}", f"邮箱: {find_it['email']}", f"accid: {find_it['account_id']}")
|
||||
flag = 0
|
||||
break
|
||||
flag = 1
|
||||
|
||||
if flag == 1:
|
||||
print()
|
||||
flag = 0
|
||||
|
||||
index += 1
|
||||
print(f"{c_by}请输入要查询域名归属账号的序号(e.g. 1, 2, 3...): {c_e}")
|
||||
# 验证index是合法输入的逻辑
|
||||
inp_index = input()
|
||||
if inp_index.isdigit() and 1 <= int(inp_index) and int(inp_index) < index:
|
||||
inp_index = int(inp_index)
|
||||
else:
|
||||
print(f"{c_br}请输入正确的序号,{c_e}", end="")
|
||||
sys.exit(200)
|
||||
inp_index -= 1
|
||||
inp_index = inp_index if inp_index != 0 else 0
|
||||
parse_group = map_infos['data'][inp_index]['parse_group_name']
|
||||
common_cname = len(map_infos['data'][inp_index]['domains'])
|
||||
for find_it in range(common_cname):
|
||||
if map_infos['data'][inp_index]['domains'][find_it]['domain'] == inp_domain:
|
||||
break
|
||||
account_id = map_infos['data'][inp_index]['domains'][find_it]['account_id']
|
||||
access_id = map_infos['data'][inp_index]['domains'][find_it]['access_id']
|
||||
with open("info.log", 'w', encoding='utf-8') as obj_info:
|
||||
obj_info.write(f"3:{account_id}\n")
|
||||
obj_info.write(f"4:{access_id}\n")
|
||||
if parse_group != '':
|
||||
with open("map.log", 'w', encoding='utf-8') as obj_map:
|
||||
obj_map.write(f"{parse_group}\n")
|
||||
else:
|
||||
sys.exit(201)
|
||||
|
||||
|
||||
|
||||
def domain_config_cdn(domain_info_log, inp_accid, domain):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_infos=json.loads(obj_domain_info.read())
|
||||
# 遍历账号名称相同的客户,使用request id确定唯一的信息
|
||||
if len(domain_infos['result']) == 0:
|
||||
sys.exit(204)
|
||||
else:
|
||||
for domain_info in domain_infos['result']:
|
||||
config_json = json.dumps(domain_info)
|
||||
os.environ['config_json'] = config_json
|
||||
os.environ['domain_json'] = domain
|
||||
if domain_info['account_id'] == inp_accid:
|
||||
os.system("echo $config_json > $domain_json")
|
||||
break
|
||||
|
||||
|
||||
def domain_config_live(domain_info_log, domain):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_info=json.loads(obj_domain_info.read())['result']
|
||||
config_json = json.dumps(domain_info)
|
||||
os.environ['config_json'] = config_json
|
||||
os.environ['domain_json'] = domain
|
||||
os.system("echo $config_json > $domain_json")
|
||||
|
||||
|
||||
|
||||
def parent_info_4(domain_info_log, inp_accid):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_infos=json.loads(obj_domain_info.read())
|
||||
# 遍历账号名称相同的客户,使用request id确定唯一的信息
|
||||
if len(domain_infos['result']) == 0:
|
||||
sys.exit(204)
|
||||
else:
|
||||
for domain_info in domain_infos['result']:
|
||||
if domain_info['account_id'] == inp_accid:
|
||||
# 5. 父方案 parent_id
|
||||
parent_id = domain_info['parent_id']
|
||||
get_parent_info("parent.log", parent_id)
|
||||
break
|
||||
|
||||
|
||||
def parent_info_5(domain_info_log, inp_accid):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_infos=json.loads(obj_domain_info.read())
|
||||
# 遍历账号名称相同的客户,使用request id确定唯一的信息
|
||||
for domain_info in domain_infos['result']:
|
||||
if domain_info['account_id'] == inp_accid:
|
||||
with open("info.log", 'w', encoding='utf-8') as obj_info:
|
||||
obj_info.write(f"2:{domain_info['domain_id']}\n")
|
||||
break
|
||||
|
||||
# 如下accid没用到
|
||||
def parent_info_6(domain_info_log, inp_accid):
|
||||
with open(domain_info_log) as obj_domain_info:
|
||||
domain_info=json.loads(obj_domain_info.read())['result']
|
||||
# 父方案 parent_id
|
||||
parent_id = domain_info['parent_id']
|
||||
get_parent_info("parent.log", parent_id)
|
||||
|
||||
|
||||
|
||||
def get_parent_info(parent_log, inp_parent_id):
|
||||
parent_all = ["dyn_first_parent", "dyn_first_parent_all", "dyn_first_parent_backups", "dyn_second_parent", "first_parent", "first_parent_backups", "pre_first_parent", "pre_first_parent_backups", "pre_second_parent", "pre_second_parent_backups", "second_parent", "second_parent_backups"]
|
||||
parent_related = {}
|
||||
with open(parent_log) as obj_parent:
|
||||
parents=json.loads(obj_parent.read())
|
||||
for parent in parents['result']:
|
||||
if parent['parent_id'] == inp_parent_id:
|
||||
parent_name = parent['parent_name']
|
||||
index = 1
|
||||
for parent_en in parent_all:
|
||||
if parent[parent_en] != '':
|
||||
with open("cmap", 'a', encoding='utf-8') as obj_cmap:
|
||||
obj_cmap.write(f"{index}. {parent[parent_en]}\n")
|
||||
index += 1
|
||||
break
|
||||
|
||||
|
||||
def quit(signum, frame):
|
||||
print("Bye!")
|
||||
sys.exit(205)
|
||||
|
||||
def pretty_print2(col_1, col_2):
|
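# 说明:CJK(如中文)字符在 UTF-8 下占 3 字节,按 1 个字符参与 %-Ns 左对齐,
# 但在终端中占 2 列宽度,因此每出现一个 CJK 字符就把字段宽度减 1,
# 即减去 (utf8字节数 - 字符数) / 2,用来保证多列输出对齐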
||||
len_1 = len(col_1)
|
||||
len_2 = len(col_2)
|
||||
len_1_utf8 = len(col_1.encode('utf-8'))
|
||||
len_2_utf8 = len(col_2.encode('utf-8'))
|
||||
size_1 = 48 - int((len_1_utf8 - len_1) / 2)
|
||||
size_2 = 40 - int((len_2_utf8 - len_2) / 2)
|
||||
print(f"%-{size_1}s%-{size_2}s" % (col_1, col_2))
|
||||
|
||||
|
||||
def pretty_print3(col_1, col_2, col_3, col_4=0):
|
||||
len_1 = len(col_1)
|
||||
len_2 = len(col_2)
|
||||
len_3 = len(col_3)
|
||||
len_1_utf8 = len(col_1.encode('utf-8'))
|
||||
len_2_utf8 = len(col_2.encode('utf-8'))
|
||||
len_3_utf8 = len(col_3.encode('utf-8'))
|
||||
size_1 = 48 - int((len_1_utf8 - len_1) / 2)
|
||||
size_2 = 40 - int((len_2_utf8 - len_2) / 2)
|
||||
size_3 = 30 - int((len_3_utf8 - len_3) / 2)
|
||||
if col_4 == 0:
|
||||
print(f"%-{size_1}s%-{size_2}s%-{size_3}s" % (col_1, col_2, col_3))
|
||||
else:
|
||||
size_1 = 16 - int((len_1_utf8 - len_1) / 2)
|
||||
size_2 = 10 - int((len_2_utf8 - len_2) / 2)
|
||||
size_3 = 60 - int((len_3_utf8 - len_3) / 2)
|
||||
print(f"%-{size_1}s%-{size_2}s%-{size_3}s" % (col_1, col_2, col_3))
|
||||
|
||||
def pretty_print_data(width: list, cols: list):
|
||||
for i in range(len(cols)):
|
||||
len_text = len(cols[i])
|
||||
len_utf8 = len(cols[i].encode('utf-8'))
|
||||
len_size = width[i] - int((len_utf8 - len_text) / 2)
|
||||
if i == 8:
|
||||
if float(cols[i]) < 10:
|
||||
print(f"{c_br}%-{len_size}s{c_e}" % (cols[i]), end='')
|
||||
elif float(cols[i]) < 30:
|
||||
print(f"{c_by}%-{len_size}s{c_e}" % (cols[i]), end='')
|
||||
else:
|
||||
print(f"{c_bg}%-{len_size}s{c_e}" % (cols[i]), end='')
|
||||
else:
|
||||
print(f"%-{len_size}s" % (cols[i]), end='')
|
||||
print()
|
||||
|
||||
def pretty_print_title(width: list, cols: list):
|
||||
for i in range(len(cols)):
|
||||
len_text = len(cols[i])
|
||||
len_utf8 = len(cols[i].encode('utf-8'))
|
||||
len_size = width[i] - int((len_utf8 - len_text) / 2)
|
||||
print(f"{c_title}%-{len_size}s{c_e}" % (cols[i]), end='')
|
||||
print()
|
||||
|
||||
def fmt_print_global(res_map):
|
||||
title = ["大区", "省份", "View", "组名", "VIP", "LAKE", "压测带宽", "实时带宽", "冗余带宽", "昨晚高峰", "昨中高峰"]
|
||||
width = [5, 18, 25, 25, 32, 15, 10, 10, 10, 10, 10]
|
||||
pretty_print_title(width, title)
|
||||
with open(res_map) as obj_res_map:
|
||||
lines = obj_res_map.readlines()
|
||||
count = 1
|
||||
for line in lines:
|
||||
pretty_print_data(width, line.strip().split())
|
||||
count += 1
|
||||
if count % 25 == 0:
|
||||
pretty_print_title(width, title)
|
||||
|
||||
def fmt_print_partial(res_map, view, query, domain, domain_map):
|
||||
|
||||
if os.path.getsize(view):
|
||||
title = ["大区", "省份", "View", "组名", "VIP", "LAKE", "压测带宽", "实时带宽", "冗余带宽", "昨晚高峰", "昨中高峰"]
|
||||
width = [5, 18, 25, 25, 32, 15, 10, 10, 10, 10, 10]
|
||||
pretty_print_title(width, title)
|
||||
|
||||
with open(res_map) as obj_res_map, open(view) as obj_view:
|
||||
views = obj_view.readlines()
|
||||
lines = obj_res_map.readlines()
|
||||
count = 1
|
||||
for view_s in views:
|
||||
for line in lines:
|
||||
c_line = line.strip().split()
|
||||
if c_line[2] == view_s.strip():
|
||||
pretty_print_data(width, c_line)
|
||||
count += 1
|
||||
if count % 25 == 0:
|
||||
pretty_print_title(width, title)
|
||||
if count == 1:
|
||||
print(f"{c_br}域名{domain}的解析组{domain_map}中,不存在{query}地区的覆盖节点,请确认。{c_e}\n")
|
||||
sys.exit(206)
|
||||
else:
|
||||
print(f"{c_br}请按照规则,输入正确的查询条件,退出...{c_e}")
|
||||
sys.exit(202)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
option = sys.argv[1]
|
||||
|
||||
if option == '--domain_info_1':
|
||||
domain_info_log = sys.argv[2]
|
||||
inp_domain = sys.argv[3]
|
||||
domain_info_1(domain_info_log, inp_domain)
|
||||
elif option == '--domain_info_2':
|
||||
domain_info_log = sys.argv[2]
|
||||
inp_accid = sys.argv[3]
|
||||
domain_info_2(domain_info_log, inp_accid)
|
||||
elif option == '--domain_info_3':
|
||||
domain_info_log = sys.argv[2]
|
||||
inp_accid = sys.argv[3]
|
||||
domain_info_3(domain_info_log, inp_accid)
|
||||
elif option == '--domain_info_4':
|
||||
domain_info_log = sys.argv[2]
|
||||
inp_accid = sys.argv[3]
|
||||
domain_info_4(domain_info_log, inp_accid)
|
||||
elif option == '--domain_info_5':
|
||||
domain_info_log = sys.argv[2]
|
||||
inp_accid = sys.argv[3]
|
||||
domain_info_5(domain_info_log, inp_accid)
|
||||
elif option == '--domain_info_6':
|
||||
domain_info_log = sys.argv[2]
|
||||
inp_accid = sys.argv[3]
|
||||
domain_info_6(domain_info_log, inp_accid)
|
||||
elif option == '--domain_map_info':
|
||||
domain_map_log = sys.argv[2]
|
||||
flg = sys.argv[3]
|
||||
domain_map_info(domain_map_log, flg)
|
||||
elif option == '--map_info':
|
||||
map_info_log = sys.argv[2]
|
||||
inp_domain = sys.argv[3]
|
||||
map_info(map_info_log, inp_domain)
|
||||
elif option == '--format-global':
|
||||
res_map = sys.argv[2]
|
||||
fmt_print_global(res_map)
|
||||
elif option == '--format-partial':
|
||||
query = sys.argv[2]
|
||||
view = sys.argv[3]
|
||||
res_map = sys.argv[4]
|
||||
domain = sys.argv[5]
|
||||
domain_map = sys.argv[6]
|
||||
fmt_print_partial(res_map, view, query, domain, domain_map)
|
||||
elif option == '--domain_config_cdn':
|
||||
domain_info_log = sys.argv[2]
|
||||
inp_accid = sys.argv[3]
|
||||
domain = sys.argv[4]
|
||||
domain_config_cdn(domain_info_log, inp_accid, domain)
|
||||
elif option == '--domain_config_live':
|
||||
domain_info_log = sys.argv[2]
|
||||
domain = sys.argv[3]
|
||||
domain_config_live(domain_info_log, domain)
|
||||
elif option == '--parent_info_4':
|
||||
domain_info_log = sys.argv[2]
|
||||
inp_accid = sys.argv[3]
|
||||
parent_info_4(domain_info_log, inp_accid)
|
||||
elif option == '--parent_info_5':
|
||||
domain_info_log = sys.argv[2]
|
||||
inp_accid = sys.argv[3]
|
||||
parent_info_5(domain_info_log, inp_accid)
|
||||
elif option == '--parent_info_6':
|
||||
domain_info_log = sys.argv[2]
|
||||
inp_accid = sys.argv[3]
|
||||
parent_info_6(domain_info_log, inp_accid)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
signal.signal(signal.SIGINT, quit)
|
||||
main()
|
||||
|
||||
|
55
old/ctc/group_chatbot_xibei.sh
Normal file
@ -0,0 +1,55 @@
|
||||
#!/bin/bash
|
||||
#===================================================================
|
||||
# Filename : group_chatbot_xibei.sh
|
||||
# Function :
|
||||
# Usage :
|
||||
# Author : Manford Fan
|
||||
# Date : 2023-05-12 08:59
|
||||
# Version : Version 0.1
|
||||
# Disclaimer : The author is NOT responsible for any loss caused
|
||||
# by the user's own operations.
|
||||
# And More : If you find there are some bugs in this script
|
||||
# Or you have better ideas, please do contact me
|
||||
# via E-mail -- mffan0922@163.com
|
||||
#===================================================================
|
||||
|
||||
function sendMsg() {
|
||||
|
||||
# 个人测试
|
||||
# curl 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=ddea3f5f-fbfc-4c21-994a-71e9fc50e4ef' \
|
||||
# -H 'Content-Type: application/json' \
|
||||
# -d '
|
||||
# {
|
||||
# "msgtype": "markdown",
|
||||
# "markdown": {
|
||||
# "content": "**'"$alarmTitle"'**\n
|
||||
# > <font color=\"warning\">'"$alarmInfo"'</font>"
|
||||
# }
|
||||
# }' > /dev/null 2>&1
|
||||
|
||||
# 群hook
|
||||
curl 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=5c5f18f1-8494-4f42-b7f1-9ef7295b0578' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '
|
||||
{
|
||||
"msgtype": "markdown",
|
||||
"markdown": {
|
||||
"content": "**'"$alarmTitle"'**\n
|
||||
> <font color=\"warning\">'"$alarmInfo"'</font>"
|
||||
}
|
||||
}' > /dev/null 2>&1
|
||||
}
|
||||
|
||||
|
||||
time_opt=$1
|
||||
alarmTitle="周报提醒"
|
||||
|
||||
if [[ $time_opt == '1' ]]; then
|
||||
alarmInfo='周四了,请各位及时填写周报~~'
|
||||
elif [[ $time_opt == '2' ]]; then
|
||||
alarmInfo='记得写周报,记得写周报,记得写周报~~'
|
||||
else
|
||||
:
|
||||
fi
|
||||
|
||||
sendMsg "$alarmTitle" "$alarmInfo"
|
1151
old/ctc/ids.sh
Normal file
File diff suppressed because it is too large
284
old/ctc/infos.sh
Normal file
@ -0,0 +1,284 @@
|
||||
#!/bin/bash
|
||||
|
||||
# 捕获 Ctrl + C 终止整个脚本的运行
|
||||
function onCtrlC () {
|
||||
exec 3>&2 # 3 is now a copy of 2
|
||||
exec 2> /dev/null # 2 now points to /dev/null
|
||||
kill ${bg_pids} ${progress_pid} >/dev/null 2>&1
|
||||
sleep 1 # sleep to wait for process to die
|
||||
exec 2>&3 # restore stderr to saved
|
||||
exec 3>&- # close saved version
|
||||
echo
|
||||
echo -e "${c_bir}Ctrl+C is captured, exiting...\n${c_e}"
|
||||
exit 100
|
||||
}
|
||||
|
||||
|
||||
function infos() {
|
||||
# 判断要查询的域名是否在平台,domain.list文件每小时更新一次 -- task.sh
|
||||
res=`cat $data/domain.list | grep -w "$domain"`
|
||||
if [[ $res == '' ]]; then
|
||||
echo -e "${c_br}该域名[$domain]未在天翼平台配置,一个小时内新增的域名无法查询,退出...${c_e}"
|
||||
echo -e "${c_br}只接收一个参数,请输入要查询的域名\ne.g. infos www.ctyun.cn\n${c_e}"
|
||||
exit 247
|
||||
fi
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 1
|
||||
curl "https://dcp.ctcdn.cn/traffic-control-api/v2/access/list?access_version_id=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjYzfQ.XZ_nNtRWoBRcPBM-bHAG_ciohkNh69n4AVHwV755r6Q' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_1.log > domain_info_1.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_1.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名-part 1信息失败,退出...${c_e}"; exit 246; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_1 domain_info_1.log $domain
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_1 domain_info_1.log $domain
|
||||
r_code=$?
|
||||
if [[ $r_code -eq 205 ]]; then
|
||||
exit 205
|
||||
elif [[ $r_code -eq 201 ]]; then
|
||||
accessid=`cat info.log | awk -F ':' '$1==4 {print $2}'`
|
||||
curl "https://dcp.ctcdn.cn/traffic-control-api/v2/access/parseDetail?access_id=$accessid" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjYzfQ.XZ_nNtRWoBRcPBM-bHAG_ciohkNh69n4AVHwV755r6Q' -H 'content-type: application/json;charset=UTF-8' -vo domain_map_info.log > domain_map_info.response 2>&1
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_map_info.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名-map信息失败,退出...${c_e}"; exit 206; }
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_map_info domain_map_info.log 0
|
||||
|
||||
elif [[ $r_code -ne 0 ]]; then
|
||||
echo -e "${c_br}处理域名-part 1信息失败,退出...${c_e}"
|
||||
exit 242
|
||||
fi
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 2
|
||||
account=`cat info.log | awk -F ':' '$1==1 {print $2}'`
|
||||
accid=`cat info.log | awk -F ':' '$1==3 {print $2}'`
|
||||
|
||||
curl 'https://bs.ctcdn.cn/api/v3/clientInfo/searchClientInfo' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjI1NH0.9Zw66R0R85avd92qzO-3KQ4DJ2zYXL4ght5bH41iTfA' -H 'content-type: application/json;charset=UTF-8' -X POST -d '{"clientInfo":[{"key":"clientCnname", "value": "'$account'"}],"columnList":["openTime", "accountType", "accountResource", "accountEmail"]}' -vo domain_info_2.log > domain_info_2.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_2.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 2失败,退出...${c_e}"; exit 245; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_2 domain_info_2.log $accid
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_2 domain_info_2.log $accid
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 2信息失败,退出...${c_e}"; exit 241; }
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 3
|
||||
curl "http://bs.ctcdn.cn/api/v3/manageDomain/list?partner=&sales_channel=&status=&productCode=&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjI1NH0.9Zw66R0R85avd92qzO-3KQ4DJ2zYXL4ght5bH41iTfA' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_3.log > domain_info_3.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_3.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 3失败,退出...${c_e}"; exit 244; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_3 domain_info_3.log $accid
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_3 domain_info_3.log $accid
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 3信息失败,退出...${c_e}"; exit 240; }
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取父方案信息
|
||||
curl 'https://lasc-new.ctcdn.cn/v1/domain/internal/parent_draft' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo parent.log > parent.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/parent.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取父方案信息失败,退出...${c_e}"; exit 234; }
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取资源池信息
|
||||
curl 'http://rap.ctcdn.cn/v2/rapApi/resourcePoolToResourceGroup' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE1N30.IXVEAglOYm8bUInW4uXqDugBnd6POouBK8q4z_HItns' -H 'content-type: application/json;charset=UTF-8' -vo respool.log > respool.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/respool.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取资源池信息失败,退出...${c_e}"; exit 233; }
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 4
|
||||
curl "https://confmanager.ctcdn.cn/v1/domain/conf/query_union?precise_query=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjQxNn0.KEFh_yg3J1y8sL7s3X_8jIR8GGq88A89b7J5YhVYVf8' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_4.log > domain_info_4.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_4.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 4失败,退出...${c_e}"; exit 243; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_4 domain_info_4.log $accid
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_4 domain_info_4.log $accid
|
||||
r_code=$?
|
||||
if [[ $r_code -eq 204 ]]; then
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 5
|
||||
curl "https://lasc-new.ctcdn.cn/v1/domain/conf/op_query_union?page_size=20&page=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_5.log > domain_info_5.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_5.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 5失败,退出...${c_e}"; exit 235; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_5 domain_info_5.log $accid
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_5 domain_info_5.log $accid
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 5信息失败,退出...${c_e}"; exit 237; }
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 6
|
||||
domain_id=`cat info.log | awk -F ':' '$1==4 {print $2}'`
|
||||
curl "https://lasc-new.ctcdn.cn/v1/domain/internal/domain_config?domain_id=$domain_id" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_6.log > domain_info_6.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_6.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 6失败,退出...${c_e}"; exit 238; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_info_6 domain_info_6.log $accid
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_6 domain_info_6.log $accid
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 6信息失败,退出...${c_e}"; exit 236; }
|
||||
|
||||
elif [[ $r_code -ne 0 ]]; then
|
||||
echo -e "${c_br}处理域名-part 4信息失败,退出...${c_e}"
|
||||
exit 239
|
||||
else
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
# map.sh用如下函数获取解析组信息
|
||||
function map() {
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 1 - 其中包括解析组信息,但有可能是重叠域名
|
||||
curl "https://dcp.ctcdn.cn/traffic-control-api/v2/access/list?access_version_id=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjYzfQ.XZ_nNtRWoBRcPBM-bHAG_ciohkNh69n4AVHwV755r6Q' -H 'content-type: application/json;charset=UTF-8' -vo map_info.log > map_info.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat map_info.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名MAP信息失败,退出...${c_e}"; exit 232; }
|
||||
|
||||
# 处理接口获取的信息,拿到正确的解析组
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --map_info map_info.log $domain
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --map_info map_info.log $domain
|
||||
r_code=$?
|
||||
if [[ $r_code -eq 205 ]]; then
|
||||
exit 205
|
||||
elif [[ $r_code -eq 201 ]]; then
|
||||
accessid=`cat info.log | awk -F ':' '$1==4 {print $2}'`
|
||||
curl "https://dcp.ctcdn.cn/traffic-control-api/v2/access/parseDetail?access_id=$accessid" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjYzfQ.XZ_nNtRWoBRcPBM-bHAG_ciohkNh69n4AVHwV755r6Q' -H 'content-type: application/json;charset=UTF-8' -vo domain_map_info.log > domain_map_info.response 2>&1
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_map_info.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名-map信息失败,退出...${c_e}"; exit 206; }
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --domain_map_info domain_map_info.log 1
|
||||
|
||||
elif [[ $r_code -ne 0 ]]; then
|
||||
echo -e "${c_br}处理域名MAP信息失败,退出...${c_e}"
|
||||
exit 231
|
||||
fi
|
||||
}
|
||||
|
||||
function parent() {
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取父方案信息
|
||||
curl 'https://lasc-new.ctcdn.cn/v1/domain/internal/parent_draft' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo parent.log > parent.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/parent.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取父方案信息失败,退出...${c_e}"; exit 234; }
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 4
|
||||
accid=`cat ../$parent_dir/info.log | awk -F ':' '$1==3 {print $2}'`
|
||||
curl "https://confmanager.ctcdn.cn/v1/domain/conf/query_union?precise_query=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjQxNn0.KEFh_yg3J1y8sL7s3X_8jIR8GGq88A89b7J5YhVYVf8' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_4.log > domain_info_4.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_4.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 4失败,退出...${c_e}"; exit 243; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --parent_info_4 domain_info_4.log $accid
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_4 domain_info_4.log $accid
|
||||
r_code=$?
|
||||
if [[ $r_code -eq 204 ]]; then
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 5
|
||||
curl "https://lasc-new.ctcdn.cn/v1/domain/conf/op_query_union?page_size=20&page=1&domain=$domain" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_5.log > domain_info_5.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_5.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 5失败,退出...${c_e}"; exit 235; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --parent_info_5 domain_info_5.log $accid
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_5 domain_info_5.log $accid
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 5信息失败,退出...${c_e}"; exit 237; }
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取域名信息 - part 6
|
||||
domain_id=`cat info.log | awk -F ':' '$1==2 {print $2}'`
|
||||
curl "https://lasc-new.ctcdn.cn/v1/domain/internal/domain_config?domain_id=$domain_id" -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjE4NH0.1Pgfnky-grT0MN2ic04PqMf32BdnF6iN_PMk3kNOzjY' -H 'content-type: application/json;charset=UTF-8' -vo domain_info_6.log > domain_info_6.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
cat $trash/domain_info_6.response | grep -Eq 'HTTP/(1.1|2) 200 OK'
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}获取域名信息-part 6失败,退出...${c_e}"; exit 238; }
|
||||
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --parent_info_6 domain_info_6.log $accid
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --domain_info_6 domain_info_6.log $accid
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}处理域名-part 6信息失败,退出...${c_e}"; exit 236; }
|
||||
|
||||
elif [[ $r_code -ne 0 ]]; then
|
||||
echo -e "${c_br}处理域名-part 4信息失败,退出...${c_e}"
|
||||
exit 239
|
||||
else
|
||||
:
|
||||
fi
|
||||
}
|
||||
|
||||
# 自定义颜色显示
|
||||
c_br='\e[1;31m' # bold red
|
||||
c_bg='\e[1;32m' # bold green
|
||||
c_by='\e[1;33m' # bold yellow
|
||||
c_bb='\e[1;34m' # bold blue
|
||||
c_bp='\e[1;35m' # bold purple
|
||||
c_bc='\e[1;36m' # bold cyan
|
||||
c_bir='\e[1;3;31m' # * bold italic red
|
||||
c_bib='\e[1;3;34m' # * bold italic blue
|
||||
c_bic='\e[1;3;36m' # bold italic cyan
|
||||
c_e='\e[0m' # reset
|
||||
|
||||
# 初始化变量
|
||||
TS=`date +%s%N`
|
||||
toolbox='/usr/local/script/fanmf11' # *
|
||||
data='/usr/local/script/fanmf11/data' # *
|
||||
host=`whoami` # * 判断执行用户
|
||||
trash="/usr/local/script/fanmf11/trash/$host/$TS" # * 每个用户的临时文件存放处
|
||||
|
||||
if [[ -d $trash ]]; then
|
||||
echo -e "${c_br}对于同一个用户,同一时间只能运行一个实例,请重新运行...${c_e}"
|
||||
exit 245
|
||||
else
|
||||
mkdir -p $trash
|
||||
cd $trash && cd ..
|
||||
docs=`ls`
|
||||
for doc in $docs; do
|
||||
[[ -f $doc ]] && rm -rf $doc
|
||||
done
|
||||
folders=`ls -t`
|
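# 最多保留 29 个按时间排序的历史运行目录,超出部分从最旧的开始删除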
||||
while [[ `echo $folders | awk '{print NF}'` -gt 29 ]]; do
|
||||
folder=`ls -t | tail -1`
|
||||
rm -rf $folder
|
||||
folders=`ls -t`
|
||||
done
|
||||
cd $trash && touch infos
|
||||
fi
|
||||
|
||||
# 捕获Ctrl+C时触发
|
||||
trap 'onCtrlC' INT
|
||||
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 判断入参数量是否合法
|
||||
if [[ $# -eq 1 ]]; then
|
||||
domain=$1
|
||||
infos
|
||||
elif [[ $# -eq 3 && $1 == '--map' ]]; then
|
||||
domain=$2 && map_dir=$3
|
||||
map
|
||||
cp map.log ../$map_dir/
|
||||
cp info.log ../$map_dir/
|
||||
elif [[ $# -eq 3 && $1 == '--parent' ]]; then
|
||||
domain=$2 && parent_dir=$3
|
||||
parent
|
||||
cp ./cmap ../$parent_dir/
|
||||
else
|
||||
echo -e "${c_br}只接收一个参数,请输入要查询的域名\ne.g. infos www.ctyun.cn\n${c_e}"
|
||||
exit 249
|
||||
fi
|
||||
|
396
old/ctc/ips.sh
Normal file
@ -0,0 +1,396 @@
|
||||
#!/bin/bash
|
||||
# 功能实现:判定一个IP或者组是否是天翼平台的
|
||||
# 依赖文件:ip.group/lakes
|
||||
# 存在问题:
|
||||
# 整体逻辑:
|
||||
# 1. 输入有四种可能:IPv4/IPv6/英文标签/中文标签
|
||||
# 2. 以上四种可能,最终使用一个函数来完成——ipvx_check
|
||||
# 3. ipvx_check的作用就是检查一个IP是否属于天翼
|
||||
# 4. 首先根据IP找出所在的节点英文标签,再根据英文标签后缀不同,进行逐一判别
|
||||
# 5. ip.group文件:
|
||||
# 对于缓存服务器(边缘/父层/中心)节点 - 第一列是RIP,第二列是英文标签,第三列是VIP,第八列是LVS(全都一样)
|
||||
# 对于LVS节点 - 第一列是IP。第二列是英文标签
|
||||
# 对于其他节点,同LVS
|
||||
# 6. lakes文件:文件开头有介绍每一列的含义
|
||||
# 7. 英文标签的特点可以通过`cat ip.group | awk '{print $2}' | awk -F '_' '{print $4}' | sort | uniq`来筛选
|
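# 示例(IP 与标签均为虚构,仅用于说明上面第 5 条的列布局和第 7 条的后缀提取):
#   缓存节点行:  1.2.3.4  ct_fj_fuzhou3_e1  5.6.7.8,5.6.7.9  ...  ct_fj_fuzhou3_lvs1,...
#   提取节点类型后缀:  echo ct_fj_fuzhou3_e1 | awk -F '_' '{print $4}'    # => e1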
||||
|
||||
# 自定义颜色显示
|
||||
c_br="\e[1;31m" # bold red
|
||||
c_bg="\e[1;32m" # bold green
|
||||
c_by="\e[1;33m" # bold yellow
|
||||
c_bp="\e[1;35m" # bold purple
|
||||
c_iy="\e[3;33m" # italic yellow
|
||||
c_bir='\e[1;3;31m' # * bold italic red
|
||||
c_big='\e[1;3;32m' # bold italic green
|
||||
c_bib='\e[1;3;34m' # * bold italic blue
|
||||
c_bip='\e[1;3;35m' # bold italic purple
|
||||
c_bic='\e[1;3;36m' # bold italic cyan
|
||||
c_e="\e[0m" # reset
|
||||
|
||||
# 使用说明
|
||||
function usage {
|
||||
echo -e "${c_bg}1. 查找V(R)IP/集群中文名/集群英文名是否是归属天翼云平台${c_e}"
|
||||
echo -e "${c_bg}2. 查询IP对应的内网IP和主机名,只支持IPv4地址${c_e}"
|
||||
echo -e "${c_iy}实例:${c_e}"
|
||||
echo -e "${c_iy} ips 59.56.177.149${c_e}"
|
||||
echo -e "${c_iy} ips ct_fj_fuzhou3_e1${c_e}"
|
||||
echo -e "${c_iy} ips 福州3${c_e}"
|
||||
echo -e "${c_iy} ips -m 59.56.177.149${c_e}\n"
|
||||
echo -e "${c_bp}查询内网IP对应关系功能,因线上IPv6的机器没有加白,暂不支持获取IPv6主机内网IP...${c_e}"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# 如果输入是IP,则判断该IP是否属于天翼
|
||||
# 入参-1:IP
|
||||
# 入参-2:flg -- 0/1/2
|
||||
# flg = 0 -- 脚本输入的是IPv4或者IPv6
|
||||
# flg = 1 -- 脚本输入的是英文节点名
|
||||
# flg = 2 -- 脚本输入的是中文节点名
|
||||
function ipvx_check() {
|
||||
ipvx=$1
|
||||
if [[ $flg -eq 0 ]]; then
|
||||
# 同一个IP可能会过滤出来多个英文节点
|
||||
labels=`cat $data/ip.group | fgrep -w $ipvx | awk '{print $2}' | sort | uniq`
|
||||
elif [[ $flg -eq 1 ]]; then
|
||||
# 确保过滤出来的就是输入的节点名,排除其他含有相同IP的节点
|
||||
labels=`cat $data/ip.group | fgrep -w $ipvx | awk '$2=="'$tbd'" {print $2}' | sort | uniq`
|
||||
elif [[ $flg -eq 2 ]]; then
|
||||
# 中文节点名可能对应多个不同节点,全部输出
|
||||
labels=`cat $data/ip.group | fgrep -w $ipvx | awk '$2=="'$label_single'" {print $2}' | sort | uniq`
|
||||
fi
|
||||
[[ $labels == '' ]] && { echo -e "${c_br}$tbd 不是天翼平台的节点/IP,退出...${c_e}"; exit 44; }
|
||||
for label in $labels; do
|
||||
# 根据后缀输出
|
||||
# 后缀如果是 -- e/c/n/lvs,则输出对应的资源池,vip,rip,lvs信息,如果一个lvs对应多个边缘节点,则全部输出
|
||||
# 其他后缀则只输出对应节点的信息
|
||||
postfix=`echo $label | awk -F'_' '{print $4}'`
|
||||
if [[ $postfix =~ ^c[0-9]{0,2}$ ]]; then
|
||||
center_name_en=$label
|
||||
rip=`cat $data/ip.group | awk '$2=="'$center_name_en'"' | awk '{print $1}' | sort`
|
||||
vip=`cat $data/ip.group | awk '$2=="'$center_name_en'"' | awk '{print $3}' | sort | uniq`
|
||||
lvs_name=`cat $data/ip.group | awk '$2=="'$center_name_en'"' | awk '{print $8}' | sort | uniq | awk -F ',' '{print $1}'`
|
||||
lvs=`cat $data/ip.group | awk '$2=="'$lvs_name'"' | awk '{print $1}' | sort`
|
||||
resource=`cat $data/lakes | grep $center_name_en | awk '{print $6}' | sort | uniq`
|
||||
center_name_cn=`cat $data/lakes | grep $center_name_en | awk '{print $11}' | sort | uniq`
|
||||
|
||||
echo -e "$c_bp[$center_name_en: ${c_bg}RIP]$c_e"
|
||||
echo $rip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'"&&"'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
echo
|
||||
|
||||
echo -e "$c_bp[$center_name_en: ${c_bg}VIP]$c_e"
|
||||
echo $vip | awk -F ',' '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
echo -e "$c_bp[$lvs_name: ${c_bg}IP]$c_e"
|
||||
echo $lvs | awk '{for(i=1;i<=NF;i++) print " ", $i}'
|
||||
echo
|
||||
|
||||
echo -e "$c_by[$center_name_cn($center_name_en)所属资源池]$c_e"
|
||||
echo $resource | awk '{for(i=1;i<=NF;i++) print " ", $i} END{print ""}'
|
||||
|
||||
elif [[ $postfix =~ ^dns[0-9]{0,2}$ ]]; then
|
||||
dns_label=$label
|
||||
dnsip=`cat $data/ip.group | awk '$2=="'$dns_label'"' | awk '{print $1}' | sort`
|
||||
dnsrip=`cat $data/ip.group | awk '$2=="'$dns_label'"' | awk '{print $1}' | sort`
|
||||
dnsvip=`cat $data/ip.group | awk '$2=="'$dns_label'"' | awk '{print $3}' | sort | uniq`
|
||||
|
||||
echo -e "$c_bp[$dns_label: ${c_bg}RIP]$c_e"
|
||||
echo $dnsip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
echo -e "$c_bp[$dns_label: ${c_bg}VIP]$c_e"
|
||||
echo $dnsvip | awk -F ',' '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
elif [[ $postfix =~ ^e[0-9]{0,2}$ ]]; then
|
||||
edge_name_en=$label
|
||||
rip=`cat $data/ip.group | awk '$2=="'$edge_name_en'"' | awk '{print $1}' | sort`
|
||||
vip=`cat $data/ip.group | awk '$2=="'$edge_name_en'"' | awk '{print $3}' | sort | uniq`
|
||||
lvs_name=`cat $data/ip.group | awk '$2=="'$edge_name_en'"' | awk '{print $8}' | sort | uniq | awk -F ',' '{print $1}'`
|
||||
lvs=`cat $data/ip.group | awk '$2=="'$lvs_name'"' | awk '{print $1}' | sort`
|
||||
resource=`cat $data/lakes | grep $edge_name_en | awk '{print $6}' | sort | uniq`
|
||||
edge_name_cn=`cat $data/lakes | grep $edge_name_en | awk '{print $11}' | sort | uniq`
|
||||
|
||||
echo -e "$c_bp[$edge_name_en: ${c_bg}RIP]$c_e"
|
||||
echo $rip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
echo
|
||||
|
||||
echo -e "$c_bp[$edge_name_en: ${c_bg}VIP]$c_e"
|
||||
echo $vip | awk -F ',' '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
echo -e "$c_bp[$lvs_name: ${c_bg}IP]$c_e"
|
||||
echo $lvs | awk '{for(i=1;i<=NF;i++) print " ", $i}'
|
||||
echo
|
||||
|
||||
echo -e "$c_by[$edge_name_cn($edge_name_en)所属资源池]$c_e"
|
||||
echo $resource | awk '{for(i=1;i<=NF;i++) print " ", $i} END{print ""}'
|
||||
|
||||
elif [[ $postfix =~ ^lvs[0-9]{0,2}$ ]]; then
|
||||
lvs_name=$label
|
||||
lvs=`cat $data/ip.group | awk '$2=="'$lvs_name'"' | awk '{print $1}' | sort`
|
||||
level_unknown=`cat $data/ip.group | awk '$8 ~ "'$lvs_name'"' | awk '{print $2}' | sort | uniq`
|
||||
for unknown_en in $level_unknown; do
|
||||
rip=`cat $data/ip.group | awk '$8 ~ "'$lvs_name'" && $2 == "'$unknown_en'"' | awk '{print $1}' | sort`
|
||||
vip=`cat $data/ip.group | awk '$8 ~ "'$lvs_name'" && $2 == "'$unknown_en'"' | awk '{print $3}' | sort | uniq`
|
||||
resource=`cat $data/lakes | grep $unknown_en | awk '{print $6}' | sort | uniq`
|
||||
unknown_cn=`cat $data/lakes | grep $unknown_en | awk '{print $11}' | sort | uniq`
|
||||
|
||||
echo -e "$c_bp[$unknown_en: ${c_bg}RIP]$c_e"
|
||||
echo $rip | awk '{for(i=1;i<=NF;i++) print " ", $i}'
|
||||
echo
|
||||
|
||||
echo -e "$c_bp[$unknown_en: ${c_bg}VIP]$c_e"
|
||||
echo $vip | awk -F ',' '{for(i=1;i<=NF;i++) print " ", $i}'
|
||||
|
||||
echo -e "$c_by[$unknown_cn($unknown_en)所属资源池]$c_e"
|
||||
echo $resource | awk '{for(i=1;i<=NF;i++) print " ", $i} END{print ""}'
|
||||
done
|
||||
echo -e "$c_bp[$lvs_name: ${c_bg}IP]$c_e"
|
||||
echo $lvs | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
echo
|
||||
|
||||
elif [[ $postfix =~ ^m[0-9]{0,2}$ ]]; then
|
||||
mgt_label=$label
|
||||
mgtip=`cat $data/ip.group | awk '$2=="'$mgt_label'"' | awk '{print $1}' | sort`
|
||||
|
||||
echo -e "$c_bp[$mgt_label: ${c_bg}IP]$c_e"
|
||||
echo $mgtip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
echo
|
||||
|
||||
elif [[ $postfix =~ ^mysql[0-9]{0,2}$ ]]; then
|
||||
mysql_label=$label
|
||||
mysqlip=`cat $data/ip.group | awk '$2=="'$mysql_label'"' | awk '{print $1}' | sort`
|
||||
|
||||
echo -e "$c_bp[$mysql_label: ${c_bg}IP]$c_e"
|
||||
echo $mysqlip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
elif [[ $postfix =~ ^n[0-9]{0,2}$ ]]; then
|
||||
nation_name_en=$label
|
||||
rip=`cat $data/ip.group | awk '$2=="'$nation_name_en'"' | awk '{print $1}' | sort`
|
||||
vip=`cat $data/ip.group | awk '$2=="'$nation_name_en'"' | awk '{print $3}' | sort | uniq`
|
||||
lvs_name=`cat $data/ip.group | awk '$2=="'$nation_name_en'"' | awk '{print $8}' | sort | uniq | awk -F ',' '{print $1}'`
|
||||
lvs=`cat $data/ip.group | awk '$2=="'$lvs_name'"' | awk '{print $1}' | sort`
|
||||
resource=`cat $data/lakes | grep $nation_name_en | awk '{print $6}' | sort | uniq`
|
||||
nation_name_cn=`cat $data/lakes | grep $nation_name_en | awk '{print $11}' | sort | uniq`
|
||||
|
||||
echo -e "$c_bp[$nation_name_en: ${c_bg}RIP]$c_e"
|
||||
echo $rip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
echo
|
||||
|
||||
echo -e "$c_bp[$nation_name_en: ${c_bg}VIP]$c_e"
|
||||
echo $vip | awk -F ',' '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
echo -e "$c_bp[$lvs_name: ${c_bg}IP]$c_e"
|
||||
echo $lvs | awk '{for(i=1;i<=NF;i++) print " ", $i}'
|
||||
echo
|
||||
|
||||
echo -e "$c_by[$nation_name_cn($nation_name_en)所属资源池]$c_e"
|
||||
echo $resource | awk '{for(i=1;i<=NF;i++) print " ", $i} END{print ""}'
|
||||
|
||||
elif [[ $postfix =~ ^prets[0-9]{0,2}$ ]]; then
|
||||
prets_label=$label
|
||||
pretsip=`cat $data/ip.group | awk '$2=="'$prets_label'"' | awk '{print $1}' | sort`
|
||||
|
||||
echo -e "$c_bp[$prets_label: ${c_bg}IP]$c_e"
|
||||
echo $pretsip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
elif [[ $postfix =~ ^pretw[0-9]{0,2}$ ]]; then
|
||||
pretw_label=$label
|
||||
pretwip=`cat $data/ip.group | awk '$2=="'$pretw_label'"' | awk '{print $1}' | sort`
|
||||
|
||||
echo -e "$c_bp[$pretw_label: ${c_bg}IP]$c_e"
|
||||
echo $pretwip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
elif [[ $postfix =~ ^redis[0-9]{0,2}$ ]]; then
|
||||
redis_label=$label
|
||||
redisip=`cat $data/ip.group | awk '$2=="'$redis_label'"' | awk '{print $1}' | sort`
|
||||
|
||||
echo -e "$c_bp[$redis_label: ${c_bg}IP]$c_e"
|
||||
echo $redisip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
elif [[ $postfix =~ ^testts[0-9]{0,2}$ ]]; then
|
||||
testts_label=$label
|
||||
testtsip=`cat $data/ip.group | awk '$2=="'$testts_label'"' | awk '{print $1}' | sort`
|
||||
|
||||
echo -e "$c_bp[$testts_label: ${c_bg}IP]$c_e"
|
||||
echo $testtsip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
elif [[ $postfix =~ ^ts[0-9]{0,2}$ ]]; then
|
||||
ts_label=$label
|
||||
tsip=`cat $data/ip.group | awk '$2=="'$ts_label'"' | awk '{print $1}' | sort`
|
||||
|
||||
echo -e "$c_bp[$ts_label: ${c_bg}IP]$c_e"
|
||||
echo $tsip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
elif [[ $postfix =~ ^tw[0-9]{0,2}$ ]]; then
|
||||
tw_label=$label
|
||||
twip=`cat $data/ip.group | awk '$2=="'$tw_label'"' | awk '{print $1}' | sort`
|
||||
|
||||
echo -e "$c_bp[$tw_label: ${c_bg}IP]$c_e"
|
||||
echo $twip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
elif [[ $postfix =~ ^uatts[0-9]{0,2}$ ]]; then
|
||||
uatts_label=$label
|
||||
uattsip=`cat $data/ip.group | awk '$2=="'$uatts_label'"' | awk '{print $1}' | sort`
|
||||
|
||||
echo -e "$c_bp[$uatts_label: ${c_bg}IP]$c_e"
|
||||
echo $uattsip | awk '{for(i=1;i<=NF;i++) if($i=="'$ipvx'" && "'$flg'"==0) print " \033[1;3;36m", $i, "\033[0m"; else print " ", $i}'
|
||||
|
||||
else
|
||||
echo -e "${c_br}${ipvx}${c_e}不属于我司节点,如有误判,请联系fanmf11@chinatelecom.cn。\n"
|
||||
exit 92
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
function ip_search() {
|
||||
# 如果输入是IPv4,再判断其是否合法
|
||||
if [[ $tbd =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
|
||||
ip_1234=`echo $tbd | awk -F '.' '{print $1, $2, $3, $4}'`
|
||||
for num in $ip_1234; do
|
||||
if [[ $num -gt 255 ]]; then
|
||||
echo -e "${c_br}非法,请输入有效的IPv4地址。${c_e}"
|
||||
usage
|
||||
fi
|
||||
done
|
||||
|
||||
isInner=`echo $ip_1234 | awk '{print $1}'`
|
||||
if [[ $isInner == '192' ]]; then
|
||||
echo -e "${c_br}$tbd是内网IP,非法,请输入有效的外网IPv4地址。${c_e}"
|
||||
usage
|
||||
fi
|
||||
flg=0
|
||||
ipvx_check $tbd $flg
|
||||
|
||||
# 判断如果是IPv6(粗略的匹配规则,最短11,最长39,包含数字大小写字母以及英文冒号)
|
||||
elif [[ $tbd =~ ^[0-9a-fA-F:]{11,39}$ ]]; then
|
||||
flg=0
|
||||
ipvx_check $tbd $flg
|
||||
# 判断如果是节点英文标签格式
|
||||
elif [[ $tbd =~ ^(ct|cu|cm|bgp|ctbgp|cmbgp|cubgp|as|eu|sa|na|cbn|cern)_[a-z]{2,3}_[a-z]{2,20}[0-9]{1,2}_(c|dns|e|lvs|m|mysql|n|prets|pretw|redis|testts|ts|tw|uatts1)[0-9]{0,2}$ ]]; then
|
||||
anyip=`cat $data/ip.group | awk '$2=="'$tbd'"' | head -n 1 | awk '{print $1}'`
|
||||
if [[ $anyip == '' ]]; then
|
||||
echo -e "${c_br}${tbd}${c_e}不属于我司节点,如有误判,请联系fanmf11@chinatelecom.cn。"
|
||||
usage
|
||||
exit 90
|
||||
fi
|
||||
flg=1
|
||||
ipvx_check $anyip $flg
|
||||
|
||||
# 剩余的情况一律归结为中文标签格式
|
||||
else
|
||||
# 一个中文标签可能会对应着多个不同的节点
|
||||
label_multi=`cat $data/lakes | awk '$11=="'$tbd'" {print $1}' | sort | uniq`
|
||||
if [[ $label_multi == '' ]]; then
|
||||
echo -e "${c_br}${tbd}${c_e}不属于我司节点,如有误判,请联系fanmf11@chinatelecom.cn。"
|
||||
usage
|
||||
exit 91
|
||||
fi
|
||||
flg=2
|
||||
for label_single in $label_multi; do
|
||||
anyip=`cat $data/ip.group | awk '$2=="'$label_single'"' | head -n 1 | awk '{print $1}'`
|
||||
if [[ $anyip != '' ]]; then
|
||||
ipvx_check $anyip $flg
|
||||
else
|
||||
echo -e "${c_br}${label_single}节点存在,但是无法找到其下IP,可使用rip命令尝试再次查询。${c_e}\n"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
function ip_inner() {
|
||||
> res.log
|
||||
let number=`cat ips.log | tr -d ' ' | grep -Ev ']' | sed 's/^$/#/g' | awk 'BEGIN{RS="#"} {print NR}' | tail -1`
|
||||
cat ips.log | tr -d ' ' | grep -Ev ']' | sed 's/^$/#/g' | awk 'BEGIN{RS="#"} {for(i=1;i<=NR;i++) if(i==NR) print $0 > i}'
|
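# 以上两行的作用(供参考):把 ips.log 中以空行分隔的每个输出块视为一条记录(空行先被替换成 # 再作为 RS),
# 统计块数 number,并把第 i 块写入名为 i 的临时文件,后面再从中提取 IPv4/IPv6 地址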
||||
for i in `seq $number`; do
|
||||
cat $i | fgrep -q "$tbd"
|
||||
[[ $? -ne 0 ]] && continue
|
||||
cat $i | grep -Eo "[0-9a-fA-F:]{11,39}" > ip$i
|
||||
cat $i | grep -Eo "([0-9]{1,3}\.){3}[0-9]{1,3}" >> ip$i
|
||||
# 将每一块的IP重新放回文件i,并将结果追加到res.log
|
||||
cat ip$i > $i && cat ip$i >> res.log
|
||||
done
|
||||
ip_list=`cat res.log | sort | uniq`
|
||||
for ipy in $ip_list; do
|
||||
echo $ipy | grep -Eq "[0-9a-fA-F:]{11,39}"
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "跳板机无IPv6出口,暂不支持获取IPv6主机内网IP..." > inner_$ipy.log
|
||||
else
|
||||
ssh -o ConnectTimeout=30 $ipy "hostname; /usr/sbin/ifconfig | grep 'inet 192'" > inner_$ipy.log 2>&1 &
|
||||
fi
|
||||
done
|
||||
wait
|
||||
|
||||
echo '-----------------------------------------------------------------------------------------'
|
||||
printf "${c_bic}%-25s%-20s%-50s\n${c_e}" "IP" "Inner IP" "Hostname"
|
||||
for ipy in $ip_list; do
|
||||
cat inner_$ipy.log |grep -iq 'timed out'
|
||||
res1=$?
|
||||
cat inner_$ipy.log |grep -iq 'closed by'
|
||||
res2=$?
|
||||
cat inner_$ipy.log |grep -iq 'IPv6'
|
||||
res3=$?
|
||||
if [[ $res1 -eq 0 ]]; then
|
||||
printf "${c_bir}%-25s%-20s%-40s${c_e}\n" $ipy "===========>" "Connection timed out during banner exchange"
|
||||
continue
|
||||
elif [[ $res2 -eq 0 ]]; then
|
||||
printf "${c_bir}%-25s%-20s%-40s${c_e}\n" $ipy "===========>" "ssh_exchange_identification: Connection closed by remote host"
|
||||
continue
|
||||
elif [[ $res3 -eq 0 ]]; then
|
||||
printf "${c_bir}%-25s%-20s%-40s${c_e}\n" $ipy "===========>" "IPv6的机器没有加白,暂不支持获取IPv6主机内网IP..."
|
||||
continue
|
||||
else
|
||||
host=`cat inner_$ipy.log | fgrep 'in.ctcdn.cn'`
|
||||
[[ $host == '' ]] && host='-'
|
||||
inner_ip=`cat inner_$ipy.log | grep 'inet 192' | awk '{print $2}'`
|
||||
[[ $inner_ip == '' ]] && inner_ip='-'
|
||||
printf "%-25s%-20s%-40s\n" $ipy $inner_ip $host
|
||||
fi
|
||||
done
|
||||
printf "${c_bic}%-25s%-20s%-50s\n${c_e}" "IP" "Inner IP" "Hostname"
|
||||
echo '-----------------------------------------------------------------------------------------'
|
||||
}
|
||||
|
||||
toolbox='/usr/local/script/fanmf11/'
|
||||
data='/usr/local/script/fanmf11/data'
|
||||
label_single=''
|
||||
TS=`date +%s%N`
|
||||
host=`whoami` # * 判断执行用户
|
||||
trash="/usr/local/script/fanmf11/trash/$host/$TS" # * 每个用户的临时文件存放处
|
||||
|
||||
if [[ -d $trash ]]; then
|
||||
echo -e "${c_br}对于同一个用户,同一时间只能运行一个实例,请重新运行...${c_e}"
|
||||
exit 245
|
||||
else
|
||||
mkdir -p $trash
|
||||
cd $trash && cd ..
|
||||
docs=`ls`
|
||||
for doc in $docs; do
|
||||
[[ -f $doc ]] && rm -rf $doc
|
||||
done
|
||||
folders=`ls -t`
|
||||
while [[ `echo $folders | awk '{print NF}'` -gt 29 ]]; do
|
||||
folder=`ls -t | tail -1`
|
||||
rm -rf $folder
|
||||
folders=`ls -t`
|
||||
done
|
||||
cd $trash && touch ips
|
||||
fi
|
||||
|
||||
# 参数个数必须是一个,并把第一个参数赋值给tbd
|
||||
if [[ $# -eq 1 ]]; then
|
||||
tbd=$1
|
||||
ip_search
|
||||
elif [[ $# -eq 2 && $1 == '-m' ]]; then
|
||||
tbd=$2
|
||||
ip_search > ips.log 2>&1
|
||||
[[ $? -ne 0 ]] && { cat ips.log; exit 211; }
|
||||
# 如果输入是IPv4,再判断其是否合法
|
||||
if [[ $tbd =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
|
||||
ip_inner
|
||||
else
|
||||
echo -e "${c_br}输入只能是IPv4,不接受其他格式的内容。${c_e}\n"
|
||||
exit 112
|
||||
fi
|
||||
else
|
||||
usage
|
||||
fi
|
||||
|
||||
|
||||
|
BIN
old/ctc/jaydiff
Executable file
Binary file not shown.
58
old/ctc/logcombo.awk
Normal file
@ -0,0 +1,58 @@
|
||||
#!/usr/bin/awk -f
|
||||
# lap : 1--overlap 0--non-overlap
|
||||
# comp: 1--access 0--origin
|
||||
# pos : 4 | 10 | 11
|
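# Hypothetical invocation (field layout assumed from the comments above; only set the fNN filters you need):
#   awk -f logcombo.awk -v comp=1 -v atype=combo -v acc=www.example.com -v f00=200 access.log
# Unset fNN filters keep the match-all defaults from BEGIN; range filters such as f01/f02/f10
# only honour their fNNs/fNNe bounds when fNN itself is set to a non-empty value.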
||||
|
||||
BEGIN {
|
||||
if(f00=="") { t00=1000; } else { t00=4 } # 状态码
|
||||
if(f01=="") { t01=6; f01s=0; f01e=1000; } else { t01=6 } # 边缘响应时间
|
||||
if(f02=="") { t02=9; f02s=0; f02e=1000; } else { t02=9 } # 边缘首包
|
||||
if(f03=="") { t03=1000; } else { t03=10 } # 边缘错误码
|
||||
if(f04=="") { t04=1000; } else { t04=11 }
|
||||
if(f05=="") { t05=1000; } else { t05=13 }
|
||||
if(f06=="") { t06=1000; } else { t06=15 }
|
||||
if(f07=="") { t07=1000; } else { t07=16 }
|
||||
if(f08=="") { t08=1000; } else { t08=18 }
|
||||
if(f09=="") { t09=1000; } else { t09=24 }
|
||||
if(f10=="") { t10=26; f10s=0; f10e=1000; } else { t10=26 }
|
||||
if(f11=="") { t11=1000; } else { t11=28 }
|
||||
if(f13=="") { t13=1000; } else { t13=33 }
|
||||
if(f14=="") { t14=1000; } else { t14=34 }
|
||||
|
||||
if(f28=="") { t28=1000; f28s=0; f28e=1000; } else { t28=5 }
|
||||
if(f30=="") { t30=1000; } else { t30=7 }
|
||||
if(f31=="") { t31=1000; } else { t31=8 }
|
||||
if(f32=="") { t32=1000; } else { t32=10 }
|
||||
if(f33=="") { t33=1000; } else { t33=11 }
|
||||
if(f34=="") { t34=1000; } else { t34=46 }
|
||||
if(f37=="") { t37=1000; f37s=0; f37e=1000; } else { t37=4 }
|
||||
if(f38=="") { t38=1000; f38s=0; f38e=1000;} else { t38=6 }
|
||||
|
||||
if(comp==1) { idn=56 } else if(comp==0) { idn=50 }
|
||||
number=0
|
||||
|
||||
}
|
||||
|
||||
{
|
||||
if(comp==0) {
|
||||
tt28 = $t28 / 1000
|
||||
tt37 = $t37 / 1000
|
||||
tt38 = $t38 / 1000
|
||||
}
|
||||
if(comp==1 && atype=="combo" && f00==$t00 && f01s<=$t01 && $t01<=f01e && f02s<=$t02 && $t02<=f02e && f03==$t03 && f04==$t04 && f05==$t05 && f06==$t06 && f07==$t07 && f08==$t08 && f09==$t09 && f10s<=$t10 && $t10<=f10e && f11==$t11 && f13==$t13 && f14==$t14 && acc==$idn) {
|
||||
number++
|
||||
} else if(comp==1 && atype=="logs" && f00==$t00 && f01s<=$t01 && $t01<=f01e && f02s<=$t02 && $t02<=f02e && f03==$t03 && f04==$t04 && f05==$t05 && f06==$t06 && f07==$t07 && f08==$t08 && f09==$t09 && f10s<=$t10 && $t10<=f10e && f11==$t11 && f13==$t13 && f14==$t14 && acc==$idn) {
|
||||
print $0
|
||||
} else if(comp==0 && atype=="combo" && f28s<=tt28 && tt28<=f28e && f30==$t30 && f31==$t31 && f32==$t32 && f33==$t33 && f34==$t34 && f37s<=tt37 && tt37<=f37e && f38s<=tt38 && tt38<=f38e && acc==$idn) {
|
||||
number++
|
||||
} else if(comp==0 && atype=="logs" && f28s<=tt28 && tt28<=f28e && f30==$t30 && f31==$t31 && f32==$t32 && f33==$t33 && f34==$t34 && f37s<=tt37 && tt37<=f37e && f38s<=tt38 && tt38<=f38e && acc==$idn) {
|
||||
print $0
|
||||
}
|
||||
}
|
||||
|
||||
END {
|
||||
if(atype=="combo")
|
||||
printf "%-18s%-8s%-s\n", "符合上述条件的日志占比 -- ", number, number/NR*100"%"
|
||||
}
|
||||
|
||||
|
25
old/ctc/logcommon.awk
Normal file
@ -0,0 +1,25 @@
|
||||
#!/usr/bin/awk -f
|
||||
# lap : 1--overlap 0--non-overlap
|
||||
# comp: 1--access 0--origin
|
||||
# code:
|
||||
# non-blank -- specify status code
|
||||
# blank -- not specify status code
|
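# Hypothetical invocation (pos selects the column to aggregate on, e.g. 4/10/11 as in logcombo.awk;
# acc is the domain expected in column 56 for access logs or column 50 for origin logs):
#   awk -f logcommon.awk -v comp=1 -v code=404 -v acc=www.example.com -v pos=10 access.log
# With code unset, the status test matches every line (empty string vs. the empty field 1000).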
||||
BEGIN {
|
||||
if(code!="" && comp==1) { sc=4 }
|
||||
if(code!="" && comp==0) { sc=11 }
|
||||
if(code=="") { sc=1000 }
|
||||
if(comp==1) { ac=56 }
|
||||
if(comp==0) { ac=50 }
|
||||
}
|
||||
|
||||
{
|
||||
if(acc==$ac && code==$sc)
|
||||
res[$pos]++
|
||||
}
|
||||
|
||||
END {
|
||||
for(i in res)
|
||||
printf "%-12s%-8s%-s\n", res[i]/NR*100"%", res[i], i
|
||||
}
|
||||
|
||||
|
27
old/ctc/logqps.awk
Normal file
@ -0,0 +1,27 @@
|
||||
#!/usr/bin/awk -f
|
||||
# lap : 1--overlap 0--non-overlap
|
||||
# comp: 1--access 0--origin
|
||||
# pos : 4 | 10 | 11
|
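# Hypothetical invocation (counts lines of one domain, optionally filtered by status code):
#   awk -f logqps.awk -v comp=1 -v code=200 -v acc=www.example.com access.log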
||||
BEGIN {
|
||||
number=0
|
||||
if(code!="" && comp==1) { sc=4 }
|
||||
if(code!="" && comp==0) { sc=11 }
|
||||
if(code=="") { sc=1000 }
|
||||
if(comp==1) { ac=56 }
|
||||
if(comp==0) { ac=50 }
|
||||
}
|
||||
|
||||
{
|
||||
if(acc==$ac && code==$sc)
|
||||
number++
|
||||
}
|
||||
|
||||
END {
|
||||
if(code != "")
|
||||
# 如果百分比不是100%说明这个节点有重叠域名访问日志
|
||||
printf "%-8s%-15s%-s\n", code, number, number/NR*100"%"
|
||||
else
|
||||
printf "%-8s%-15s%-s\n", "QPS", number, number/NR*100"%"
|
||||
}
|
||||
|
||||
|
1569
old/ctc/logs.sh
Normal file
File diff suppressed because it is too large
20
old/ctc/logsc.awk
Normal file
@ -0,0 +1,20 @@
|
||||
#!/usr/bin/awk -f
|
||||
# lap : 1--overlap 0--non-overlap
|
||||
# comp: 1--access 0--origin
|
||||
# pos : 4 | 10 | 11
|
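# Hypothetical invocation (status-code distribution of one domain; pos=4 for access logs, 11 for origin logs):
#   awk -f logsc.awk -v comp=1 -v acc=www.example.com -v pos=4 access.log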
||||
BEGIN {
|
||||
if(comp==1) { ac=56 }
|
||||
if(comp==0) { ac=50 }
|
||||
}
|
||||
|
||||
{
|
||||
if(acc==$ac)
|
||||
res[$pos]++
|
||||
}
|
||||
|
||||
END {
|
||||
for(i in res)
|
||||
printf "%-8s%-15s%-s\n", i, res[i], res[i]/NR*100"%"
|
||||
}
|
||||
|
||||
|
66
old/ctc/logtime.awk
Normal file
@ -0,0 +1,66 @@
|
||||
#!/usr/bin/awk -f
|
||||
# lap : 1--overlap 0--non-overlap
|
||||
# comp: 1--access 0--origin
|
||||
# code:
|
||||
# non-blank -- specify status code
|
||||
# blank -- not specify status code
|
||||
# index = 1 if time duration < 1
|
||||
# index = 2 if time duration < 2
|
||||
# index = 3 if time duration < 3
|
||||
# index = 4 if time duration < 4
|
||||
# index = 5 if time duration < 5
|
||||
# index = 6 if time duration < 6
|
||||
# index = 7 if time duration < 11
|
||||
# index = 8 if time duration < 16
|
||||
# index = 9 if time duration < 21
|
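# Hypothetical invocation (pos is the assumed duration column; trans in BEGIN scales origin
# millisecond values down to seconds before bucketing):
#   awk -f logtime.awk -v comp=1 -v code=200 -v acc=www.example.com -v pos=6 access.log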
||||
|
||||
function timeproc(dur, trans) {
|
||||
if((dur / trans)<1)
|
||||
res[1]++
|
||||
else if((dur / trans)<2)
|
||||
res[2]++
|
||||
else if((dur / trans)<3)
|
||||
res[3]++
|
||||
else if((dur / trans)<4)
|
||||
res[4]++
|
||||
else if((dur / trans)<5)
|
||||
res[5]++
|
||||
else if((dur / trans)<6)
|
||||
res[6]++
|
||||
else if((dur / trans)<11)
|
||||
res[7]++
|
||||
else if((dur / trans)<16)
|
||||
res[8]++
|
||||
else if((dur / trans)<21)
|
||||
res[9]++
|
||||
else if((dur / trans)>=21)
|
||||
res[10]++
|
||||
}
|
||||
|
||||
BEGIN {
|
||||
if(code!="" && comp==1) { sc=4 }
|
||||
if(code!="" && comp==0) { sc=11 }
|
||||
if(code=="") { sc=1000 }
|
||||
if(comp==1) { ac=56; trans=1; }
|
||||
if(comp==0) { ac=50; trans=1000; }
|
||||
}
|
||||
|
||||
{
|
||||
if(acc==$ac && code==$sc)
|
||||
timeproc($pos, trans)
|
||||
|
||||
}
|
||||
END {
|
||||
for(i in res)
|
||||
if(i==1||i==2||i==3||i==4||i==5||i==6)
|
||||
printf "%-15s%-8s%-s\n", "time < "i"s", res[i], res[i]/NR*100"%"
|
||||
else if(i==7)
|
||||
printf "%-15s%-8s%-s\n", "time < 11s", res[i], res[i]/NR*100"%"
|
||||
else if(i==8)
|
||||
printf "%-15s%-8s%-s\n", "time < 16s", res[i], res[i]/NR*100"%"
|
||||
else if(i==9)
|
||||
printf "%-15s%-8s%-s\n", "time < 21s", res[i], res[i]/NR*100"%"
|
||||
else if(i==10)
|
||||
printf "%-15s%-8s%-s\n", "time >= 21s", res[i], res[i]/NR*100"%"
|
||||
}
|
||||
|
349
old/ctc/map.sh
Normal file
@ -0,0 +1,349 @@
|
||||
#!/bin/bash
|
||||
# 功能实现:根据指定的域名,查询对应域名所在解析组的相关信息
|
||||
# 依赖文件:dna/
|
||||
# 存在问题:
|
||||
#
|
||||
|
||||
# 自定义控制台颜色显示
|
||||
c_br='\e[1;31m' # bold red
|
||||
c_bg='\e[1;32m' # bold green
|
||||
c_bc='\e[1;36m' # bold cyan
|
||||
c_by='\e[1;33m' # bold yellow
|
||||
c_bp='\e[1;35m' # bold purple
|
||||
c_bir='\e[1;3;31m' # bold italic red
|
||||
c_big='\e[1;3;32m' # bold italic green
|
||||
c_biy='\e[1;3;33m' # bold italic yellow
|
||||
c_bib='\e[1;3;34m' # bold italic blue
|
||||
c_bip='\e[1;3;35m' # bold italic purple
|
||||
c_bic='\e[1;3;36m' # bold italic cyan
|
||||
c_biw='\e[1;3;30m' # bold italic gray
|
||||
c_e='\e[0m' # reset
|
||||
|
||||
# 使用说明
|
||||
function usage {
|
||||
echo -e "${c_bib}Usage: ${c_e}"
|
||||
echo -e "${c_bib} map -d domain vip # 从域名解析组中随机获取一个VIP ${c_e}"
|
||||
echo -e "${c_bib} map -d domain rip # 从域名解析组中随机获取一个RIP ${c_e}"
|
||||
echo -e "${c_bib} map -d domain ip # 验证一个IP是否属于域名解析组中的VIP或者RIP ${c_e}"
|
||||
echo -e "${c_bib} map -d domain label # 验证一个节点中/英文标签名是否包含在域名解析组中 ${c_e}"
|
||||
echo -e "${c_bib} map -d domain label # 打印域名解析组中所有的节点信息 ${c_e}"
|
||||
echo -e "${c_bib} map -d domain cover # 输出域名边缘解析组节点资源覆盖情况,可指定区域查询 ${c_e}"
|
||||
echo -e "${c_bib} map -d domain parent # 输出域名父解析组节点资源覆盖情况,可指定区域查询 ${c_e}\n"
|
||||
echo -e "${c_bic}[MAP-100] 该脚本工具会根据指定的域名,查询对于应域名所在解析组的相关信息,其中vip/rip/pool/cover/parent均是字符串参数,domain/ip是实际要输入真实值的参数,label既可以是字符串参数,也可以是实际节点中/英文标签名称。${c_e}\n"
|
||||
exit 100
|
||||
}
|
||||
|
||||
function onCtrlC () {
|
||||
# while capture Ctrl+C, kill all background processes silently and exit
|
||||
exec 3>&2 # 3 is now a copy of 2
|
||||
exec 2> /dev/null # 2 now points to /dev/null
|
||||
sleep 1 # sleep to wait for process to die
|
||||
exec 2>&3 # restore stderr to saved
|
||||
exec 3>&- # close saved version
|
||||
echo
|
||||
echo -e "${c_bir}[MAP-101] Ctrl+C is captured, exiting...\n${c_e}"
|
||||
exit 101
|
||||
}
|
||||
|
||||
# 随机获取域名对应解析组的一个VIP
|
||||
function random_vip() {
|
||||
# 其实支持的不仅如下,只要输入正确,其他网络运营商也是可以查询的
|
||||
echo -ne "${c_bg}请输入ISP类型--【ct|cu|cm|bgp|ctbgp|cubgp|cmbgp】:${c_e}"
|
||||
read -t 60 isp
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-102] 60s内无任何输入,退出...${c_e}\n"; exit 102; }
|
||||
# do a check to see if isp is correct or not
|
||||
[[ $isp == '' ]] && isp='ct'
|
||||
# 从解析组VIP列过滤以运营商标识开头的节点和VIP
|
||||
ip_list=`cat map.log | awk '{print $4, $5}' | grep "^${isp}_" | awk '{print $2}' | sort | uniq`
|
||||
[[ $ip_list == '' ]] && { echo -e "${c_br}[MAP-103] 在解析组$map中,没有找到$isp的网络运营商,退出...${c_e}"; exit 103; }
|
||||
# 匹配v6和v4的子集
|
||||
v6_list=`echo $ip_list | grep -Eo "[0-9a-fA-F:]{11,39}"`
|
||||
v4_list=`echo $ip_list | grep -Eo "([0-9]{1,3}\.){3}[0-9]{1,3}"`
|
||||
# 计算分别有多少个
|
||||
num_v6=`echo $v6_list | awk '{print NF}'`
|
||||
num_v4=`echo $v4_list | awk '{print NF}'`
|
||||
# 获取随机的IP,注意用$RANDOM取模之后,结果有可能是0,最大值不超过总数,所以需要 +1
|
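# 示例:若 num_v4=3,则 $RANDOM % 3 的取值范围是 0~2;awk 的字段编号从 1 开始,故取 $(v4+1)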
||||
[[ num_v6 -ne 0 ]] && rand6=$(( $RANDOM % $num_v6 )) && vip_v6=`echo $v6_list | awk -v v6=$rand6 '{print $(v6+1)}'`
|
||||
[[ num_v4 -ne 0 ]] && rand4=$(( $RANDOM % $num_v4 )) && vip_v4=`echo $v4_list | awk -v v4=$rand4 '{print $(v4+1)}'`
|
||||
echo "vip_v6: $vip_v6"
|
||||
echo "vip_v4: $vip_v4"
|
||||
echo -e "${c_bip}MAP: $map\n${c_e}"
|
||||
}
|
||||
|
||||
|
||||
# 随机获取域名对应解析组的一个RIP
|
||||
function random_rip() {
|
||||
# 其实支持的不仅如下,只要输入正确,其他网络运营商也是可以查询的
|
||||
echo -ne "${c_bg}请输入ISP类型--【ct|cu|cm|bgp|ctbgp|cubgp|cmbgp】:${c_e}"
|
||||
read -t 60 isp
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-104] 60s内无任何输入,退出...${c_e}\n"; exit 104; }
|
||||
# default to China Telecom (ct) when no ISP is entered
|
||||
[[ $isp == '' ]] && isp='ct'
|
||||
# 从解析组VIP列过滤以运营商标识开头的节点和VIP
|
||||
ip_list=`cat map.log | awk '{print $4, $5}' | grep "^${isp}_" | awk '{print $2}' | sort | uniq`
|
||||
[[ $ip_list == '' ]] && { echo -e "${c_br}[MAP-106] 在解析组$map中,没有找到$isp的网络运营商,退出...${c_e}"; exit 105; }
|
||||
# 匹配v6和v4的子集
|
||||
v6_list=`echo $ip_list | grep -Eo "[0-9a-fA-F:]{11,39}"`
|
||||
v4_list=`echo $ip_list | grep -Eo "([0-9]{1,3}\.){3}[0-9]{1,3}"`
|
||||
# 计算分别有多少个
|
||||
num_v6=`echo $v6_list | awk '{print NF}'`
|
||||
num_v4=`echo $v4_list | awk '{print NF}'`
|
||||
# 获取随机的IP,注意用$RANDOM取模之后,结果有可能是0,最大值不超过总数,所以需要 +1
|
||||
[[ num_v6 -ne 0 ]] && rand6=$(( $RANDOM % $num_v6 )) && vip_v6=`echo $v6_list | awk -v v6=$rand6 '{print $(v6+1)}'`
|
||||
[[ num_v4 -ne 0 ]] && rand4=$(( $RANDOM % $num_v4 )) && vip_v4=`echo $v4_list | awk -v v4=$rand4 '{print $(v4+1)}'`
|
||||
# 拿到VIP之后,使用`ips`获取RIP列表
|
||||
[[ num_v6 -ne 0 ]] && v6_list=`ips $vip_v6 | sed -n '/RIP/, /VIP/ p' | grep -Eo '([0-9]{1,3}.){3}[0-9]{1,3}'`
|
||||
[[ num_v4 -ne 0 ]] && v4_list=`ips $vip_v4 | sed -n '/RIP/, /VIP/ p' | grep -Eo '([0-9]{1,3}.){3}[0-9]{1,3}'`
|
||||
# 计算分别有多少个
|
||||
num_v6=`echo $v6_list | awk '{print NF}'`
|
||||
num_v4=`echo $v4_list | awk '{print NF}'`
|
||||
# 获取随机的IP,注意用$RANDOM取模之后,结果有可能是0,最大值不超过总数,所以需要 +1
|
||||
[[ num_v6 -ne 0 ]] && rand6=$(( $RANDOM % $num_v6 )) && rip_v6=`echo $v6_list | awk -v v6=$rand6 '{print $(v6+1)}'`
|
||||
[[ num_v4 -ne 0 ]] && rand4=$(( $RANDOM % $num_v4 )) && rip_v4=`echo $v4_list | awk -v v4=$rand4 '{print $(v4+1)}'`
|
||||
|
||||
echo "rip_v6: $rip_v6"
|
||||
echo "rip_v4: $rip_v4"
|
||||
echo -e "${c_bip}MAP: $map\n${c_e}"
|
||||
}
|
||||
|
||||
|
||||
# 判断一个IP是否归属域名的解析组,可以是VIP,也可以是RIP
|
||||
function ip_inmap() {
|
||||
# 使用`ips`判断IP是否在天翼平台
|
||||
ips $item > ips.log 2>&1
|
||||
[[ $? -ne 0 ]] && { cat ips.log; echo -e "${c_br}[MAP-106]${c_e}"; exit 106; }
|
||||
# 判断IP是否是RIP
|
||||
cat $data/ip.group | awk '{print $1}' | grep -wq $item
|
||||
is_rip=$?
|
||||
# 如果是RIP
|
||||
if [[ $is_rip -eq 0 ]]; then
|
||||
# 获取对应RIP的英文节点标签名,并判断该节点是否在域名的解析组中
|
||||
label=`cat ips.log | grep -Eo "(ct|cu|cm|bgp|ctbgp|cmbgp|cubgp|as|eu|sa|na|cbn|cern)_[a-z]{2,3}_[a-z]{2,20}[0-9]{1,2}_(c|e|m|n)[0-9]{0,2}" | head -n 1`
|
||||
cat map.log | awk '{print $4}' | sort | uniq | grep -wq $label
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo -e "${c_big}$item是域名$domain对应解析组$map中的IP,并且是一个RIP。\n${c_e}"
|
||||
else
|
||||
echo -e "${c_bir}[MAP-107] $item${c_biy}不是${c_bir}域名$domain对应解析组中的IP。\n${c_e}"
|
||||
exit 107
|
||||
fi
|
||||
# 如果不是RIP,那就是VIP
|
||||
else
|
||||
# 直接判断该IP是否在域名的解析组
|
||||
cat map.log | awk '{print $5}' | sort | uniq | grep -wq $item
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo -e "${c_big}$item是域名$domain对应解析组$map中的IP,并且是一个VIP。\n${c_e}"
|
||||
else
|
||||
echo -e "${c_bir}[MAP-108] $item${c_biy}不是${c_bir}域名$domain对应解析组中的IP。\n${c_e}"
|
||||
exit 108
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# 判断一个标签是否在域名的解析组中
|
||||
function label_inmap() {
|
||||
# 输出该解析组所有的节点中英文对应标签信息
|
||||
cat map.log | awk '{print $14, $4}' | sed '1d' | sort | uniq | awk '{if(NR%3==0) print " | "$0" | "; else printf "%s", " | "$0;} END{print ""}' | column -t
|
||||
# 查询节点中英文标签名称是否在域名的解析组中,在的话并输出相应信息
|
||||
cat map.log | awk '{print $14, $4}' | sort | uniq | grep -wq $item
|
||||
if [[ $? -eq 0 ]]; then
|
||||
node=`cat map.log | awk '{print $14, $4}' | sort | uniq | grep -w $item`
|
||||
echo -e "${c_big}$node${c_by} 是域名 $domain 对应解析组 $map 中的节点,如上是该解析组所有节点列表汇总,可参考。\n${c_e}"
|
||||
else
|
||||
echo -e "${c_bir}[MAP-109] $item${c_biy} 不是${c_bir}域名 $domain 对应解析组 $map 中的节点,如上是该解析组所有节点列表汇总,可参考。\n${c_e}"
|
||||
exit 109
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# 打印域名对应解析组所有节点信息
|
||||
function labels_inmap() {
|
||||
cat map.log | awk '{print $14, $4}' | sed '1d' | sort | uniq | awk '{if(NR%3==0) print " | "$0" | "; else printf "%s", " | "$0;} END{print ""}' | column -t
|
||||
echo -e "${c_big}如上是域名 $domain 对应解析组 $map 所有节点列表汇总,可参考。\n${c_e}"
|
||||
}
|
||||
|
||||
# 输出域名对应解析组的覆盖情况,可指定地区
|
||||
function cover() {
|
||||
# 宽度提示
|
||||
width=`tput cols`
|
||||
if [[ $width -lt 170 ]]; then
|
||||
echo -e "${c_biy}因该选项输出的每行数据比较多,需要终端宽度大于170,当前终端宽度为$width,输出的界面会不整齐,是否继续(Y/n):${c_e}"
|
||||
read -t 60 YON
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-110] 60s内无任何输入,退出...${c_e}\n"; exit 110; }
|
||||
if [[ $YON != '' && $YON != 'y' && $YON != 'Y' && $YON != 'Yes' && $YON != 'yes' && $YON != 'YES' ]]; then
|
||||
echo -e "${c_br}[MAP-111] 请调整终端宽度之后,重新运行,退出...${c_e}\n"
|
||||
exit 111
|
||||
fi
|
||||
fi
|
||||
|
||||
# 将需要保留的字段过滤出来
|
||||
cat map.log | sed '1d' | awk '{print $1, $2, $3, $4, $5, $14, $16, $22, $16-$22, $23, $24}' | sort -k 2 > res.map
|
||||
echo -e "${c_bib}1. 省份维度:31个省市自治区 + 电信|移动|联通,例如-内蒙古电信${c_e}"
|
||||
echo -e "${c_bib}2. 大区维度:东北|华北|华东|华南|华中|西北|西南 + 电信|移动|联通|长宽|铁通|广电,例如-东北移动${c_e}"
|
||||
echo -e "${c_bib}3. 全国维度:中国 + 电信|移动|联通|长宽|铁通|广电|其他/香港/澳门/台湾/教育网,例如-中国移动${c_e}"
|
||||
echo -e "${c_bib}4. 国际维度:日本/韩国/蒙古国/北朝鲜/澳洲/欧洲/非洲/北美洲/南美洲/中亚/西亚/南亚/东南亚/亚洲其他等${c_e}"
|
||||
echo -e "${c_bib}5. 其他维度:全球/其他/其他广电/其他电信/其他移动/其他联通/其他铁通/其他长宽${c_e}"
|
||||
echo -ne "${c_bg}请按照如上规则,输入查询条件:${c_e}\n"
|
||||
read -t 60 query
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-112] 60s内无任何输入,退出...${c_e}\n"; exit 112; }
|
||||
# 无任何输入,则默认打印所有资源
|
||||
if [[ ${query} == '' ]]; then
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --format-global res.map
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --format-global res.map
|
||||
# 否则打印指定地区的资源覆盖情况
|
||||
else
|
||||
cat $data/area | grep $query| awk '{print $1}' > view
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --format-partial $query view res.map $domain $map
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --format-partial $query view res.map $domain $map
|
||||
fi
|
||||
}
|
||||
|
||||
# 输出域名对应父解析组的覆盖情况,可指定地区
|
||||
function parent() {
|
||||
# 宽度提示
|
||||
width=`tput cols`
|
||||
if [[ $width -lt 170 ]]; then
|
||||
echo -e "${c_biy}因该选项输出的每行数据比较多,需要终端宽度大于170,当前终端宽度为$width,输出的界面会不整齐,是否继续(Y/n):${c_e}"
|
||||
read -t 60 YON
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-113] 60s内无任何输入,退出...${c_e}\n"; exit 113; }
|
||||
if [[ $YON != '' && $YON != 'y' && $YON != 'Y' && $YON != 'Yes' && $YON != 'yes' && $YON != 'YES' ]]; then
|
||||
echo -e "${c_br}[MAP-114] 请调整终端宽度之后,重新运行,退出...${c_e}\n"
|
||||
exit 114
|
||||
fi
|
||||
fi
|
||||
|
||||
# 获取域名的父解析组
|
||||
# python3 /usr/local/script/fanmf11/get_infos.py --domain_config_accid map_info.log $domain
|
||||
infos --parent $domain $TS
|
||||
cat cmap && echo -e "${c_bg}请选择要查看的父方案序号(e.g. 1, 2, 3...):${c_e}"
|
||||
read -t 60 index
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-115] 60s内无任何输入,退出...${c_e}\n"; exit 115; }
|
||||
cat cmap | grep -Eq "^$index\."
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAp-116] 请输入正确的序号,退出...${c_e}\n"; exit 116; }
|
||||
cmap=`cat cmap | grep -E "^$index\." | awk '{print $2}'`
|
||||
getlastcover $cmap > cmap.log 2>&1
|
||||
|
||||
|
||||
# 将需要保留的字段过滤出来
|
||||
cat cmap.log | sed '1d' | awk '{print $1, $2, $3, $4, $5, $14, $16, $22, $16-$22, $23, $24}' | sort -k 2 > res.map
|
||||
echo -e "${c_bib}1. 省份维度:31个省市自治区 + 电信|移动|联通,例如-内蒙古电信${c_e}"
|
||||
echo -e "${c_bib}2. 大区维度:东北|华北|华东|华南|华中|西北|西南 + 电信|移动|联通|长宽|铁通|广电,例如-东北移动${c_e}"
|
||||
echo -e "${c_bib}3. 全国维度:中国 + 电信|移动|联通|长宽|铁通|广电|其他/香港/澳门/台湾/教育网,例如-中国移动${c_e}"
|
||||
echo -e "${c_bib}4. 国际维度:日本/韩国/蒙古国/北朝鲜/澳洲/欧洲/非洲/北美洲/南美洲/中亚/西亚/南亚/东南亚/亚洲其他等${c_e}"
|
||||
echo -e "${c_bib}5. 其他维度:全球/其他/其他广电/其他电信/其他移动/其他联通/其他铁通/其他长宽${c_e}"
|
||||
echo -ne "${c_bg}请按照如上规则,输入查询条件:${c_e}\n"
|
||||
read -t 60 query
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-117] 60s内无任何输入,退出...${c_e}\n"; exit 117; }
|
||||
# 无任何输入,则默认打印所有资源
|
||||
if [[ ${query} == '' ]]; then
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --format-global res.map
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --format-global res.map
|
||||
# 否则打印指定地区的资源覆盖情况
|
||||
else
|
||||
cat $data/area | grep $query| awk '{print $1}' > view
|
||||
python3 /usr/local/script/fanmf11/get_infos.py --format-partial $query view res.map $domain $map
|
||||
# python3 /home/fanmf11/fanmf11/get_infos.py --format-partial $query view res.map $domain $map
|
||||
fi
|
||||
}
|
||||
|
||||
function logfile() {
|
||||
if [[ -d $trash ]]; then
|
||||
echo -e "${c_br}[MAP-118]对于同一个用户,同一时间只能运行一个实例,请重新运行...${c_e}"
|
||||
exit 118
|
||||
else
|
||||
mkdir -p $trash
|
||||
cd $trash && cd ..
|
||||
docs=`ls`
|
||||
for doc in $docs; do
|
||||
[[ -f $doc ]] && rm -rf $doc
|
||||
done
|
||||
folders=`ls -t`
|
||||
while [[ `echo $folders | awk '{print NF}'` -gt 29 ]]; do
|
||||
folder=`ls -t | tail -1`
|
||||
rm -rf $folder
|
||||
folders=`ls -t`
|
||||
done
|
||||
cd $trash && touch map
|
||||
fi
|
||||
}
|
||||
|
||||
# --------------------------------------------------------------------------------
|
||||
# set a trap for Ctrl+C
|
||||
trap 'onCtrlC' INT
|
||||
|
||||
# --------------------------------------------------------------------------------
|
||||
# 初始化设定
|
||||
stty erase '^H' # allow backspace
|
||||
data='/usr/local/script/fanmf11/data' # set data directory path
|
||||
toolbox='/usr/local/script/fanmf11/' # set toolbox directory path
|
||||
map=''
|
||||
accid=''
|
||||
TS=`date +%s%N` # document the start time of the script
|
||||
host=`whoami` # who use this script
|
||||
trash="/usr/local/script/fanmf11/trash/$host/$TS" # set trash directory path
|
||||
|
||||
# --------------------------------------------------------------------------------
|
||||
# 入参正确性检测
|
||||
let NumOP=$# # number of parameter
|
||||
OP="prefix "$@ # do a prefix cause '-' char may damage echo command
|
||||
dash_d=`echo $OP | awk '{print $2}'` # get first param -d
|
||||
domain=`echo $OP | awk '{print $3}'` # get second param domain
|
||||
item=`echo $OP | awk '{print $4}'` # get third param item, can be vip, rip, testip, pool, cover etc.
|
||||
[[ $NumOP -ne 3 || $dash_d != '-d' ]] && usage || logfile
|
||||
|
||||
# --------------------------------------------------------------------------------
|
||||
# 检查域名是否在平台注册
|
||||
res=`cat $data/domain.list | grep -w "$domain"`
|
||||
[[ $res == '' ]] && { echo -e "${c_br}[MAp-119] 该域名未在天翼平台配置,一个小时内新增的域名无法查询,退出...${c_e}"; exit 119; }
|
||||
|
||||
# --------------------------------------------------------------------------------
|
||||
# 获取域名解析组信息
|
||||
infos --map $domain $TS
|
||||
rc=$?; [[ $rc -eq 205 || $rc -eq 231 ]] && { echo -e "${c_br}[MAP-120] exiting...${c_e}"; exit 120; }
|
||||
# cd $trash && map=`cat map.log` && getlastcover $map > map.log
|
||||
cd $trash
|
||||
if [[ `cat map.log | wc -l` -eq 1 ]]; then
|
||||
map=`cat map.log`
|
||||
else
|
||||
maps=`cat map.log | sort | uniq`
|
||||
count=1 && > remap.log
|
||||
for map in $maps; do
|
||||
echo $count": "$map | tee -a remap.log
|
||||
let count=count+1
|
||||
done
|
||||
echo -ne "${c_bg}存在分区域解析,需确定解析组名称(默认是1):${c_e}\n"
|
||||
read -t 60 imap
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}[MAP-121] 60s内无任何输入,退出...${c_e}\n"; exit 121; }
|
||||
# default to the first map (index 1) when nothing is entered
|
||||
[[ $imap == '' ]] && let imap=1
|
||||
map=`cat remap.log | awk -F ':' -v imap=$imap '$1==imap {print $2}'`
|
||||
[[ $map == '' ]] && { echo -e "${c_br}[MAP-122] 请输入正确的序号,退出...${c_e}"; exit 122; }
|
||||
fi
|
||||
|
||||
getlastcover $map > map.log
|
||||
cat map.log | grep -q 'can not find sys_id'
|
||||
[[ $? -eq 0 ]] && { echo -e "${c_br}[MAP-123] 该解析组未在平台配置,退出...${c_e}"; exit 123; }
|
||||
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------------
|
||||
# 随机获取VIP
|
||||
if [[ $item == 'vip' ]]; then
|
||||
random_vip
|
||||
elif [[ $item == 'rip' ]]; then
|
||||
random_rip
|
||||
elif [[ $item =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ || $item =~ ^[0-9a-fA-F:]{11,39}$ ]]; then
|
||||
ip_inmap
|
||||
elif [[ $item =~ ^(ct|cu|cm|bgp|ctbgp|cmbgp|cubgp|as|eu|sa|na|cbn|cern)_[a-z]{2,3}_[a-z]{2,20}[0-9]{1,2}_(c|dns|e|lvs|m|mysql|n|prets|pretw|redis|testts|ts|tw|uatts1)[0-9]{0,2}$ ]]; then
|
||||
label_inmap
|
||||
elif [[ $item == 'label' ]]; then
|
||||
labels_inmap
|
||||
elif [[ $item == 'cover' ]]; then
|
||||
cover
|
||||
elif [[ $item == 'parent' ]]; then
|
||||
parent
|
||||
# 兜底是中文节点名的查询
|
||||
else
|
||||
label_inmap
|
||||
fi
|
||||
|
||||
|
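A few invocation examples for map.sh may help; these are illustrative only (example.com and the IP are made-up values, and the helper commands ips/infos/getlastcover are assumed to be on PATH, as the script itself expects):

# Pull a random Telecom VIP from the domain's resolution group (press Enter at the ISP prompt to default to ct)
map -d example.com vip
# Check whether a given address belongs to the domain's resolution group, either as VIP or as RIP
map -d example.com 1.2.3.4
# Print the coverage of the edge resolution group, optionally narrowed to one region at the prompt
map -d example.com cover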
13
old/ctc/normalize.jq
Normal file
13
old/ctc/normalize.jq
Normal file
@ -0,0 +1,13 @@
|
||||
# Apply f to composite entities recursively using keys[], and to atoms
|
||||
def sorted_walk(f):
|
||||
. as $in
|
||||
| if type == "object" then
|
||||
reduce keys[] as $key
|
||||
( {}; . + { ($key): ($in[$key] | sorted_walk(f)) } ) | f
|
||||
elif type == "array" then map( sorted_walk(f) ) | f
|
||||
else f
|
||||
end;
|
||||
|
||||
def normalize: sorted_walk(if type == "array" then sort else . end);
|
||||
|
||||
normalize
|
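normalize.jq defines and then applies `normalize`, so it can be fed straight to jq with -f. A minimal usage sketch (file names are illustrative); canonicalizing both sides makes a plain diff meaningful even when arrays come back in a different order:

# Sort every array and rebuild every object with sorted keys, then compare
jq -f normalize.jq config_a.json > a.normalized.json
jq -f normalize.jq config_b.json > b.normalized.json
diff a.normalized.json b.normalized.json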
31
old/ctc/reformat.awk
Normal file
31
old/ctc/reformat.awk
Normal file
@ -0,0 +1,31 @@
|
||||
#!/usr/bin/awk -f
|
||||
|
||||
BEGIN{
|
||||
start1="\"-----BEGINCERTIFICATE-----";
|
||||
start2="-----BEGINCERTIFICATE-----";
|
||||
end1="-----ENDCERTIFICATE-----";
|
||||
end2="-----ENDCERTIFICATE-----\",";
|
||||
}
|
||||
|
||||
{
|
||||
if($0~"https_public_content") {
|
||||
printf "%s", $1;
|
||||
for(i=2;i<=NF;i++) {
|
||||
if($i==start1)
|
||||
printf "%s", "\"-----BEGIN CERTIFICATE-----\\n";
|
||||
else if($i==start2)
|
||||
printf "%s", "-----BEGIN CERTIFICATE-----\\n";
|
||||
else if($i==end1)
|
||||
printf "%s", "-----END CERTIFICATE-----\\n";
|
||||
else if($i==end2)
|
||||
printf "%s", "-----END CERTIFICATE-----\",";
|
||||
else if($i=="")
|
||||
continue
|
||||
else
|
||||
printf "%s", $i"\\n"
|
||||
}
|
||||
}
|
||||
else print $0
|
||||
}
|
||||
|
||||
|
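reformat.awk reads stdin or a file argument like any awk program; a minimal sketch of how it might be run (the input file name is illustrative). It only rewrites lines containing https_public_content, re-inserting the \n escapes around the flattened BEGIN/END CERTIFICATE markers, and passes every other line through unchanged:

awk -f reformat.awk exported_config.json > exported_config.fixed.json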
92
old/ctc/tasks.sh
Normal file
92
old/ctc/tasks.sh
Normal file
@ -0,0 +1,92 @@
|
||||
#!/bin/bash
|
||||
|
||||
function isAlarm()
|
||||
{
|
||||
alarmDescription=$1
|
||||
alarmFile=$2
|
||||
alarmDate=`date`
|
||||
curl 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=ddea3f5f-fbfc-4c21-994a-71e9fc50e4ef' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '
|
||||
{
|
||||
"msgtype": "markdown",
|
||||
"markdown": {
|
||||
"content": "**'"$alarmDescrption"'**\n
|
||||
> 错误文件:<font color=\"warning\">'"$alarmFile"'生成错误,请立即查看</font>
|
||||
> 告警时间:<font color=\"warning\">'"$alarmDate"'</font>"
|
||||
}
|
||||
} ' > /dev/null 2>&1
|
||||
}
|
||||
|
||||
function domain_list() {
|
||||
# ----------------------------------------------------------------------------------------
|
||||
# 获取平台全量域名信息
|
||||
let count=0
|
||||
while [[ $count -lt 3 ]]; do
|
||||
curl 'https://bs.ctcdn.cn/api/v3/manageDomain/export' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyaWQiOjI1NH0.9Zw66R0R85avd92qzO-3KQ4DJ2zYXL4ght5bH41iTfA' -H 'content-type: application/json;charset=UTF-8' -vo $data/domain.list > $data/domain.list.response 2>&1
|
||||
|
||||
# 判断响应是否200
|
||||
line_number=`cat $data/domain.list | wc -l`
|
||||
cat $data/domain.list.response | grep -Eq 'HTTP/(1\.1|2) 200'
|
||||
if [[ $? -ne 0 || $line_number -lt 20000 ]]; then
|
||||
count=$((count+1))
|
||||
else
|
||||
exit
|
||||
fi
|
||||
done
|
||||
isAlarm '【严重】获取全量域名信息失败' 'domain.list'
|
||||
exit 248
|
||||
}
|
||||
|
||||
function renew_backup() {
|
||||
cd $data
|
||||
lakes_bak > $data/lakes
|
||||
curl -so $data/ip.group "http://150.223.254.77:5044/download/ip.group"
|
||||
|
||||
# backups
|
||||
cd $toolbox
|
||||
[[ -d '/home/fanmf11/.backups/' ]] && rm -rf /home/fanmf11/.backups/*.tgz || mkdir '/home/fanmf11/.backups/'
|
||||
cd $toolbox && bt=$(date +%Y%m%d%H%M%S)
|
||||
# cp $data/lakes $data/lakes-$(date +%d)
|
||||
# cp $data/ip.group $data/ip.group-$(date +%d)
|
||||
tar -czf /home/fanmf11/.backups/toolbox-${bt}.tgz ./*
|
||||
|
||||
[[ ! -s $data/lakes ]] && isAlarm '【严重】基础文件生成错误告警' 'lakes'
|
||||
[[ ! -s $data/ip.group ]] && isAlarm '【严重】基础文件生成错误告警' 'ip.group'
|
||||
[[ ! -s $backups/toolbox-${bt}.tgz ]] && isAlarm '备份失败告警' "toolbox-${bt}.tgz"
|
||||
}
|
||||
|
||||
|
||||
|
||||
function view_check() {
|
||||
maps=`cat $data/maps`
|
||||
> $data/area.new
|
||||
for map in $maps; do
|
||||
getlastcover $map > $map
|
||||
cat $map | awk '{print $3}' | sed '1d' | sort | uniq >> $data/area.new
|
||||
rm $map
|
||||
done
|
||||
news=`cat $data/area.new | sort | uniq`
|
||||
olds=`cat $data/area | awk '{print $1}' | sort | uniq`
|
||||
> $data/area.new
|
||||
> $data/area.diff
|
||||
for new in $news; do
|
||||
[[ $new == 'find' ]] && continue
|
||||
echo $new >> $data/area.new
|
||||
echo $olds | grep -wq $new
|
||||
[[ $? -ne 0 ]] && { isAlarm '有新的View需要添加' "$new"; echo $new >> $data/area.diff; sleep 1; }
|
||||
done
|
||||
}
|
||||
|
||||
data='/usr/local/script/fanmf11/data'
|
||||
host=`whoami`
|
||||
toolbox='/usr/local/script/fanmf11'
|
||||
backups='/home/fanmf11/.backups'
|
||||
|
||||
|
||||
[[ $1 == '--renew_backup' ]] && renew_backup
|
||||
[[ $1 == '--domain_list' ]] && domain_list
|
||||
[[ $1 == '--new_area' ]] && view_check
|
||||
|
||||
|
||||
|
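tasks.sh only reacts to --renew_backup, --domain_list and --new_area, so it is presumably driven by cron. Hypothetical crontab entries (the schedule and the install path are assumptions, not something the script defines):

0 3 * * *  /bin/bash /usr/local/script/fanmf11/tasks.sh --domain_list
30 3 * * * /bin/bash /usr/local/script/fanmf11/tasks.sh --renew_backup
0 4 * * 1  /bin/bash /usr/local/script/fanmf11/tasks.sh --new_area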
134
old/ctc/utool
Normal file
134
old/ctc/utool
Normal file
@ -0,0 +1,134 @@
|
||||
#!/bin/bash
|
||||
# User specific aliases and functions
|
||||
|
||||
alias cls='clear && ls'
|
||||
# alias trash='cd /usr/local/script/fanmf11/trash/fanmf11 && ls'
|
||||
alias fanmf11='cd /usr/local/script/fanmf11 && ls'
|
||||
alias ..='cd ../ && ls'
|
||||
alias ...='cd ../.. && ls'
|
||||
alias l='ls -alh'
|
||||
alias common='cat /usr/local/script/fanmf11/data/cmds'
|
||||
|
||||
c_br='\e[1;31m' # bold red
|
||||
c_bg='\e[1;32m' # bold green
|
||||
c_by='\e[1;33m' # bold yellow
|
||||
c_bb='\e[1;34m' # bold blue
|
||||
c_bp='\e[1;35m' # bold purple
|
||||
c_bc='\e[1;36m' # bold cyan
|
||||
c_bir='\e[1;3;31m' # * bold italic red
|
||||
c_big='\e[1;3;32m' # bold italic green
|
||||
c_bib='\e[1;3;34m' # * bold italic blue
|
||||
c_bip='\e[1;3;35m' # bold italic purple
|
||||
c_bic='\e[1;3;36m' # bold italic cyan
|
||||
c_e='\e[0m' # reset
|
||||
|
||||
trash='/usr/local/script/fanmf11/trash'
|
||||
|
||||
function utool() {
|
||||
|
||||
if [[ $1 == '-a' ]]; then
|
||||
# set -x
|
||||
[[ $# -lt 1 ]] && { echo -e "${c_bir}[UTOOL-100] Need at least one parameter, exiting...${c_e}"; return 100; }
|
||||
[[ $# -eq 1 ]] && place='fanmf11' || place="$2"
|
||||
ls $trash | grep -wq $place
|
||||
[[ $? -ne 0 ]] && { echo -e "${c_br}[UTOOL-101] $place用户不存在,或该用户从未使用过相关工具,退出...${c_e}"; return 101; }
|
||||
|
||||
> $trash/fanmf11/record.log
|
||||
items="ips ids map infos logs config"
|
||||
for item in $items; do
|
||||
date_lists=`find $trash/$place -name "$item" -type f | sort | uniq | awk -F '/' '{print $8}' | sort | uniq`
|
||||
for date_list in $date_lists; do
|
||||
let number=${date_list:0:10}
|
||||
date_fmt=`date -d@$number +'%Y-%m-%d %H:%M:%S'`
|
||||
echo "$date_fmt $date_list $item" >> $trash/fanmf11/record.log
|
||||
done
|
||||
done
|
||||
cat $trash/fanmf11/record.log | sort -nk3 | awk '{printf "%-11s%-14s%-25s%-s\n", $1, $2, $3, $4}'
|
||||
# set +x
|
||||
elif [[ $1 == '-b' ]]; then
|
||||
cat /usr/local/script/fanmf11/data/cmds
|
||||
elif [[ $1 == '-c' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-d' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-e' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-f' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-g' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-h' ]]; then
|
||||
usage
|
||||
elif [[ $1 == '-i' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-j' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-k' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-l' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-m' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-n' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-o' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-p' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-q' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-r' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-s' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-t' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-u' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-v' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-w' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-x' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-y' ]]; then
|
||||
:
|
||||
elif [[ $1 == '-z' ]]; then
|
||||
echo -e "${c_bg}直播:120.39.248.231"
|
||||
echo -e "全站:222.187.236.6"
|
||||
echo -e "全站:222.187.236.7"
|
||||
echo -e "点播:113.62.113.33${c_e}"
|
||||
else
|
||||
:
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
|
||||
function usage() {
|
||||
let col=`tput cols`
|
||||
if [[ $col -lt 120 ]]; then
|
||||
echo -e "\e[1;3;31mYour screen width is too small to show the usage info neatly. So make the display window maximized.\e[0m"
|
||||
read -p "Press any key to continue..."
|
||||
echo ''
|
||||
fi
|
||||
|
||||
|
||||
echo -e "\e[1;32mDESCRIPTION:\e[0m"
|
||||
echo -e "\e[3;32mutool -- a self-defined command line interface, which is used to facilitate operating the system, supports the following options. In the description part, where there is a leading asterisk signifies that this option must take an argument.\e[0m"
|
||||
echo -e "\e[1;4m \e[0m"
|
||||
echo -e "\e[37;40m|\e[0m\e[1;4;37;40mOption| Description |Option| Description \e[0m\e[37;40m|\e[0m"
|
||||
echo -e "\e[37;40m| -a |*find dirs of specified item in trash | -n | |\e[0m"
|
||||
echo -e "\e[37;40m| -b | show some often used commands | -o | |\e[0m"
|
||||
echo -e "\e[37;40m| -c | | -p | |\e[0m"
|
||||
echo -e "\e[37;40m| -d | | -q | |\e[0m"
|
||||
echo -e "\e[37;40m| -e | | -r | |\e[0m"
|
||||
echo -e "\e[37;40m| -f | | -s | |\e[0m"
|
||||
echo -e "\e[37;40m| -g | | -t | |\e[0m"
|
||||
echo -e "\e[37;40m| -h | show usage info | -u | |\e[0m"
|
||||
echo -e "\e[37;40m| -i | | -v | |\e[0m"
|
||||
echo -e "\e[37;40m| -j | | -w | |\e[0m"
|
||||
echo -e "\e[37;40m| -k | | -x | |\e[0m"
|
||||
echo -e "\e[37;40m| -l | | -y | |\e[0m"
|
||||
echo -e "\e[37;40m|\e[0m\e[4;37;40m -m | | -z | \e[0m\e[37;40m|\e[0m\n"
|
||||
}
|
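utool only defines aliases and shell functions, so it has to be sourced into the interactive shell rather than executed. A minimal sketch (the install path is an assumption):

echo 'source /usr/local/script/fanmf11/utool' >> ~/.bashrc
source ~/.bashrc
utool -a fanmf11   # list the per-run trash directories left by ips/ids/map/infos/logs/config
utool -b           # print the frequently used commands kept in data/cmds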
41
old/github_update.sh
Normal file
41
old/github_update.sh
Normal file
@ -0,0 +1,41 @@
|
||||
#!/bin/bash
|
||||
#===================================================================
|
||||
# Filename : update_github.sh
|
||||
# Function :
|
||||
# Usage :
|
||||
# Author : Manford Fan
|
||||
# Date : 2022-03-26 18:46
|
||||
# Version : Version 0.1
|
||||
# Disclaimer : The author is NOT responsible for any loss caused
|
||||
# by the user's own operations.
|
||||
# And More : If you find there are some bugs in this scripts
|
||||
# Or you have better ideas, please do contact me
|
||||
# via E-mail -- mffan0922@163.com
|
||||
#===================================================================
|
||||
|
||||
t=`date +%Y%m%d%H%M%S`
|
||||
echo $t >> /opt/logs/github_update.log
|
||||
|
||||
# first try
|
||||
echo -n "1-blog: "
|
||||
cd /opt/source-code/blog && git pull --rebase
|
||||
echo -n "1-wiki: "
|
||||
cd /opt/websites/wiki && git pull --rebase
|
||||
echo -n "1-nav: "
|
||||
cd /opt/websites/nav && git pull --rebase
|
||||
echo -n "1-homepage: "
|
||||
cd /opt/websites/homepage && git pull --rebase
|
||||
|
||||
# check if done
|
||||
echo -n "2-blog: "
|
||||
cd /opt/source-code/blog && git pull --rebase
|
||||
echo -n "2-wiki: "
|
||||
cd /opt/websites/wiki && git pull --rebase
|
||||
echo -n "2-nav: "
|
||||
cd /opt/websites/nav && git pull --rebase
|
||||
echo -n "2-homepage: "
|
||||
cd /opt/websites/homepage && git pull --rebase
|
||||
|
||||
echo -e "-----------------------------------------------------------\n"
|
||||
|
||||
|
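github_update.sh writes only the timestamp to /opt/logs/github_update.log itself, so the git output has to be redirected there by whatever invokes it. A hypothetical crontab entry (path and interval are assumptions):

*/30 * * * * /bin/bash /opt/scripts/github_update.sh >> /opt/logs/github_update.log 2>&1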
41
old/github_update.sh.homepage
Normal file
41
old/github_update.sh.homepage
Normal file
@ -0,0 +1,41 @@
|
||||
#!/bin/bash
|
||||
#===================================================================
|
||||
# Filename : update_github.sh
|
||||
# Function :
|
||||
# Usage :
|
||||
# Author : Manford Fan
|
||||
# Date : 2022-03-26 18:46
|
||||
# Version : Version 0.1
|
||||
# Disclaimer : The author is NOT responsible for any loss caused
|
||||
# by the user's own operations.
|
||||
# And More : If you find there are some bugs in this scripts
|
||||
# Or you have better ideas, please do contact me
|
||||
# via E-mail -- mffan0922@163.com
|
||||
#===================================================================
|
||||
|
||||
t=`date +%Y%m%d%H%M%S`
|
||||
echo $t >> /opt/logs/github_update.log
|
||||
|
||||
# first try
|
||||
# echo -n "1-blog: "
|
||||
# cd /opt/source-code/blog && git pull --rebase
|
||||
# echo -n "1-wiki: "
|
||||
# cd /opt/websites/wiki && git pull --rebase
|
||||
# echo -n "1-nav: "
|
||||
# cd /opt/websites/nav && git pull --rebase
|
||||
echo -n "1-homepage: "
|
||||
cd /opt/websites/homepage && git pull --rebase
|
||||
|
||||
# check if done
|
||||
# echo -n "2-blog: "
|
||||
# cd /opt/source-code/blog && git pull --rebase
|
||||
# echo -n "2-wiki: "
|
||||
# cd /opt/websites/wiki && git pull --rebase
|
||||
# echo -n "2-nav: "
|
||||
# cd /opt/websites/nav && git pull --rebase
|
||||
echo -n "2-homepage: "
|
||||
cd /opt/websites/homepage && git pull --rebase
|
||||
|
||||
echo -e "-----------------------------------------------------------\n"
|
||||
|
||||
|
17
old/jekyll_blog_update.sh
Normal file
17
old/jekyll_blog_update.sh
Normal file
@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
inotifywait -mrq -e create,delete,move,close_write /opt/source-code/blog --exclude '^.*/avatar.jpg|^.*/\.git' | while read directory action filename; do
|
||||
echo ====================================================
|
||||
echo `date`
|
||||
echo $directory$filename $action
|
||||
rm -rf /opt/websites/blog
|
||||
let numOfAvatar=`ls /opt/websites/nav/assets/images/logos/ | wc -l`
|
||||
let randNumber=$RANDOM%$numOfAvatar
|
||||
|
||||
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/websites/homepage/assets/img/logo.jpg -rf
|
||||
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/websites/nav/assets/images/logos/avatar.jpg -rf
|
||||
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/source-code/blog/img/avatar.jpg -rf
|
||||
jekyll b -s /opt/source-code/blog/ -d /opt/websites/blog/
|
||||
echo -e '\n'
|
||||
done
|
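Because inotifywait -m never returns, jekyll_blog_update.sh is meant to run as a long-lived background process. One minimal way to start it (the path and the nohup-based supervision are assumptions; a systemd unit would work just as well):

nohup /bin/bash /opt/scripts/jekyll_blog_update.sh >> /opt/logs/jekyll_blog_update.log 2>&1 &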
13
old/jekyll_python_update.sh
Normal file
13
old/jekyll_python_update.sh
Normal file
@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
inotifywait -mrq -e create,delete,move,close_write /opt/source-code/document/python | while read directory action filename; do
|
||||
echo ====================================================
|
||||
echo `date`
|
||||
echo $directory$filename $action
|
||||
rm -rf /opt/websites/just-the-docs/python
|
||||
jekyll b -s /opt/source-code/document/python -d /opt/websites/just-the-docs/python
|
||||
echo -e '\n'
|
||||
done
|
||||
|
||||
|
36
old/koel_update.sh
Normal file
36
old/koel_update.sh
Normal file
@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
#===================================================================
|
||||
# Filename : koel_update.sh
|
||||
# Function :
|
||||
# Usage :
|
||||
# Author : Manford Fan
|
||||
# Date : 2022-10-15 23:34
|
||||
# Version : Version 0.1
|
||||
# Disclaimer : The author is NOT responsible for any loss caused
|
||||
# by the user's own operations.
|
||||
# And More : If you find there are some bugs in this script
|
||||
# Or you have better ideas, please do contact me
|
||||
# via E-mail -- mffan0922@163.com
|
||||
#===================================================================
|
||||
|
||||
[[ ! -e /tmp/files_now ]] && touch /tmp/files_now
|
||||
[[ ! -e /tmp/files_pre_60s ]] && touch /tmp/files_pre_60s
|
||||
success_flg=1
|
||||
ls -aR /opt/media/Music | grep -E "\.(mp3|flac|opus|aac|ogg|m4a)$" | sort > /tmp/files_now
|
||||
diff /tmp/files_now /tmp/files_pre_60s >> /opt/logs/koel_update.log
|
||||
if [[ $? -ne 0 ]]; then
|
||||
chown -R www-data:www-data /opt/media/Music
|
||||
for i in `seq 10`; do
|
||||
php /opt/source-code/koel/artisan koel:sync > /dev/null
|
||||
if [[ $? -eq 0 ]]; then
|
||||
php /opt/source-code/koel/artisan koel:sync >> /opt/logs/koel_update.log
|
||||
success_flg=0
|
||||
break
|
||||
fi
|
||||
sleep 2
|
||||
done
|
||||
[[ success_flg -eq 1 ]] && echo "Happening @ $(date) Failed scanning the media dir, need processing that by hand." >> /opt/logs/koel_update.log
|
||||
echo -e "Happening @ $(date) Sync koel music successfully." >> /opt/logs/koel_update.log
|
||||
fi
|
||||
cp /tmp/files_now /tmp/files_pre_60s
|
||||
|
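The /tmp/files_pre_60s snapshot name implies a 60-second cadence, which is also cron's minimum granularity. A hypothetical per-minute entry (the install path is an assumption):

* * * * * /bin/bash /opt/scripts/koel_update.sh > /dev/null 2>&1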
14
old/nav_jpg.sh
Normal file
14
old/nav_jpg.sh
Normal file
@ -0,0 +1,14 @@
|
||||
#!/bin/bash
|
||||
|
||||
html='/opt/websites/nav/index.html'
|
||||
jpg_max_num=`cat $html | grep -oE "[0-9]+\.jpg" | awk -F '.' '{print $1}' | sort -n | tail -n 1`
|
||||
line_num=`cat $html | grep -oE "[0-9]+\.jpg" | awk -F '.' '{print $1}' | sort -n | uniq -c | wc -l`
|
||||
jpg_all_num=`ls -al /opt/websites/nav/assets/images/logos/ | wc -l`
|
||||
if [[ $((jpg_max_num+1)) -ne $line_num ]]; then
|
||||
echo -e "\e[1;31mThere must be duplicated jpg files, plz check!\e[0m"
|
||||
exit 2
|
||||
fi
|
||||
echo "Now: $jpg_max_num | MAX: $jpg_all_num | AVAILABLE: $((jpg_all_num-jpg_max_num)) | NEXT: $((jpg_max_num+1))"
|
||||
|
||||
|
||||
|
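For reference, the summary line at the end of nav_jpg.sh comes out in this form (numbers are illustrative):

Now: 37 | MAX: 52 | AVAILABLE: 15 | NEXT: 38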
116
old/rclone/rclone_alist_automount.sh
Normal file
116
old/rclone/rclone_alist_automount.sh
Normal file
@ -0,0 +1,116 @@
|
||||
#!/bin/bash
|
||||
#===================================================================
|
||||
# Filename : rclone_alist_automount.sh
|
||||
# Function :
|
||||
# Usage :
|
||||
# Author : Manford Fan
|
||||
# Date : 2022-10-19 14:05
|
||||
# Version : Version 0.1
|
||||
# Disclaimer : The author is NOT responsible for any loss caused
|
||||
# by the user's own operations.
|
||||
# And More : If you find there are some bugs in this script
|
||||
# Or you have better ideas, please do contact me
|
||||
# via E-mail -- mffan0922@163.com
|
||||
#===================================================================
|
||||
|
||||
|
||||
function rclone_alist_reset() {
|
||||
systemctl restart alist.service
|
||||
for i in `seq 3`; do
|
||||
fusermount -uzq /opt/webdav/alist > /dev/null 2>&1
|
||||
umount /opt/webdav/alist > /dev/null 2>&1
|
||||
sleep 2
|
||||
done
|
||||
ps -ef | grep 'rclone mount Alist' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_alist_pids
|
||||
for rclone_alist_pid in `cat /tmp/rclone/rclone_alist_pids`; do
|
||||
kill -9 $rclone_alist_pid;
|
||||
done
|
||||
nohup /usr/bin/rclone mount Alist:/ /opt/webdav/alist \
|
||||
--allow-other --vfs-cache-mode full --vfs-cache-max-size 10G --vfs-read-ahead 100M --dir-cache-time 2m --poll-interval 0 \
|
||||
--vfs-cache-max-age 4h --cache-dir /tmp/vfs-cache --bwlimit-file 20M --bwlimit 100M --no-update-modtime --contimeout 30m \
|
||||
--log-file /opt/logs/rclone/rclone_alist.log --log-level NOTICE --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 500M \
|
||||
--buffer-size 200M --attr-timeout 5m --transfers=6 --multi-thread-streams=6 --fast-list --timeout 30m \
|
||||
--allow-non-empty --no-modtime --max-duration 3h --vfs-read-wait 2s --vfs-write-wait 5s > /dev/null 2>&1 &
|
||||
}
|
||||
|
||||
alist_log='/opt/logs/rclone/rclone_alist.log'
|
||||
pid_self=$$
|
||||
# get all kinds of states for later decision
|
||||
num=`cat /proc/mounts | grep /opt/webdav/alist | wc -l`
|
||||
[[ $num -eq 0 ]] && loaded=0
|
||||
[[ $num -eq 1 ]] && loaded=1
|
||||
[[ $num -gt 1 ]] && loaded=2
|
||||
|
||||
ps -ef | grep '/usr/bin/rclone mount Alist' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_alist_pids
|
||||
num=`cat /tmp/rclone/rclone_alist_pids | wc -l`
|
||||
[[ $num -eq 0 ]] && rclone_running=0
|
||||
[[ $num -eq 1 ]] && rclone_running=1
|
||||
[[ $num -gt 1 ]] && rclone_running=2
|
||||
|
||||
sleep 2
|
||||
ps -ef | grep '/usr/bin/bash /opt/scripts/rclone/rclone_alist_automount.sh' | grep -v 'grep' > /tmp/rclone/rclone_alist_automount_pids
|
||||
let num=`cat /tmp/rclone/rclone_alist_automount_pids | sed -e '/^$/d' | wc -l`
|
||||
[[ $num -eq 1 ]] && script_running=1
|
||||
[[ $num -gt 1 ]] && script_running=2
|
||||
|
||||
# print the states for debug
|
||||
echo `date` >> $alist_log
|
||||
echo loaded = $loaded >> $alist_log
|
||||
echo rclone_running = $rclone_running >> $alist_log
|
||||
echo script_running = $script_running >> $alist_log
|
||||
# exit 5
|
||||
|
||||
# decide if `rclone` command function normally
|
||||
if [[ $1 == '-f' ]]; then
|
||||
echo -e "Happening @ $(date) [Alist] Executing BY Hands.\n" >> $alist_log
|
||||
|
||||
if [[ $script_running -eq 1 ]]; then
|
||||
rclone_alist_reset
|
||||
elif [[ script_running -eq 2 ]]; then
|
||||
echo "Happening @ $(date) [Alist] Script rclone_alist_automount.sh -f has already been executing..." | tee -a $alist_log
|
||||
echo "Happening @ $(date) [Alist] Alist RESET will be done with -f option" | tee -a $alist_log
|
||||
for rclone_alist_automount_pid in `cat /tmp/rclone/rclone_alist_automount_pids`; do
|
||||
[[ $rclone_alist_automount_pid != $pid_self ]] && kill -9 $rclone_alist_automount_pid > /dev/null 2>&1
|
||||
done
|
||||
rclone_alist_reset
|
||||
else
|
||||
echo "Happening @ $(date) [Alist] In general, this -f case will NOT happen" >> $alist_log
|
||||
fi
|
||||
elif [[ $1 == '-c' ]]; then
|
||||
echo -e "Happening @ $(date) [Alist] Executing BY Cron Service.\n" >> $alist_log
|
||||
|
||||
if [[ $script_running -eq 1 ]]; then
|
||||
rclone_alist_reset
|
||||
elif [[ script_running -eq 2 ]]; then
|
||||
echo "Happening @ $(date) [Alist] Script rclone_alist_automount.sh -c has already been executing..." | tee -a $alist_log
|
||||
echo "Happening @ $(date) [Alist] Alist RESET will be done on CRON condition." | tee -a $alist_log
|
||||
for rclone_alist_automount_pid in `cat /tmp/rclone/rclone_alist_automount_pids`; do
|
||||
[[ $rclone_alist_automount_pid != $pid_self ]] && kill -9 $rclone_alist_automount_pid > /dev/null 2>&1
|
||||
done
|
||||
rclone_alist_reset
|
||||
else
|
||||
echo "Happening @ $(date) [Alist] In general, this -c case will NOT happen" >> $alist_log
|
||||
fi
|
||||
elif [[ $1 == '' ]]; then
|
||||
sleep 10
|
||||
if [[ script_running -eq 1 ]]; then
|
||||
if [[ $loaded -ne 1 || $rclone_running -ne 1 ]]; then
|
||||
echo "Happening @ $(date) [Alist] Executing automatically." >> $alist_log
|
||||
rclone_alist_reset
|
||||
fi
|
||||
elif [[ $script_running -eq 2 ]]; then
|
||||
echo "Happening @ $(date) [Alist] Script rclone_alist_automount.sh auto has already been executing..." | tee -a $alist_log
|
||||
echo "Happening @ $(date) [Alist] Nothing will be done at this auto-situation" | tee -a $alist_log
|
||||
# for rclone_alist_automount_pid in `cat /tmp/rclone_alist_automount_pids`; do
|
||||
# [[ $rclone_alist_automount_pid != $pid_self ]] && kill -9 $rclone_alist_automount_pid > /dev/null 2>&1
|
||||
# done
|
||||
# rclone_alist_reset
|
||||
else
|
||||
echo "Happening @ $(date) [Alist] In general, this auto case will NOT happen" >> $alist_log
|
||||
fi
|
||||
else
|
||||
echo "Happening @ $(date) [Alist] Wrong usage of script/tool, only accept -f | -c or nothing as argument." >> $alist_log
|
||||
fi
|
||||
|
||||
|
||||
|
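The script distinguishes three entry points by its first argument, and it greps for the exact command line /usr/bin/bash /opt/scripts/rclone/rclone_alist_automount.sh when counting running instances, so it should be invoked through that path. A hypothetical setup (schedules are illustrative; /tmp/rclone must already exist, since the script writes its pid files there without creating the directory):

# manual reset
/usr/bin/bash /opt/scripts/rclone/rclone_alist_automount.sh -f
# crontab: periodic forced remount plus a frequent health check
0 5 * * *   /usr/bin/bash /opt/scripts/rclone/rclone_alist_automount.sh -c
*/5 * * * * /usr/bin/bash /opt/scripts/rclone/rclone_alist_automount.sh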
342
old/rclone/rclone_bash_completion.sh
Normal file
342
old/rclone/rclone_bash_completion.sh
Normal file
@ -0,0 +1,342 @@
|
||||
#!/bin/bash
|
||||
#===================================================================
|
||||
# Filename : rclone_bash_completion.sh
|
||||
# Function :
|
||||
# Usage :
|
||||
# Author : Manford Fan
|
||||
# Date : 2022-10-27 10:04
|
||||
# Version : Version 0.1
|
||||
# Disclaimer : The author is NOT responsible for any loss caused
|
||||
# by the user's own operations.
|
||||
# And More : If you find there are some bugs in this script
|
||||
# Or you have better ideas, please do contact me
|
||||
# via E-mail -- mffan0922@163.com
|
||||
#===================================================================
|
||||
|
||||
|
||||
# bash completion V2 for rclone -*- shell-script -*-
|
||||
|
||||
__rclone_debug()
|
||||
{
|
||||
if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
|
||||
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Macs have bash3 for which the bash-completion package doesn't include
|
||||
# _init_completion. This is a minimal version of that function.
|
||||
__rclone_init_completion()
|
||||
{
|
||||
COMPREPLY=()
|
||||
_get_comp_words_by_ref "$@" cur prev words cword
|
||||
}
|
||||
|
||||
# This function calls the rclone program to obtain the completion
|
||||
# results and the directive. It fills the 'out' and 'directive' vars.
|
||||
__rclone_get_completion_results() {
|
||||
local requestComp lastParam lastChar args
|
||||
|
||||
# Prepare the command to request completions for the program.
|
||||
# Calling ${words[0]} instead of directly rclone allows to handle aliases
|
||||
args=("${words[@]:1}")
|
||||
requestComp="${words[0]} __complete ${args[*]}"
|
||||
|
||||
lastParam=${words[$((${#words[@]}-1))]}
|
||||
lastChar=${lastParam:$((${#lastParam}-1)):1}
|
||||
__rclone_debug "lastParam ${lastParam}, lastChar ${lastChar}"
|
||||
|
||||
if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
|
||||
# If the last parameter is complete (there is a space following it)
|
||||
# We add an extra empty parameter so we can indicate this to the go method.
|
||||
__rclone_debug "Adding extra empty parameter"
|
||||
requestComp="${requestComp} ''"
|
||||
fi
|
||||
|
||||
# When completing a flag with an = (e.g., rclone -n=<TAB>)
|
||||
# bash focuses on the part after the =, so we need to remove
|
||||
# the flag part from $cur
|
||||
if [[ "${cur}" == -*=* ]]; then
|
||||
cur="${cur#*=}"
|
||||
fi
|
||||
|
||||
__rclone_debug "Calling ${requestComp}"
|
||||
# Use eval to handle any environment variables and such
|
||||
out=$(eval "${requestComp}" 2>/dev/null)
|
||||
|
||||
# Extract the directive integer at the very end of the output following a colon (:)
|
||||
directive=${out##*:}
|
||||
# Remove the directive
|
||||
out=${out%:*}
|
||||
if [ "${directive}" = "${out}" ]; then
|
||||
# There is not directive specified
|
||||
directive=0
|
||||
fi
|
||||
__rclone_debug "The completion directive is: ${directive}"
|
||||
__rclone_debug "The completions are: ${out}"
|
||||
}
|
||||
|
||||
__rclone_process_completion_results() {
|
||||
local shellCompDirectiveError=1
|
||||
local shellCompDirectiveNoSpace=2
|
||||
local shellCompDirectiveNoFileComp=4
|
||||
local shellCompDirectiveFilterFileExt=8
|
||||
local shellCompDirectiveFilterDirs=16
|
||||
|
||||
if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
|
||||
# Error code. No completion.
|
||||
__rclone_debug "Received error from custom completion go code"
|
||||
return
|
||||
else
|
||||
if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
|
||||
if [[ $(type -t compopt) = "builtin" ]]; then
|
||||
__rclone_debug "Activating no space"
|
||||
compopt -o nospace
|
||||
else
|
||||
__rclone_debug "No space directive not supported in this version of bash"
|
||||
fi
|
||||
fi
|
||||
if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
|
||||
if [[ $(type -t compopt) = "builtin" ]]; then
|
||||
__rclone_debug "Activating no file completion"
|
||||
compopt +o default
|
||||
else
|
||||
__rclone_debug "No file completion directive not supported in this version of bash"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Separate activeHelp from normal completions
|
||||
local completions=()
|
||||
local activeHelp=()
|
||||
__rclone_extract_activeHelp
|
||||
|
||||
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
|
||||
# File extension filtering
|
||||
local fullFilter filter filteringCmd
|
||||
|
||||
# Do not use quotes around the $completions variable or else newline
|
||||
# characters will be kept.
|
||||
for filter in ${completions[*]}; do
|
||||
fullFilter+="$filter|"
|
||||
done
|
||||
|
||||
filteringCmd="_filedir $fullFilter"
|
||||
__rclone_debug "File filtering command: $filteringCmd"
|
||||
$filteringCmd
|
||||
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
|
||||
# File completion for directories only
|
||||
|
||||
# Use printf to strip any trailing newline
|
||||
local subdir
|
||||
subdir=$(printf "%s" "${completions[0]}")
|
||||
if [ -n "$subdir" ]; then
|
||||
__rclone_debug "Listing directories in $subdir"
|
||||
pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
|
||||
else
|
||||
__rclone_debug "Listing directories in ."
|
||||
_filedir -d
|
||||
fi
|
||||
else
|
||||
__rclone_handle_completion_types
|
||||
fi
|
||||
|
||||
__rclone_handle_special_char "$cur" :
|
||||
__rclone_handle_special_char "$cur" =
|
||||
|
||||
# Print the activeHelp statements before we finish
|
||||
if [ ${#activeHelp} -ne 0 ]; then
|
||||
printf "\n";
|
||||
printf "%s\n" "${activeHelp[@]}"
|
||||
printf "\n"
|
||||
|
||||
# The prompt format is only available from bash 4.4.
|
||||
# We test if it is available before using it.
|
||||
if (x=${PS1@P}) 2> /dev/null; then
|
||||
printf "%s" "${PS1@P}${COMP_LINE[@]}"
|
||||
else
|
||||
# Can't print the prompt. Just print the
|
||||
# text the user had typed, it is workable enough.
|
||||
printf "%s" "${COMP_LINE[@]}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Separate activeHelp lines from real completions.
|
||||
# Fills the $activeHelp and $completions arrays.
|
||||
__rclone_extract_activeHelp() {
|
||||
local activeHelpMarker="_activeHelp_ "
|
||||
local endIndex=${#activeHelpMarker}
|
||||
|
||||
while IFS='' read -r comp; do
|
||||
if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then
|
||||
comp=${comp:endIndex}
|
||||
__rclone_debug "ActiveHelp found: $comp"
|
||||
if [ -n "$comp" ]; then
|
||||
activeHelp+=("$comp")
|
||||
fi
|
||||
else
|
||||
# Not an activeHelp line but a normal completion
|
||||
completions+=("$comp")
|
||||
fi
|
||||
done < <(printf "%s\n" "${out}")
|
||||
}
|
||||
|
||||
__rclone_handle_completion_types() {
|
||||
__rclone_debug "__rclone_handle_completion_types: COMP_TYPE is $COMP_TYPE"
|
||||
|
||||
case $COMP_TYPE in
|
||||
37|42)
|
||||
# Type: menu-complete/menu-complete-backward and insert-completions
|
||||
# If the user requested inserting one completion at a time, or all
|
||||
# completions at once on the command-line we must remove the descriptions.
|
||||
# https://github.com/spf13/cobra/issues/1508
|
||||
local tab=$'\t' comp
|
||||
while IFS='' read -r comp; do
|
||||
[[ -z $comp ]] && continue
|
||||
# Strip any description
|
||||
comp=${comp%%$tab*}
|
||||
# Only consider the completions that match
|
||||
if [[ $comp == "$cur"* ]]; then
|
||||
COMPREPLY+=("$comp")
|
||||
fi
|
||||
done < <(printf "%s\n" "${completions[@]}")
|
||||
;;
|
||||
|
||||
*)
|
||||
# Type: complete (normal completion)
|
||||
__rclone_handle_standard_completion_case
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
__rclone_handle_standard_completion_case() {
|
||||
local tab=$'\t' comp
|
||||
|
||||
# Short circuit to optimize if we don't have descriptions
|
||||
if [[ "${completions[*]}" != *$tab* ]]; then
|
||||
IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
|
||||
return 0
|
||||
fi
|
||||
|
||||
local longest=0
|
||||
local compline
|
||||
# Look for the longest completion so that we can format things nicely
|
||||
while IFS='' read -r compline; do
|
||||
[[ -z $compline ]] && continue
|
||||
# Strip any description before checking the length
|
||||
comp=${compline%%$tab*}
|
||||
# Only consider the completions that match
|
||||
[[ $comp == "$cur"* ]] || continue
|
||||
COMPREPLY+=("$compline")
|
||||
if ((${#comp}>longest)); then
|
||||
longest=${#comp}
|
||||
fi
|
||||
done < <(printf "%s\n" "${completions[@]}")
|
||||
|
||||
# If there is a single completion left, remove the description text
|
||||
if [ ${#COMPREPLY[*]} -eq 1 ]; then
|
||||
__rclone_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
|
||||
comp="${COMPREPLY[0]%%$tab*}"
|
||||
__rclone_debug "Removed description from single completion, which is now: ${comp}"
|
||||
COMPREPLY[0]=$comp
|
||||
else # Format the descriptions
|
||||
__rclone_format_comp_descriptions $longest
|
||||
fi
|
||||
}
|
||||
|
||||
__rclone_handle_special_char()
|
||||
{
|
||||
local comp="$1"
|
||||
local char=$2
|
||||
if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
|
||||
local word=${comp%"${comp##*${char}}"}
|
||||
local idx=${#COMPREPLY[*]}
|
||||
while [[ $((--idx)) -ge 0 ]]; do
|
||||
COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"}
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
__rclone_format_comp_descriptions()
|
||||
{
|
||||
local tab=$'\t'
|
||||
local comp desc maxdesclength
|
||||
local longest=$1
|
||||
|
||||
local i ci
|
||||
for ci in ${!COMPREPLY[*]}; do
|
||||
comp=${COMPREPLY[ci]}
|
||||
# Properly format the description string which follows a tab character if there is one
|
||||
if [[ "$comp" == *$tab* ]]; then
|
||||
__rclone_debug "Original comp: $comp"
|
||||
desc=${comp#*$tab}
|
||||
comp=${comp%%$tab*}
|
||||
|
||||
# $COLUMNS stores the current shell width.
|
||||
# Remove an extra 4 because we add 2 spaces and 2 parentheses.
|
||||
maxdesclength=$(( COLUMNS - longest - 4 ))
|
||||
|
||||
# Make sure we can fit a description of at least 8 characters
|
||||
# if we are to align the descriptions.
|
||||
if [[ $maxdesclength -gt 8 ]]; then
|
||||
# Add the proper number of spaces to align the descriptions
|
||||
for ((i = ${#comp} ; i < longest ; i++)); do
|
||||
comp+=" "
|
||||
done
|
||||
else
|
||||
# Don't pad the descriptions so we can fit more text after the completion
|
||||
maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
|
||||
fi
|
||||
|
||||
# If there is enough space for any description text,
|
||||
# truncate the descriptions that are too long for the shell width
|
||||
if [ $maxdesclength -gt 0 ]; then
|
||||
if [ ${#desc} -gt $maxdesclength ]; then
|
||||
desc=${desc:0:$(( maxdesclength - 1 ))}
|
||||
desc+="…"
|
||||
fi
|
||||
comp+=" ($desc)"
|
||||
fi
|
||||
COMPREPLY[ci]=$comp
|
||||
__rclone_debug "Final comp: $comp"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
__start_rclone()
|
||||
{
|
||||
local cur prev words cword split
|
||||
|
||||
COMPREPLY=()
|
||||
|
||||
# Call _init_completion from the bash-completion package
|
||||
# to prepare the arguments properly
|
||||
if declare -F _init_completion >/dev/null 2>&1; then
|
||||
_init_completion -n "=:" || return
|
||||
else
|
||||
__rclone_init_completion -n "=:" || return
|
||||
fi
|
||||
|
||||
__rclone_debug
|
||||
__rclone_debug "========= starting completion logic =========="
|
||||
__rclone_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword"
|
||||
|
||||
# The user could have moved the cursor backwards on the command-line.
|
||||
# We need to trigger completion from the $cword location, so we need
|
||||
# to truncate the command-line ($words) up to the $cword location.
|
||||
words=("${words[@]:0:$cword+1}")
|
||||
__rclone_debug "Truncated words[*]: ${words[*]},"
|
||||
|
||||
local out directive
|
||||
__rclone_get_completion_results
|
||||
__rclone_process_completion_results
|
||||
}
|
||||
|
||||
if [[ $(type -t compopt) = "builtin" ]]; then
|
||||
complete -o default -F __start_rclone rclone
|
||||
else
|
||||
complete -o default -o nospace -F __start_rclone rclone
|
||||
fi
|
||||
|
||||
# ex: ts=4 sw=4 et filetype=sh
|
||||
|
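rclone_bash_completion.sh appears to be the stock completion script that rclone itself generates (typically via rclone genautocomplete bash); enabling it follows the usual bash-completion conventions rather than anything specific to this repo, and the path below is an assumption:

# per-shell, e.g. from ~/.bashrc
source /opt/scripts/rclone/rclone_bash_completion.sh
# or system-wide
cp /opt/scripts/rclone/rclone_bash_completion.sh /etc/bash_completion.d/rclone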
119
old/rclone/rclone_cloudreve_automount.sh
Normal file
119
old/rclone/rclone_cloudreve_automount.sh
Normal file
@ -0,0 +1,119 @@
|
||||
#!/bin/bash
|
||||
#===================================================================
|
||||
# Filename : rclone_cloudreve_automount.sh
|
||||
# Function :
|
||||
# Usage :
|
||||
# Author : Manford Fan
|
||||
# Date : 2022-10-19 14:05
|
||||
# Version : Version 0.1
|
||||
# Disclaimer : The author is NOT responsible for any loss caused
|
||||
# by the user's own operations.
|
||||
# And More : If you find there are some bugs in this script
|
||||
# Or you have better ideas, please do contact me
|
||||
# via E-mail -- mffan0922@163.com
|
||||
#===================================================================
|
||||
|
||||
|
||||
function rclone_cloudreve_reset() {
|
||||
for i in `seq 3`; do
|
||||
fusermount -uzq /opt/webdav/cloudreve > /dev/null 2>&1
|
||||
umount /opt/webdav/cloudreve > /dev/null 2>&1
|
||||
sleep 2
|
||||
done
|
||||
ps -ef | grep 'rclone mount Cloudreve' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_cloudreve_pids
|
||||
for rclone_cloudreve_pid in `cat /tmp/rclone/rclone_cloudreve_pids`; do
|
||||
kill -9 $rclone_cloudreve_pid;
|
||||
done
|
||||
nohup /usr/bin/rclone mount Cloudreve:/ /opt/webdav/cloudreve \
|
||||
--allow-other --vfs-cache-mode full --vfs-cache-max-size 10G --vfs-read-ahead 100M --dir-cache-time 2m\
|
||||
--vfs-cache-max-age 4h --cache-dir /tmp/vfs-cache --bwlimit-file 20M --bwlimit 100M --no-update-modtime \
|
||||
--log-file /opt/logs/rclone/rclone_cloudreve.log --log-level NOTICE --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 500M \
|
||||
--buffer-size 200M --attr-timeout 5m --transfers=6 --multi-thread-streams=6 --fast-list \
|
||||
--allow-non-empty --no-modtime --max-duration 3h --vfs-read-wait 2s --vfs-write-wait 5s > /dev/null 2>&1 &
|
||||
}
|
||||
|
||||
cloudreve_log='/opt/logs/rclone/rclone_cloudreve.log'
|
||||
pid_self=$$
|
||||
# get all kinds of states for later decision
|
||||
num=`cat /proc/mounts | grep /opt/webdav/cloudreve | wc -l`
|
||||
[[ $num -eq 0 ]] && loaded=0
|
||||
[[ $num -eq 1 ]] && loaded=1
|
||||
[[ $num -gt 1 ]] && loaded=2
|
||||
|
||||
ps -ef | grep '/usr/bin/rclone mount Cloudreve' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_cloudreve_pids
|
||||
num=`cat /tmp/rclone/rclone_cloudreve_pids | wc -l`
|
||||
[[ $num -eq 0 ]] && rclone_running=0
|
||||
[[ $num -eq 1 ]] && rclone_running=1
|
||||
[[ $num -gt 1 ]] && rclone_running=2
|
||||
|
||||
sleep 2
|
||||
ps -ef | grep '/usr/bin/bash /opt/scripts/rclone/rclone_cloudreve_automount.sh' | grep -v 'grep' > /tmp/rclone/rclone_cloudreve_automount_pids
|
||||
let num=`cat /tmp/rclone/rclone_cloudreve_automount_pids | sed -e '/^$/d' | wc -l`
|
||||
[[ $num -eq 1 ]] && script_running=1
|
||||
if [[ $num -gt 1 ]]; then
|
||||
script_running=2
|
||||
echo `date` >> /tmp/rclone/rclone_cloudreve_abnormal.log
|
||||
ps -ef | grep '/usr/bin/bash /opt/scripts/rclone/rclone_cloudreve_automount.sh' | grep -v 'grep' >> /tmp/rclone/rclone_cloudreve_abnormal.log
|
||||
fi
|
||||
|
||||
# print the states for debug
|
||||
echo `date` >> $cloudreve_log
|
||||
echo loaded = $loaded >> $cloudreve_log
|
||||
echo rclone_running = $rclone_running >> $cloudreve_log
|
||||
echo script_running = $script_running >> $cloudreve_log
|
||||
# exit 5
|
||||
|
||||
# decide if `rclone` command function normally
|
||||
if [[ $1 == '-f' ]]; then
|
||||
echo -e "Happening @ $(date) [Cloudreve] Executing BY Hands.\n" >> $cloudreve_log
|
||||
|
||||
if [[ $script_running -eq 1 ]]; then
|
||||
rclone_cloudreve_reset
|
||||
elif [[ script_running -eq 2 ]]; then
|
||||
echo "Happening @ $(date) [Cloudreve] Script rclone_cloudreve_automount.sh -f has already been executing..." | tee -a $cloudreve_log
|
||||
echo "Happening @ $(date) [Cloudreve] Cloudreve RESET will be done with -f option" | tee -a $cloudreve_log
|
||||
for rclone_cloudreve_automount_pid in `cat /tmp/rclone/rclone_cloudreve_automount_pids`; do
|
||||
[[ $rclone_cloudreve_automount_pid != $pid_self ]] && kill -9 $rclone_cloudreve_automount_pid > /dev/null 2>&1
|
||||
done
|
||||
rclone_cloudreve_reset
|
||||
else
|
||||
echo "Happening @ $(date) [Cloudreve] In general, this case will NOT happen" >> $cloudreve_log
|
||||
fi
|
||||
elif [[ $1 == '-c' ]]; then
|
||||
echo -e "Happening @ $(date) [Cloudreve] Executing BY Cron Service.\n" >> $cloudreve_log
|
||||
|
||||
if [[ $script_running -eq 1 ]]; then
|
||||
rclone_cloudreve_reset
|
||||
elif [[ script_running -eq 2 ]]; then
|
||||
echo "Happening @ $(date) [Cloudreve] Script rclone_cloudreve_automount.sh -c has already been executing..." | tee -a $cloudreve_log
|
||||
echo "Happening @ $(date) [Cloudreve] Cloudreve RESET will be done on CRON condition." | tee -a $cloudreve_log
|
||||
for rclone_cloudreve_automount_pid in `cat /tmp/rclone/rclone_cloudreve_automount_pids`; do
|
||||
[[ $rclone_cloudreve_automount_pid != $pid_self ]] && kill -9 $rclone_cloudreve_automount_pid > /dev/null 2>&1
|
||||
done
|
||||
rclone_cloudreve_reset
|
||||
else
|
||||
echo "Happening @ $(date) [Cloudreve] In general, this case will NOT happen" >> $cloudreve_log
|
||||
fi
|
||||
elif [[ $1 == '' ]]; then
|
||||
sleep 10
|
||||
if [[ script_running -eq 1 ]]; then
|
||||
if [[ $loaded -ne 1 || $rclone_running -ne 1 ]]; then
|
||||
echo "Happening @ $(date) [Cloudreve] Executing automatically." >> $cloudreve_log
|
||||
rclone_cloudreve_reset
|
||||
fi
|
||||
elif [[ $script_running -eq 2 ]]; then
|
||||
echo "Happening @ $(date) [Cloudreve] Script rclone_cloudreve_automount.sh auto has already been executing..." | tee -a $cloudreve_log
|
||||
echo "Happening @ $(date) [Cloudreve] Nothing will be done at this auto-situation" | tee -a $cloudreve_log
|
||||
# for rclone_cloudreve_automount_pid in `cat /tmp/rclone_cloudreve_automount_pids`; do
|
||||
# [[ $rclone_cloudreve_automount_pid != $pid_self ]] && kill -9 $rclone_cloudreve_automount_pid > /dev/null 2>&1
|
||||
# done
|
||||
# rclone_cloudreve_reset
|
||||
else
|
||||
echo "Happening @ $(date) [Cloudreve] In general, this case will NOT happen" >> $cloudreve_log
|
||||
fi
|
||||
else
|
||||
echo "Happening @ $(date) [Cloudreve] Wrong usage of script/tool, only accept -f | -c or nothing as argument." >> $cloudreve_log
|
||||
fi
|
||||
|
||||
|
||||
|
116
old/rclone/rclone_onedrive_automount.sh
Normal file
116
old/rclone/rclone_onedrive_automount.sh
Normal file
@ -0,0 +1,116 @@
|
||||
#!/bin/bash
|
||||
#===================================================================
|
||||
# Filename : rclone_onedrive_automount.sh
|
||||
# Function :
|
||||
# Usage :
|
||||
# Author : Manford Fan
|
||||
# Date : 2022-10-19 14:05
|
||||
# Version : Version 0.1
|
||||
# Disclaimer : The author is NOT responsible for any loss caused
|
||||
# by the user's own operations.
|
||||
# And More : If you find there are some bugs in this script
|
||||
# Or you have better ideas, please do contact me
|
||||
# via E-mail -- mffan0922@163.com
|
||||
#===================================================================
|
||||
|
||||
|
||||
function rclone_onedrive_reset() {
|
||||
for i in `seq 3`; do
|
||||
fusermount -uzq /opt/webdav/onedrive > /dev/null 2>&1
|
||||
umount /opt/webdav/onedrive > /dev/null 2>&1
|
||||
sleep 2
|
||||
done
|
||||
ps -ef | grep 'rclone mount Onedrive' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_onedrive_pids
|
||||
for rclone_onedrive_pid in `cat /tmp/rclone/rclone_onedrive_pids`; do
|
||||
kill -9 $rclone_onedrive_pid;
|
||||
done
|
||||
nohup /usr/bin/rclone mount Onedrive:/ /opt/webdav/onedrive \
|
||||
--allow-other --vfs-cache-mode full --vfs-cache-max-size 10G --vfs-read-ahead 100M --dir-cache-time 2m --poll-interval 0 \
|
||||
--vfs-cache-max-age 4h --cache-dir /tmp/vfs-cache --bwlimit-file 20M --bwlimit 100M --no-update-modtime --contimeout 30m \
|
||||
--log-file /opt/logs/rclone/rclone_onedrive.log --log-level NOTICE --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 500M \
|
||||
--buffer-size 200M --attr-timeout 5m --transfers=6 --multi-thread-streams=6 --fast-list --timeout 30m \
|
||||
--allow-non-empty --no-modtime --max-duration 3h --vfs-read-wait 2s --vfs-write-wait 5s > /dev/null 2>&1 &
|
||||
}
|
||||
|
||||
onedrive_log='/opt/logs/rclone/rclone_onedrive.log'
|
||||
pid_self=$$
|
||||
# get all kinds of states for later decision
|
||||
num=`cat /proc/mounts | grep /opt/webdav/onedrive | wc -l`
|
||||
[[ $num -eq 0 ]] && loaded=0
|
||||
[[ $num -eq 1 ]] && loaded=1
|
||||
[[ $num -gt 1 ]] && loaded=2
|
||||
|
||||
ps -ef | grep '/usr/bin/rclone mount Onedrive' | grep -v grep | awk '{print $2}' > /tmp/rclone/rclone_onedrive_pids
|
||||
num=`cat /tmp/rclone/rclone_onedrive_pids | wc -l`
|
||||
[[ $num -eq 0 ]] && rclone_running=0
|
||||
[[ $num -eq 1 ]] && rclone_running=1
|
||||
[[ $num -gt 1 ]] && rclone_running=2
|
||||
|
||||
sleep 2
|
||||
ps -ef | grep '/usr/bin/bash /opt/scripts/rclone/rclone_onedrive_automount.sh' | grep -v 'grep' > /tmp/rclone/rclone_onedrive_automount_pids
|
||||
let num=`cat /tmp/rclone/rclone_onedrive_automount_pids | sed -e '/^$/d' | wc -l`
|
||||
[[ $num -eq 1 ]] && script_running=1
|
||||
[[ $num -gt 1 ]] && script_running=2
|
||||
|
||||
# print the states for debug
|
||||
echo `date` >> $onedrive_log
|
||||
echo loaded = $loaded >> $onedrive_log
|
||||
echo rclone_running = $rclone_running >> $onedrive_log
|
||||
echo script_running = $script_running >> $onedrive_log
|
||||
# exit 5
|
||||
|
||||
# decide whether the `rclone` mount is functioning normally
|
||||
if [[ $1 == '-f' ]]; then
|
||||
echo -e "Happening @ $(date) [Onedrive] Executing BY Hands.\n" >> $onedrive_log
|
||||
|
||||
if [[ $script_running -eq 1 ]]; then
|
||||
rclone_onedrive_reset
|
||||
elif [[ $script_running -eq 2 ]]; then
|
||||
echo "Happening @ $(date) [Onedrive] Script rclone_onedrive_automount.sh -f has already been executing..." | tee -a $onedrive_log
|
||||
echo "Happening @ $(date) [Onedrive] Onedrive RESET will be done with -f option" | tee -a $onedrive_log
|
||||
for rclone_onedrive_automount_pid in `cat /tmp/rclone/rclone_onedrive_automount_pids`; do
|
||||
[[ $rclone_onedrive_automount_pid != $pid_self ]] && kill -9 $rclone_onedrive_automount_pid > /dev/null 2>&1
|
||||
done
|
||||
rclone_onedrive_reset
|
||||
else
|
||||
echo "Happening @ $(date) [Onedrive] In general, this case will NOT happen" >> $onedrive_log
|
||||
fi
|
||||
elif [[ $1 == '-c' ]]; then
|
||||
echo -e "Happening @ $(date) [Onedrive] Executing BY Cron Service.\n" >> $onedrive_log
|
||||
|
||||
if [[ $script_running -eq 1 ]]; then
|
||||
rclone_onedrive_reset
|
||||
elif [[ $script_running -eq 2 ]]; then
|
||||
echo "Happening @ $(date) [Onedrive] Script rclone_onedrive_automount.sh -c has already been executing..." | tee -a $onedrive_log
|
||||
echo "Happening @ $(date) [Onedrive] Onedrive RESET will be done on CRON condition." | tee -a $onedrive_log
|
||||
for rclone_onedrive_automount_pid in `cat /tmp/rclone/rclone_onedrive_automount_pids`; do
|
||||
[[ $rclone_onedrive_automount_pid != $pid_self ]] && kill -9 $rclone_onedrive_automount_pid > /dev/null 2>&1
|
||||
done
|
||||
rclone_onedrive_reset
|
||||
else
|
||||
echo "Happening @ $(date) [Onedrive] In general, this case will NOT happen" >> $onedrive_log
|
||||
fi
|
||||
elif [[ $1 == '' ]]; then
|
||||
sleep 10
|
||||
if [[ $script_running -eq 1 ]]; then
|
||||
if [[ $loaded -ne 1 || $rclone_running -ne 1 ]]; then
|
||||
echo "Happening @ $(date) [Onedrive] Executing automatically." >> $onedrive_log
|
||||
rclone_onedrive_reset
|
||||
fi
|
||||
elif [[ $script_running -eq 2 ]]; then
|
||||
echo "Happening @ $(date) [Onedrive] script rclone_onedrive_automount.sh auto has already been executing..." | tee -a $onedrive_log
|
||||
echo "Happening @ $(date) [Onedrive] Nothing will be done at this auto-situation" | tee -a $onedrive_log
|
||||
echo "Nothing will be done at this situation" | tee -a $onedrive_log
|
||||
# for rclone_onedrive_automount_pid in `cat /tmp/rclone_onedrive_automount_pids`; do
|
||||
# [[ $rclone_onedrive_automount_pid != $pid_self ]] && kill -9 $rclone_onedrive_automount_pid > /dev/null 2>&1
|
||||
# done
|
||||
# rclone_onedrive_reset
|
||||
else
|
||||
echo "Happening @ $(date) [Onedrive] In general, this case will NOT happen" >> $onedrive_log
|
||||
fi
|
||||
else
|
||||
echo "Happening @ $(date) [Onedrive] Wrong usage of script/tool, only accept -f | -c or nothing as argument." >> $onedrive_log
|
||||
fi
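The script expects to be re-run periodically; a hedged sketch of a crontab entry that matches the command string the script greps for in its own self-detection (the five-minute interval is an assumption, not taken from the source):
# hypothetical crontab entry (crontab -e)
*/5 * * * * /usr/bin/bash /opt/scripts/rclone/rclone_onedrive_automount.sh -c > /dev/null 2>&1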
|
||||
|
||||
|
||||
|
21
old/rclone/rclone_sync.sh
Normal file
@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
#===================================================================
|
||||
# Filename : auto_start_self.sh
|
||||
# Function :
|
||||
# Usage :
|
||||
# Author : Manford Fan
|
||||
# Date : 2022-04-12 09:50
|
||||
# Version : Version 0.1
|
||||
# Disclaimer : The author is NOT responsible for any loss caused
|
||||
# by the user's own operations.
|
||||
# And More : If you find there are some bugs in this script
|
||||
# Or you have better ideas, please do contact me
|
||||
# via E-mail -- mffan0922@163.com
|
||||
#===================================================================
|
||||
|
||||
rclone sync -P /opt/media/Kindle/ Onedrive:/A-Book/Kindle/
|
||||
rclone sync -P /opt/media/Music/ Onedrive:/B-Media/Music/Koel/
|
||||
rclone sync -P Onedrive:/ /opt/webdav/wd/72-Backups/Onedrive/ --exclude=/E-Github/**
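Because these syncs mirror the source and delete extra files on the destination, a dry run is a cheap sanity check before running them for real; a minimal sketch using standard rclone flags:
# preview what the Kindle sync would change, without transferring or deleting anything
rclone sync --dry-run -P /opt/media/Kindle/ Onedrive:/A-Book/Kindle/
# afterwards, verify that source and destination really match
rclone check /opt/media/Kindle/ Onedrive:/A-Book/Kindle/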
|
||||
|
||||
|
||||
|
330
old/restore.sh
Normal file
@ -0,0 +1,330 @@
|
||||
#!/bin/bash
|
||||
|
||||
# ===========================================================================
|
||||
# This script must be executed by root privilege
|
||||
if [[ $(id -u) -ne 0 ]]; then
|
||||
echo -e "\e[1;31mThis script MUST be executed with root privilege.\e[0m\n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# ===========================================================================
|
||||
# Double-check before actually running this script
|
||||
echo -e "\e[1;2;31m[VPS USE ONLY] - Are you sure you want to run this script to re-configure you system???\e[0m"
|
||||
read -p "Yes/No: " YON
|
||||
[[ $YON != 'Yes' && $YON != 'YES' && $YON != 'yes' && $YON != 'y' && $YON != 'Y' ]] && exit 2
|
||||
echo -e "\e[1;2;33m[VPS USE ONLY] - AGAIN, are you sure you want to run this script to re-configure you system???\e[0m"
|
||||
read -p "Yes/No: " YON
|
||||
[[ $YON != 'Yes' && $YON != 'YES' && $YON != 'yes' && $YON != 'y' && $YON != 'Y' ]] && exit 3
|
||||
|
||||
# ===========================================================================
|
||||
# extract backup files
|
||||
vps=`find . -name vps*.tar.xz`
|
||||
if [[ ! -f flg && $vps != '' ]]; then
|
||||
echo -e "\n\e[1;34mExtracting backups to current dir...\e[0m\n"
|
||||
tar -I pixz -xmf vps*.xz
|
||||
touch flg
|
||||
elif [[ -f flg ]]; then
|
||||
echo -e "\n\e[1;32mAlready extracted, doing nothing.\e[0m\n"
|
||||
else
|
||||
echo -e "\n\e[1;31mThere is no backup file right here, plz check.\e[0m\n"
|
||||
exit 4
|
||||
fi
|
||||
|
||||
# ===========================================================================
|
||||
# some prerequisites: prepare the required paths & content
|
||||
echo -e "\n\e[1;34mPreparing initial env...\e[0m\n"
|
||||
rm -rf /opt/*
|
||||
mkdir -p /opt/logs
|
||||
mkdir -p /opt/logs/rclone
|
||||
mkdir -p /opt/temp
|
||||
mkdir -p /opt/webdav/{alist,onedrive,wd}
|
||||
mkdir -p /root/.pip
|
||||
cp -rf configs scripts source-code websites /opt/
|
||||
cp /opt/configs/pip.conf /root/.pip
|
||||
|
||||
# ===========================================================================
|
||||
# set hostname
|
||||
echo -e "\n\e[1;34mConfig hostname...\e[0m\n"
|
||||
echo -ne "\e[1;34mPlz specify hostname: \e[0m"
|
||||
read -t 600 host
|
||||
hostnamectl set-hostname $host
|
||||
name=`hostname`
|
||||
cat /etc/hosts | grep -q $name
|
||||
[[ $? -ne 0 ]] && sed -i "/^127/ s|$| $name|g" /etc/hosts
|
||||
|
||||
# ===========================================================================
|
||||
# config self-defined environment variable and function
|
||||
echo -e "\n\e[1;34mconfig self-defined environment variable and function...\e[0m\n"
|
||||
cat /root/.bashrc | grep -q 'toolbox'
|
||||
[[ $? -ne 0 ]] && echo 'source /opt/scripts/utool/toolbox.sh' >> /root/.bashrc
|
||||
source /root/.bashrc
|
||||
chmod +x /opt/scripts/utool/utool.py
|
||||
rm -rf /usr/local/bin/utool
|
||||
ln -s /opt/scripts/utool/utool.py /usr/local/bin/utool
|
||||
|
||||
# ===========================================================================
|
||||
# set apt sources
|
||||
echo -e "\n\e[1;34mConfig apt source list...\e[0m\n"
|
||||
cat > /etc/apt/sources.list << EOF
|
||||
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc) main contrib non-free
|
||||
deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc) main contrib non-free
|
||||
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc)-updates main contrib non-free
|
||||
deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc)-updates main contrib non-free
|
||||
deb https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc)-backports main contrib non-free
|
||||
deb-src https://mirrors.tuna.tsinghua.edu.cn/debian/ $(lsb_release -sc)-backports main contrib non-free
|
||||
deb https://mirrors.tuna.tsinghua.edu.cn/debian-security $(lsb_release -sc)-security main contrib non-free
|
||||
deb-src https://mirrors.tuna.tsinghua.edu.cn/debian-security $(lsb_release -sc)-security main contrib non-free
|
||||
EOF
|
||||
echo -e "\n\e[1;34mUpdating system...\e[0m\n"
|
||||
apt update && apt upgrade -y
|
||||
|
||||
# ===========================================================================
|
||||
# install some frequently used software
|
||||
echo -e "\n\e[1;34mInstalling some tools...\e[0m\n"
|
||||
apt install lrzsz unzip vim gcc g++ make automake curl wget gnupg2 aria2 jq apt-transport-https \
|
||||
ca-certificates lsb-release debian-archive-keyring oathtool ufw ruby ruby-dev qbittorrent-nox\
|
||||
git shc tmux htop pwgen imagemagick bash-completion dnsutils ghostscript nethogs ffmpeg iftop \
|
||||
python3-pip python3-dev golang net-tools ethtool tcpflow lshw rsync parallel rclone pigz pbzip2 \
|
||||
pixz neofetch mlocate ncdu dstat fzf tldr nscd inotify-hookable inotify-tools vsftpd mtr bridge-utils -y
|
||||
|
||||
# ===========================================================================
|
||||
# update pip3 setuptools and install jupyter lab
|
||||
echo -e "\n\e[1;34mupdate pip3 setuptools and install jupyter lab...\e[0m\n"
|
||||
pip3 install --upgrade setuptools -i https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
pip3 install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
pip3 install ipython -i https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
pip3 install jupyterlab -i https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
# cp /root/.jupyter/jupyter_lab_config.py /root/.jupyter/jupyter_lab_config_origin.py
|
||||
# cp /opt/configs/jupyter/jupyter_lab_config.py /root/.jupyter/
|
||||
# nohup jupyter lab --allow-root > /dev/null 2>&1 &
|
||||
|
||||
# ===========================================================================
|
||||
# configure vim
|
||||
echo -e "\n\e[1;34mConfig vim editor...\e[0m\n"
|
||||
cd /opt/configs/tools/
|
||||
[[ -d vim ]] && rm -rf vim
|
||||
unzip -q vimConfig.zip
|
||||
cd vim && bash install.sh
|
||||
cd .. && rm -rf vim
|
||||
|
||||
# ===========================================================================
|
||||
# config ssh git ufw and aria2
|
||||
echo -e "\n\e[1;34mConfig publickey ssh && git && ufw && aria2...\e[0m\n"
|
||||
cd /opt/configs/rsa/
|
||||
cp -f VPS* Github* config /root/.ssh/
|
||||
cat VPS.pub > /root/.ssh/authorized_keys
|
||||
echo '' >> /root/.ssh/authorized_keys
|
||||
chmod 600 /root/.ssh/*
|
||||
|
||||
git config --global user.name 'mffan0922'
|
||||
git config --global user.email 'mffan0922@163.com'
|
||||
|
||||
# ufw allow 22
|
||||
# ufw allow 80
|
||||
# ufw allow 443
|
||||
ufw disable
|
||||
|
||||
cp -rf /opt/configs/aria2/ /etc/
|
||||
> /etc/aria2/aria2.session
|
||||
|
||||
# ===========================================================================
|
||||
# install nginx
|
||||
echo -e "\n\e[1;34mInstalling nginx...\e[0m\n"
|
||||
apt install libpcre3 libpcre3-dev openssl libssl-dev zlib1g-dev libgeoip-dev -y
|
||||
cd /opt/source-code/nginx-1.22.0/
|
||||
./configure --prefix=/usr/local/nginx \
|
||||
--with-select_module \
|
||||
--with-poll_module \
|
||||
--with-threads \
|
||||
--with-file-aio \
|
||||
--with-http_ssl_module \
|
||||
--with-http_v2_module \
|
||||
--with-http_realip_module \
|
||||
--with-http_addition_module \
|
||||
--with-http_geoip_module \
|
||||
--with-http_sub_module \
|
||||
--with-http_dav_module \
|
||||
--with-http_flv_module \
|
||||
--with-http_mp4_module \
|
||||
--with-http_gunzip_module \
|
||||
--with-http_gzip_static_module \
|
||||
--with-http_auth_request_module \
|
||||
--with-http_random_index_module \
|
||||
--with-http_secure_link_module \
|
||||
--with-http_degradation_module \
|
||||
--with-http_slice_module \
|
||||
--with-http_stub_status_module \
|
||||
--with-mail \
|
||||
--with-mail_ssl_module \
|
||||
--with-stream \
|
||||
--with-stream_ssl_module \
|
||||
--with-stream_realip_module \
|
||||
--with-stream_geoip_module \
|
||||
--with-stream_ssl_preread_module \
|
||||
--user=www-data \
|
||||
--group=www-data \
|
||||
--add-module=/opt/source-code/nginx-1.22.0/modules/headers-more-nginx-module
|
||||
make -j 4 && make install
|
||||
[[ -f /usr/sbin/nginx ]] && rm -rf /usr/sbin/nginx
|
||||
ln -s /usr/local/nginx/sbin/nginx /usr/sbin/nginx
|
||||
cp -rf /opt/configs/nginx/nginx.conf /usr/local/nginx/conf/
|
||||
cp -rf /opt/configs/nginx/nginx.service /lib/systemd/system/
|
||||
systemctl enable nginx.service
|
||||
systemctl start nginx.service
|
||||
|
||||
# ===========================================================================
|
||||
# get https certificates
|
||||
echo -e "\n\e[1;34mSetting https...\e[0m\n"
|
||||
cd /root/ && git clone git@github.com:acmesh-official/acme.sh.git
|
||||
cd acme.sh && ./acme.sh --install -m mffan0922@163.com
|
||||
alias acme.sh=~/.acme.sh/acme.sh
|
||||
./acme.sh --issue --dns dns_ali -d rustle.cc -d *.rustle.cc
|
||||
cp /root/.acme.sh/rustle.cc/fullchain.cer /opt/configs/certs/rustle.cc.cer
|
||||
cp /root/.acme.sh/rustle.cc/rustle.cc.key /opt/configs/certs/
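The dns_ali mode reads the Aliyun API credentials from the environment, and acme.sh can also install the certificate and reload nginx on every renewal instead of the one-off cp above; a hedged sketch (the reload command is an assumption):
# hypothetical: Aliyun credentials must be exported before --issue with --dns dns_ali
export Ali_Key="<aliyun-access-key>"
export Ali_Secret="<aliyun-access-secret>"
# let acme.sh copy the files and reload nginx after each renewal
~/.acme.sh/acme.sh --install-cert -d rustle.cc \
    --key-file       /opt/configs/certs/rustle.cc.key \
    --fullchain-file /opt/configs/certs/rustle.cc.cer \
    --reloadcmd      "systemctl reload nginx"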
|
||||
|
||||
# ===========================================================================
|
||||
# install jekyll
|
||||
echo -e "\n\e[1;34mInstall jekyll blog env...\e[0m\n"
|
||||
gem install jekyll jekyll-paginate
|
||||
|
||||
# ===========================================================================
|
||||
echo -e "\n\e[1;34mInstalling mysql server...\e[0m\n"
|
||||
cd /opt/configs/mysql/ && dpkg -i mysql-apt-config_0.8.23-1_all.deb
|
||||
apt update && apt upgrade -y
|
||||
apt install mysql-server -y
|
||||
# cp -f /opt/configs/mysql/mysql.cnf /etc/mysql/conf.d/
|
||||
systemctl restart mysql.service
|
||||
|
||||
# ===========================================================================
|
||||
# install php8.0 for nextcloud
|
||||
echo -e "\n\e[1;34mInstall php8.0...\e[0m\n"
|
||||
wget -O /usr/share/keyrings/php-archive-keyring.gpg https://packages.sury.org/php/apt.gpg
|
||||
echo "deb [signed-by=/usr/share/keyrings/php-archive-keyring.gpg] https://packages.sury.org/php/ $(lsb_release -sc) main" > /etc/apt/sources.list.d/php.list
|
||||
apt update && apt upgrade -y
|
||||
apt install php8.0-fpm php8.0-cli php8.0-mysql php8.0-curl php8.0-gd \
|
||||
php8.0-mbstring php8.0-xml php8.0-zip php8.0-imap php8.0-opcache \
|
||||
php8.0-soap php8.0-gmp php8.0-bcmath php8.0-intl php8.0-imagick -y
|
||||
|
||||
# ===========================================================================
|
||||
# configure nextcloud
|
||||
echo -e "\n\e[1;34mRestore nextcloud env...\e[0m\n"
|
||||
apt install php8.0-memcache* memcached php8.0-apcu libmagickcore-6.q16-6-extra -y
|
||||
|
||||
echo -e "\n\e[1;34mbackup origin php data and restore previous php data...\e[0m\n"
|
||||
cp -rf /etc/php/ /tmp/
|
||||
cp -rf /opt/configs/php/8.0/fpm/pool.d/www.conf /etc/php/8.0/fpm/pool.d/www.conf
|
||||
cp -rf /opt/configs/php/8.0/mods-available/apcu.ini /etc/php/8.0/mods-available/apcu.ini
|
||||
cp -rf /opt/configs/php/8.0/cli/php.ini /etc/php/8.0/cli/php.ini
|
||||
cp -rf /opt/configs/php/8.0/fpm/php.ini /etc/php/8.0/fpm/php.ini
|
||||
|
||||
# ===========================================================================
|
||||
# restore mysql data
|
||||
echo -e "\n\e[1;34mrestore mysql data...\e[0m\n"
|
||||
cp /opt/configs/mysql/*.gz /root
|
||||
cd /root && gzip -d sql-*.gz
|
||||
mysql -uroot < sql-*.sql
|
||||
rm sql*
|
||||
|
||||
# ===========================================================================
|
||||
# configure frpc
|
||||
echo -e "\n\e[1;34mRestore frpc env...\e[0m\n"
|
||||
cp /opt/source-code/frpc/frpc.service /lib/systemd/system/
|
||||
cp /opt/source-code/frpc/frpc-free.service /lib/systemd/system/
|
||||
systemctl enable frpc.service
|
||||
systemctl enable frpc-free.service
|
||||
systemctl start frpc.service
|
||||
systemctl start frpc-free.service
|
||||
|
||||
# ===========================================================================
|
||||
# configure cloudreve
|
||||
echo -e "\n\e[1;34mRestore cloudreve env...\e[0m\n"
|
||||
cp /opt/source-code/cloudreve/cloudreve.service /lib/systemd/system/
|
||||
systemctl enable cloudreve.service
|
||||
systemctl start cloudreve.service
|
||||
|
||||
# ===========================================================================
|
||||
# configure navidrome
|
||||
echo -e "\n\e[1;34mRestore navidrome env...\e[0m\n"
|
||||
cp /opt/source-code/navidrome/navidrome.service /lib/systemd/system/
|
||||
systemctl enable navidrome.service
|
||||
systemctl start navidrome.service
|
||||
|
||||
# ===========================================================================
|
||||
# configure calibre
|
||||
echo -e "\n\e[1;34mStarting calibre...\e[0m\n"
|
||||
nohup /usr/bin/python3 /opt/source-code/calibre-web/cps.py > /dev/null 2>&1 &
|
||||
|
||||
# ===========================================================================
|
||||
# configure blog
|
||||
echo -e "\n\e[1;34mStarting blog...\e[0m\n"
|
||||
nohup /usr/bin/ruby2.7 /usr/local/bin/jekyll b -s /opt/source-code/blog/ -d /opt/websites/blog/ --trace --watch --incremental > /dev/null 2>&1 &
|
||||
|
||||
# ===========================================================================
|
||||
# configure alist
|
||||
echo -e "\n\e[1;34mConfig alist...\e[0m\n"
|
||||
cp /opt/source-code/alist/alist.service /lib/systemd/system/
|
||||
systemctl enable alist.service
|
||||
systemctl start alist.service
|
||||
|
||||
# ===========================================================================
|
||||
# configure rclone
|
||||
echo -e "\n\e[1;34mConfig rclone...\e[0m\n"
|
||||
cp -rf /opt/configs/rclone /root/.config/
|
||||
|
||||
# ===========================================================================
|
||||
# install php-8.1 & nodejs 16x
|
||||
echo -e "\n\e[1;34mInstall php-8.1 & nodejs 16x for monica...\e[0m\n"
|
||||
apt install -y php8.1 php8.1-bcmath php8.1-curl php8.1-gd php8.1-gmp php8.1-intl \
|
||||
php8.1-mbstring php8.1-mysql php8.1-redis php8.1-xml php8.1-zip
|
||||
curl -sSL https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin/ --filename=composer
|
||||
curl -fsSL https://deb.nodesource.com/setup_16.x | bash -
|
||||
apt-get install -y nodejs
|
||||
npm install --global yarn
|
||||
cd /opt/source-code/monica/
|
||||
composer install --no-interaction --no-dev
|
||||
yarn install
|
||||
yarn run production
|
||||
php artisan key:generate
|
||||
php artisan setup:production -v
|
||||
|
||||
# ===========================================================================
|
||||
echo -e "\n\e[1;34mRemove no longer required packages...\e[0m\n"
|
||||
apt autoremove -y
|
||||
|
||||
# ===========================================================================
|
||||
echo -e "\n\e[1;34mRestart nginx mariadb php and cloudreve services...\e[0m\n"
|
||||
systemctl restart nginx.service
|
||||
systemctl restart mysql.service
|
||||
systemctl restart cloudreve.service
|
||||
|
||||
# ===========================================================================
|
||||
echo -e "\n\e[1;34mimprove nextcloud performance...\e[0m\n"
|
||||
chown -R www-data:www-data /opt/websites/
|
||||
# cd /opt/websites/nextcloud/
|
||||
# sudo -u www-data php8.0 occ config:app:set files max_chunk_size --value 0
|
||||
# sudo -u www-data php8.0 occ files:scan --all
|
||||
|
||||
# ===========================================================================
|
||||
echo -e "\n\e[1;34mConfig crontabs and set correct timezone...\e[0m\n"
|
||||
cp -f /opt/configs/crontabs/* /var/spool/cron/crontabs/
|
||||
timedatectl set-timezone Asia/Shanghai
|
||||
|
||||
# ===========================================================================
|
||||
echo -e "\n\e[1;31m基本环境已经安装完成,还需要手动配置如下:\e[0m\n"
|
||||
echo " 1. 查看Homepage/Wiki/Nav站点是否可以正常访问"
|
||||
echo " 2. 查看blog生成日志是否正常"
|
||||
echo " 3. 访问nextcloud/cloudreve站点,是否可以正常登陆,并手动优化"
|
||||
echo " 4. 手动配置Jupyter Lab"
|
||||
echo " 5. 检查Navidrome是否能正常播放音乐"
|
||||
echo " 6. 手动运行一次qbittorrent-nox,并配置相关选项"
|
||||
echo " 7. 需要手动配置koel,并运行"
|
||||
echo " 8. 检查frp的运行状态"
|
||||
echo " 9. 直接访问Calibre Web,看是否可以正常访问"
|
||||
echo " 10. 需要手动配置monica,并运行"
|
||||
echo " 11. 手动安装jellyfin,因为可能安装包无法下载,需要去腾讯云主机下载传过来再安装"
|
||||
echo " 12. 访问Alist主页,看是否可以正常访问"
|
||||
echo " 13. 访问uptime status,看是否可以正常访问"
|
||||
echo " 14. 手动安装bashit"
|
||||
echo " 15. 重启系统"
|
||||
|
||||
|
7
old/sql_backup.sh
Normal file
@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
rm -rf /opt/configs/mysql/sql*.gz
|
||||
filename='sql-'`date +%Y%m%d%H%M`'.sql.gz'
|
||||
mysql -e "show databases;" -uroot | grep -Ev "Database|information_schema|performance_schema" | xargs mysqldump --skip-lock-tables -uroot --databases | gzip > $filename
|
||||
mv sql-*.gz /opt/configs/mysql/
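The dump can be sanity-checked, or restored the way old/restore.sh does, with the reverse pipeline; a minimal sketch:
# list the databases contained in the newest dump without restoring anything
zcat /opt/configs/mysql/sql-*.sql.gz | grep '^CREATE DATABASE'
# restore it (equivalent to the gzip -d + mysql steps in old/restore.sh)
zcat /opt/configs/mysql/sql-*.sql.gz | mysql -uroot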
|
||||
|
66
old/v2ray.sh
Normal file
@ -0,0 +1,66 @@
|
||||
#!/bin/bash
|
||||
|
||||
env_file='/opt/source-code/v2ray-4.34.0/envfile'
|
||||
|
||||
if [[ $1 == 'start' ]]; then
|
||||
cat $env_file | grep -q 'https_proxy'
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo 'export http_proxy="http://127.0.0.1:10808"' >> $env_file
|
||||
echo 'export https_proxy="http://127.0.0.1:10809"' >> $env_file
|
||||
echo 'export all_proxy="socks://127.0.0.1:10809"' >> $env_file
|
||||
source $env_file
|
||||
else
|
||||
echo -e "\e[1;3;31mv2ray has \e[1;3;32mALREADY\e[1;3;31m been started, do nothing...\e[0m"
|
||||
exit 11
|
||||
fi
|
||||
/opt/source-code/v2ray-4.34.0/v2ray -config /opt/source-code/v2ray-4.34.0/config.json > /dev/null 2>&1 &
|
||||
echo -e "\e[1;33mNow you can surfing around~\e[0m"
|
||||
elif [[ $1 == 'stop' ]]; then
|
||||
v2ray_pid=`ps -ef | grep '/opt/source-code/v2ray-4.34.0/v2ray' | grep -v grep | awk '{print $2}'`
|
||||
> $env_file
|
||||
source $env_file
|
||||
if [[ $v2ray_pid != '' ]]; then
|
||||
for vpid in $v2ray_pid; do
|
||||
kill -9 $vpid > /dev/null 2>&1
|
||||
done
|
||||
echo -e "\e[1;33mWelcome to the real world~\e[0m"
|
||||
else
|
||||
echo -e "\e[1;3;31mv2ray has \e[1;3;32mALREADY\e[1;3;31m been stopped, do nothing...\e[0m"
|
||||
fi
|
||||
elif [[ $1 == 'renew' ]]; then
|
||||
read -t 60 -p "Please input valid oversea IP: " ip
|
||||
sed -i '69s/.*/ "address": "'$ip'",/' /opt/source-code/v2ray-4.34.0/config.json > /dev/null 2>&1 &
|
||||
sed -i '/azure/{n;s/.*/ Hostname '$ip'/g}' /root/.ssh/config
|
||||
elif [[ $1 == 'status' ]]; then
|
||||
cat $env_file | grep -q 'https_proxy'
|
||||
is_empty=$?
|
||||
v2ray_pid=`ps -ef | grep '/opt/source-code/v2ray-4.34.0/v2ray' | grep -v grep | awk '{print $2}'`
|
||||
if [[ $v2ray_pid == '' && $is_empty -ne 0 ]]; then
|
||||
echo -e "\e[1;36mService is NOT running~\e[0m"
|
||||
elif [[ $v2ray_pid == '' && $is_empty -eq 0 ]]; then
|
||||
echo -e "\e[1;35mService is NOT running, BUT need check /opt/source-code/v2ray-4.34.0/envfile content, should be EMPTY\e[0m"
|
||||
elif [[ $v2ray_pid != '' && $is_empty -eq 0 ]]; then
|
||||
echo -e "\e[1;32mService is running~\e[0m"
|
||||
elif [[ $v2ray_pid != '' && $is_empty -ne 0 ]]; then
|
||||
echo -e "\e[1;35mService is running, BUT need check /opt/source-code/v2ray-4.34.0/envfile content, should NOT be empty~\e[0m"
|
||||
fi
|
||||
elif [[ $1 == 'restart' ]]; then
|
||||
> $env_file
|
||||
echo 'export http_proxy="http://127.0.0.1:10808"' >> $env_file
|
||||
echo 'export https_proxy="http://127.0.0.1:10809"' >> $env_file
|
||||
echo 'export all_proxy="socks://127.0.0.1:10809"' >> $env_file
|
||||
v2ray_pid=`ps -ef | grep '/opt/source-code/v2ray-4.34.0/v2ray' | grep -v grep | awk '{print $2}'`
|
||||
if [[ $v2ray_pid == '' ]]; then
|
||||
/opt/source-code/v2ray-4.34.0/v2ray -config /opt/source-code/v2ray-4.34.0/config.json > /dev/null 2>&1 &
|
||||
else
|
||||
:
|
||||
fi
|
||||
source $env_file
|
||||
echo -e "\e[1;35mService restarted, dive deeper~\e[0m"
|
||||
else
|
||||
echo -e "\e[1;3;31mOnly accept start|stop|renew as parameter.\e[0m"
|
||||
exit 1
|
||||
fi
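Typical invocations, assuming the script is called directly with bash. Note that the exported proxy variables only reach your interactive shell if the script is sourced, since a child bash cannot modify its parent's environment:
bash v2ray.sh start      # write proxy vars to the envfile and start v2ray
bash v2ray.sh status     # compare the running process against the envfile content
bash v2ray.sh renew      # point config.json and ~/.ssh/config at a new overseas IP
bash v2ray.sh stop       # kill v2ray and empty the envfile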
|
||||
|
||||
|
||||
|
1
roll_api/calendar_tips.py
Symbolic link
@ -0,0 +1 @@
|
||||
/opt/scripts/alert/calendar_tips.py
|
45
roll_api/domain_reg_check.py
Normal file
@ -0,0 +1,45 @@
|
||||
import base64
|
||||
import sys
|
||||
import time
|
||||
import requests
|
||||
import json
|
||||
|
||||
|
||||
def reg_check(app_id, app_secret, domain):
|
||||
api_url = f'https://www.mxnzp.com/api/beian/search?domain={domain}&app_id={app_id}&app_secret={app_secret}'
|
||||
res = requests.get(api_url)
|
||||
|
||||
res_http_code = res.status_code
|
||||
res_text = json.loads(res.text)
|
||||
res_code = res_text['code']
|
||||
res_msg = res_text['msg']
|
||||
if res_http_code != 200 or res_code == 0:
|
||||
print(res_msg)
|
||||
exit(3)
|
||||
else:
|
||||
# print(res_text)
|
||||
print("*" * 50)
|
||||
print(f"域 名:{res_text['data']['domain']}")
|
||||
print(f"单 位:{res_text['data']['unit']}")
|
||||
print(f"类 型:{res_text['data']['type']}")
|
||||
print(f"备案号:{res_text['data']['icpCode']}")
|
||||
print(f"名 称:{res_text['data']['name']}")
|
||||
print(f"审核时间:{res_text['data']['passTime']}\n")
|
||||
|
||||
|
||||
def main():
|
||||
app_id = "nrsngdkvknqkrwko"
|
||||
app_secret = "SFFmQWo2dnNBRjdNYkVSclZxa2ZvUT09"
|
||||
if len(sys.argv) == 1:
|
||||
print("Must specify one or more domains to check registration.")
|
||||
exit(2)
|
||||
|
||||
for domain in sys.argv[1:]:
|
||||
domain = str(base64.b64encode(domain.encode('utf-8')), encoding='utf-8')
|
||||
reg_check(app_id, app_secret, domain)
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
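Each positional argument is base64-encoded before it is sent to the API, so domains are passed as plain text on the command line; a usage sketch (the second domain is illustrative):
# query ICP registration for one or more domains, one second apart
python3 roll_api/domain_reg_check.py rustle.cc example.com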
|
||||
|
45
roll_api/get_ip.py
Normal file
@ -0,0 +1,45 @@
|
||||
import sys
|
||||
import json
|
||||
import requests
|
||||
import time
|
||||
|
||||
|
||||
def ip_check(app_id, app_secret, ip):
|
||||
api_url = f"https://www.mxnzp.com/api/ip/aim_ip?ip={ip}&app_id={app_id}&app_secret={app_secret}"
|
||||
res = requests.get(api_url)
|
||||
res_http_code = res.status_code
|
||||
res_text = json.loads(res.text)
|
||||
res_code = res_text['code']
|
||||
res_msg = res_text['msg']
|
||||
if res_http_code != 200 or res_code == 0:
|
||||
print(f"接口查询失败:{res_msg}")
|
||||
else:
|
||||
print('*' * 50)
|
||||
for key, value in res_text['data'].items():
|
||||
print(key.strip().ljust(10), ':', end=' ')
|
||||
print(str(value).strip())
|
||||
print()
|
||||
|
||||
|
||||
def main():
|
||||
app_id = "nrsngdkvknqkrwko"
|
||||
app_secret = "SFFmQWo2dnNBRjdNYkVSclZxa2ZvUT09"
|
||||
|
||||
if len(sys.argv) == 1:
|
||||
print("Must specify at least one IP for information check.")
|
||||
exit(2)
|
||||
|
||||
# try:
|
||||
# ip = sys.argv[1]
|
||||
# except Exception as Err:
|
||||
# print(f"Error Desc: {Err}. Maybe you need to supply correct ip next time.")
|
||||
# exit(2)
|
||||
|
||||
for ip in sys.argv[1:]:
|
||||
ip_check(app_id, app_secret, ip)
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
28
roll_api/get_self_ip.py
Normal file
@ -0,0 +1,28 @@
|
||||
import requests
|
||||
import json
|
||||
|
||||
|
||||
def ip_self(app_id, app_secret):
|
||||
api_url = f'https://www.mxnzp.com/api/ip/self?app_id={app_id}&app_secret={app_secret}'
|
||||
res = requests.get(api_url)
|
||||
res_http_code = res.status_code
|
||||
res_text = json.loads(res.text)
|
||||
res_code = res_text['code']
|
||||
res_msg = res_text['msg']
|
||||
if res_http_code != 200 or res_code == 0:
|
||||
print(f"接口查询失败:{res_msg}\n")
|
||||
else:
|
||||
for key, value in res_text['data'].items():
|
||||
print(key.strip().ljust(10), ':', end=' ')
|
||||
print(str(value).strip())
|
||||
|
||||
|
||||
def main():
|
||||
app_id = "nrsngdkvknqkrwko"
|
||||
app_secret = "SFFmQWo2dnNBRjdNYkVSclZxa2ZvUT09"
|
||||
ip_self(app_id, app_secret)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
1
roll_api/love_words.py
Symbolic link
@ -0,0 +1 @@
|
||||
/opt/scripts/alert/love_words.py
|
1
roll_api/weather_tips.py
Symbolic link
@ -0,0 +1 @@
|
||||
/opt/scripts/alert/weather_tips.py
|
154
todo/todo.cfg
Normal file
@ -0,0 +1,154 @@
|
||||
# === EDIT FILE LOCATIONS BELOW ===
|
||||
|
||||
# Your todo.txt directory (this should be an absolute path)
|
||||
export TODO_DIR="/opt/logs/TODO"
|
||||
# export TODO_DIR=$(dirname "$0")
|
||||
|
||||
# Your todo/done/report.txt locations
|
||||
export TODO_FILE="$TODO_DIR/todo.txt"
|
||||
export DONE_FILE="$TODO_DIR/done.txt"
|
||||
export REPORT_FILE="$TODO_DIR/report.txt"
|
||||
|
||||
# You can customize your actions directory location
|
||||
#export TODO_ACTIONS_DIR="$HOME/.todo.actions.d"
|
||||
|
||||
# == EDIT FILE LOCATIONS ABOVE ===
|
||||
|
||||
# === COLOR MAP ===
|
||||
|
||||
## Text coloring and formatting is done by inserting ANSI escape codes.
|
||||
## If you have re-mapped your color codes, or use the todo.txt
|
||||
## output in another output system (like Conky), you may need to
|
||||
## over-ride by uncommenting and editing these defaults.
|
||||
## If you change any of these here, you also need to uncomment
|
||||
## the defaults in the COLORS section below. Otherwise, todo.txt
|
||||
## will still use the defaults!
|
||||
|
||||
#======> normal <========
|
||||
export BLACK='\\033[0;30m'
|
||||
export RED='\\033[0;31m'
|
||||
export GREEN='\\033[0;32m'
|
||||
export YELLOW='\\033[0;33m'
|
||||
export BLUE='\\033[0;34m'
|
||||
export PURPLE='\\033[0;35m'
|
||||
export CYAN='\\033[0;36m'
|
||||
export GREY='\\033[0;37m'
|
||||
#======> bold <========
|
||||
export BOLD_BLACK='\\033[1;30m'
|
||||
export BOLD_RED='\\033[1;31m'
|
||||
export BOLD_GREEN='\\033[1;32m'
|
||||
export BOLD_YELLOW='\\033[1;33m'
|
||||
export BOLD_BLUE='\\033[1;34m'
|
||||
export BOLD_PURPLE='\\033[1;35m'
|
||||
export BOLD_CYAN='\\033[1;36m'
|
||||
export BOLD_GREY='\\033[1;37m'
|
||||
#======> italic <========
|
||||
export ITALIC_BLACK='\\033[3;30m'
|
||||
export ITALIC_RED='\\033[3;31m'
|
||||
export ITALIC_GREEN='\\033[3;32m'
|
||||
export ITALIC_YELLOW='\\033[3;33m'
|
||||
export ITALIC_BLUE='\\033[3;34m'
|
||||
export ITALIC_PURPLE='\\033[3;35m'
|
||||
export ITALIC_CYAN='\\033[3;36m'
|
||||
export ITALIC_GREY='\\033[3;37m'
|
||||
#======> underline <========
|
||||
export UNDERLINE_BLACK='\\033[4;30m'
|
||||
export UNDERLINE_RED='\\033[3;41m'
|
||||
export UNDERLINE_GREEN='\\033[4;32m'
|
||||
export UNDERLINE_YELLOW='\\033[4;33m'
|
||||
export UNDERLINE_BLUE='\\033[4;34m'
|
||||
export UNDERLINE_PURPLE='\\033[4;35m'
|
||||
export UNDERLINE_CYAN='\\033[4;36m'
|
||||
export UNDERLINE_GREY='\\033[4;37m'
|
||||
#======> bold & italic <========
|
||||
export BOLD_ITALIC_BLACK='\\033[1;3;30m'
|
||||
export BOLD_ITALIC_RED='\\033[1;3;31m'
|
||||
export BOLD_ITALIC_GREEN='\\033[1;3;32m'
|
||||
export BOLD_ITALIC_YELLOW='\\033[1;3;33m'
|
||||
export BOLD_ITALIC_BLUE='\\033[1;3;34m'
|
||||
export BOLD_ITALIC_PURPLE='\\033[1;3;35m'
|
||||
export BOLD_ITALIC_CYAN='\\033[1;3;36m'
|
||||
export BOLD_ITALIC_GREY='\\033[1;3;37m'
|
||||
#======> bold & underline <========
|
||||
export BOLD_UNDERLINE_BLACK='\\033[1;4;30m'
|
||||
export BOLD_UNDERLINE_RED='\\033[1;4;31m'
|
||||
export BOLD_UNDERLINE_GREEN='\\033[1;4;32m'
|
||||
export BOLD_UNDERLINE_YELLOW='\\033[1;4;33m'
|
||||
export BOLD_UNDERLINE_BLUE='\\033[1;4;34m'
|
||||
export BOLD_UNDERLINE_PURPLE='\\033[1;4;35m'
|
||||
export BOLD_UNDERLINE_CYAN='\\033[1;4;36m'
|
||||
export BOLD_UNDERLINE_GREY='\\033[1;4;37m'
|
||||
#======> italic & underline <========
|
||||
export ITALIC_UNDERLINE_BLACK='\\033[3;4;30m'
|
||||
export ITALIC_UNDERLINE_RED='\\033[3;4;31m'
|
||||
export ITALIC_UNDERLINE_GREEN='\\033[3;4;32m'
|
||||
export ITALIC_UNDERLINE_YELLOW='\\033[3;4;33m'
|
||||
export ITALIC_UNDERLINE_BLUE='\\033[3;4;34m'
|
||||
export ITALIC_UNDERLINE_PURPLE='\\033[3;4;35m'
|
||||
export ITALIC_UNDERLINE_CYAN='\\033[3;4;36m'
|
||||
export ITALIC_UNDERLINE_GREY='\\033[3;4;37m'
|
||||
#======> bold & italic & underline <========
|
||||
export BOLD_ITALIC_UNDERLINE_BLACK='\\033[1;3;4;30m'
|
||||
export BOLD_ITALIC_UNDERLINE_RED='\\033[1;3;4;31m'
|
||||
export BOLD_ITALIC_UNDERLINE_GREEN='\\033[1;3;4;32m'
|
||||
export BOLD_ITALIC_UNDERLINE_YELLOW='\\033[1;3;4;33m'
|
||||
export BOLD_ITALIC_UNDERLINE_BLUE='\\033[1;3;4;34m'
|
||||
export BOLD_ITALIC_UNDERLINE_PURPLE='\\033[1;3;4;35m'
|
||||
export BOLD_ITALIC_UNDERLINE_CYAN='\\033[1;3;4;36m'
|
||||
export BOLD_ITALIC_UNDERLINE_GREY='\\033[1;3;4;37m'
|
||||
#======> REAL PRI COLOR <========
|
||||
PRI_A_COLOR='\\033[1;4;31;47m'
|
||||
PRI_B_COLOR='\\033[1;4;31;42m'
|
||||
PRI_C_COLOR='\\033[1;4;31;43m'
|
||||
PRI_D_COLOR='\\033[1;4;31;44m'
|
||||
PRI_E_COLOR='\\033[1;4;31;46m'
|
||||
#======> restore normal <========
|
||||
export DEFAULT='\\033[0m'
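These values are plain ANSI SGR sequences with the backslash doubled for todo.sh's later expansion; to preview a style before assigning it, echo the single-backslash form. A quick sketch of the PRI_A_COLOR combination defined above:
# preview PRI_A_COLOR: bold, underlined, red text on a white background
echo -e "\033[1;4;31;47m (A) sample top-priority task \033[0m"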
|
||||
|
||||
# === COLORS ===
|
||||
|
||||
## Uncomment and edit to override these defaults.
|
||||
## Reference the constants from the color map above,
|
||||
## or use $NONE to disable highlighting.
|
||||
#
|
||||
# Priorities can be any upper-case letter.
|
||||
# A,B,C are highlighted; you can add coloring for more.
|
||||
|
||||
export PRI_A=$PRI_A_COLOR # color for A priority
|
||||
export PRI_B=$PRI_B_COLOR # color for B priority
|
||||
export PRI_C=$PRI_C_COLOR # color for C priority
|
||||
export PRI_D=$PRI_D_COLOR # color for D priority
|
||||
export PRI_E=$PRI_E_COLOR # color for E priority
|
||||
|
||||
# There is highlighting for tasks that have been done,
|
||||
# but haven't been archived yet.
|
||||
#
|
||||
export COLOR_DONE=$BOLD_ITALIC_UNDERLINE_GREY
|
||||
|
||||
# There is highlighting for projects, contexts, dates, and item numbers.
|
||||
|
||||
export COLOR_PROJECT=$ITALIC_UNDERLINE_PURPLE
|
||||
export COLOR_CONTEXT=$ITALIC_UNDERLINE_GREEN
|
||||
export COLOR_DATE=$BOLD_BLUE
|
||||
export COLOR_NUMBER=$BOLD_PURPLE
|
||||
|
||||
# There is highlighting for metadata key:value pairs e.g.
|
||||
# DUE:2006-08-01 or note:MYNOTE
|
||||
|
||||
export COLOR_META=$BOLD_CYAN
|
||||
|
||||
# === BEHAVIOR ===
|
||||
|
||||
## customize list output
|
||||
#
|
||||
# TODOTXT_SORT_COMMAND will filter after line numbers are
|
||||
# inserted, but before colorization, and before hiding of
|
||||
# priority, context, and project.
|
||||
#
|
||||
# export TODOTXT_SORT_COMMAND='env LC_COLLATE=C sort -f -k2'
|
||||
|
||||
# TODOTXT_FINAL_FILTER will filter list output after colorization,
|
||||
# priority hiding, context hiding, and project hiding. That is,
|
||||
# just before the list output is displayed.
|
||||
#
|
||||
# export TODOTXT_FINAL_FILTER='cat'
|
1531
todo/todo.sh
Executable file
File diff suppressed because it is too large
120
todo/todo_completion
Executable file
@ -0,0 +1,120 @@
|
||||
#!/bin/bash source-this-script
|
||||
[ "$BASH_VERSION" ] || return
|
||||
|
||||
_todo()
|
||||
{
|
||||
local cur prev opts
|
||||
COMPREPLY=()
|
||||
cur="${COMP_WORDS[COMP_CWORD]}"
|
||||
prev="${COMP_WORDS[COMP_CWORD-1]}"
|
||||
|
||||
local -r OPTS="-@ -@@ -+ -++ -d -f -h -p -P -PP -a -n -t -v -vv -V -x"
|
||||
local -r COMMANDS="\
|
||||
add a addto addm append app archive command del \
|
||||
rm depri dp do help list ls listaddons listall lsa listcon \
|
||||
lsc listfile lf listpri lsp listproj lsprj move \
|
||||
mv prepend prep pri p replace report shorthelp"
|
||||
local -r MOVE_COMMAND_PATTERN='move|mv'
|
||||
|
||||
local _todo_sh=${_todo_sh:-todo.sh}
|
||||
local completions
|
||||
if [ $COMP_CWORD -eq 1 ]; then
|
||||
completions="$COMMANDS $(eval TODOTXT_VERBOSE=0 $_todo_sh command listaddons 2>/dev/null) $OPTS"
|
||||
elif [[ $COMP_CWORD -gt 2 && ( \
|
||||
"${COMP_WORDS[COMP_CWORD-2]}" =~ ^($MOVE_COMMAND_PATTERN${_todo_file2_actions:+|${_todo_file2_actions}})$ || \
|
||||
"${COMP_WORDS[COMP_CWORD-3]}" =~ ^($MOVE_COMMAND_PATTERN${_todo_file3_actions:+|${_todo_file3_actions}})$ ) ]]; then
|
||||
# "move ITEM# DEST [SRC]" has file arguments on positions 2 and 3.
|
||||
completions=$(eval TODOTXT_VERBOSE=0 $_todo_sh command listfile 2>/dev/null)
|
||||
else
|
||||
case "$prev" in
|
||||
command)
|
||||
completions=$COMMANDS;;
|
||||
help)
|
||||
completions="$COMMANDS $(eval TODOTXT_VERBOSE=0 $_todo_sh command listaddons 2>/dev/null)";;
|
||||
-*) completions="$COMMANDS $(eval TODOTXT_VERBOSE=0 $_todo_sh command listaddons 2>/dev/null) $OPTS";;
|
||||
*) if [[ "$prev" =~ ^(addto|listfile|lf${_todo_file1_actions:+|${_todo_file1_actions}})$ ]]; then
|
||||
completions=$(eval TODOTXT_VERBOSE=0 $_todo_sh command listfile 2>/dev/null)
|
||||
else
|
||||
case "$cur" in
|
||||
+*) completions=$(eval TODOTXT_VERBOSE=0 $_todo_sh command listproj 2>/dev/null)
|
||||
COMPREPLY=( $( compgen -W "$completions" -- $cur ))
|
||||
[ ${#COMPREPLY[@]} -gt 0 ] && return 0
|
||||
# Fall back to projects extracted from done tasks.
|
||||
completions=$(eval 'TODOTXT_VERBOSE=0 TODOTXT_SOURCEVAR=\$DONE_FILE' $_todo_sh command listproj 2>/dev/null)
|
||||
;;
|
||||
@*) completions=$(eval TODOTXT_VERBOSE=0 $_todo_sh command listcon 2>/dev/null)
|
||||
COMPREPLY=( $( compgen -W "$completions" -- $cur ))
|
||||
[ ${#COMPREPLY[@]} -gt 0 ] && return 0
|
||||
# Fall back to contexts extracted from done tasks.
|
||||
completions=$(eval 'TODOTXT_VERBOSE=0 TODOTXT_SOURCEVAR=\$DONE_FILE' $_todo_sh command listcon 2>/dev/null)
|
||||
;;
|
||||
*) if [[ "$cur" =~ ^[0-9]+$ ]]; then
|
||||
declare -a sedTransformations=(
|
||||
# Remove the (padded) task number; we prepend the
|
||||
# user-provided $cur instead.
|
||||
-e 's/^ *[0-9]\{1,\} //'
|
||||
# Remove the timestamp prepended by the -t option,
|
||||
# but keep any priority (as it's short and may
|
||||
# provide useful context).
|
||||
-e 's/^\((.) \)\{0,1\}[0-9]\{2,4\}-[0-9]\{2\}-[0-9]\{2\} /\1/'
|
||||
# Remove the done date and (if there) the timestamp.
|
||||
# Keep the "x" (as it's short and may provide useful
|
||||
# context)
|
||||
-e 's/^\([xX] \)\([0-9]\{2,4\}-[0-9]\{2\}-[0-9]\{2\} \)\{1,2\}/\1/'
|
||||
# Remove any trailing whitespace; the Bash
|
||||
# completion inserts a trailing space itself.
|
||||
-e 's/[[:space:]]*$//'
|
||||
# Finally, limit the output to a single line just as
|
||||
# a safety check of the ls action output.
|
||||
-e '1q'
|
||||
)
|
||||
local todo=$( \
|
||||
eval TODOTXT_VERBOSE=0 $_todo_sh '-@ -+ -p -x command ls "^ *${cur} "' 2>/dev/null | \
|
||||
sed "${sedTransformations[@]}" \
|
||||
)
|
||||
# Append task text as a shell comment. This
|
||||
# completion can be a safety check before a
|
||||
# destructive todo.txt operation.
|
||||
[ "$todo" ] && COMPREPLY[0]="$cur # $todo"
|
||||
return 0
|
||||
else
|
||||
return 0
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
COMPREPLY=( $( compgen -W "$completions" -- $cur ))
|
||||
return 0
|
||||
}
|
||||
complete -F _todo todo.sh
|
||||
|
||||
# If you define an alias (e.g. "t") to todo.sh, you need to explicitly enable
|
||||
# completion for it, too:
|
||||
#complete -F _todo t
|
||||
# It is recommended to put this line next to your alias definition in your
|
||||
# ~/.bashrc (or wherever else you're defining your alias). If you simply
|
||||
# uncomment it here, you will need to redo this on every todo.txt update!
|
||||
|
||||
# If you have renamed the todo.sh executable, or if it is not accessible through
|
||||
# PATH, you need to add and use a wrapper completion function, like this:
|
||||
#_todoElsewhere()
|
||||
#{
|
||||
# local _todo_sh='/path/to/todo2.sh'
|
||||
# _todo "$@"
|
||||
#}
|
||||
#complete -F _todoElsewhere /path/to/todo2.sh
|
||||
|
||||
# If you use aliases to use different configuration(s), you need to add and use
|
||||
# a wrapper completion function for each configuration if you want to complete
|
||||
# from the actual configured task locations:
|
||||
#alias todo2='todo.sh -d "$HOME/todo2.cfg"'
|
||||
#_todo2()
|
||||
#{
|
||||
# local _todo_sh='todo.sh -d "$HOME/todo2.cfg"'
|
||||
# _todo "$@"
|
||||
#}
|
||||
#complete -F _todo2 todo2
|
136
todo/todo_format.py
Normal file
@ -0,0 +1,136 @@
|
||||
import os
|
||||
import time
|
||||
import re
|
||||
|
||||
|
||||
def utf8_length(text):
|
||||
if text is None:
|
||||
return 0
|
||||
len_text = len(text)
|
||||
len_text_utf8 = len(text.encode('utf-8'))
|
||||
# In UTF-8 a CJK character takes 3 bytes, so (byte length - character length) is 2 per CJK character; halve it to get the extra display width, then add it back
|
||||
size = int((len_text_utf8 - len_text) / 2 + len_text)
|
||||
return size
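The formula assumes every non-ASCII character is a 3-byte CJK character that renders two columns wide: the byte/character difference is 2 per such character, so half of it is the extra width. The same arithmetic checked from the shell (the sample string is illustrative; a UTF-8 locale is assumed):
s='任务abc'                              # 2 CJK characters + 3 ASCII characters
chars=${#s}                              # 5 characters
bytes=$(printf %s "$s" | wc -c)          # 9 bytes (3 per CJK character)
echo $(( (bytes - chars) / 2 + chars ))  # 7 display columns, matching utf8_length()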
|
||||
|
||||
|
||||
def tidy_done():
|
||||
|
||||
with open('/opt/logs/TODO/done.txt', 'r') as donetxt, open('/tmp/tmp_done.txt', 'w') as tmpdone:
|
||||
lines = donetxt.readlines()
|
||||
for line in lines:
|
||||
tmpdone.write(line.strip() + '\n')
|
||||
|
||||
os.system('mv /tmp/tmp_done.txt /opt/logs/TODO/done.txt')
|
||||
|
||||
|
||||
def format_col_1(item):
|
||||
global number_of_days
|
||||
global item_no
|
||||
global auto_weekend
|
||||
|
||||
end_time_date = item.strip()[-10:]
|
||||
try:
|
||||
end_time_stamp = time.strptime(end_time_date, "%Y-%m-%d")
|
||||
except Exception as Err:
|
||||
print('Error Encountered: ', end='')
|
||||
print(str(Err))
|
||||
print(f"Please modify /opt/logs/TODO/todo.txt @ line {item_no} manually, and then run it again.\n")
|
||||
os._exit(1)
|
||||
|
||||
end_time_stamp = time.mktime(end_time_stamp)
|
||||
now_time_stamp = int(time.time())
|
||||
number_of_days = round((end_time_stamp - now_time_stamp) / 3600 / 24 + 0.75, 2)
|
||||
# print('number_of_days-1:', number_of_days)
|
||||
|
||||
if auto_weekend and number_of_days < 0:
|
||||
end_time_stamp += 604800
|
||||
end_time_date = time.strftime('%Y-%m-%d', time.localtime(end_time_stamp))
|
||||
item = item.strip()[:-10] + end_time_date
|
||||
number_of_days += 7
|
||||
|
||||
item_format = re.sub(' +', ' ', item.strip())
|
||||
done.write(item_format)
|
||||
for i in range(16 - len(item_format)):
|
||||
done.write(' ')
|
||||
|
||||
done.write('|')
|
||||
|
||||
|
||||
def format_col_2(item):
|
||||
done.write(' ')
|
||||
done.write(item.strip())
|
||||
for i in range(12 - 1 - len(item.strip())):
|
||||
done.write(' ')
|
||||
|
||||
done.write('|')
|
||||
|
||||
|
||||
def format_col_3(item):
|
||||
global number_of_days
|
||||
# print('number_of_days-3:', number_of_days)
|
||||
item = 'T:' + str(number_of_days)
|
||||
|
||||
done.write(' ')
|
||||
done.write(item.strip())
|
||||
for i in range(10 - 1 - len(item.strip())):
|
||||
done.write(' ')
|
||||
|
||||
done.write('|')
|
||||
|
||||
|
||||
def format_col_4(item):
|
||||
done.write(' ')
|
||||
try:
|
||||
with open('/tmp/col.log', 'r') as obj_col:
|
||||
width = int(obj_col.readline().strip())
|
||||
except:
|
||||
width = 125
|
||||
|
||||
item_format = re.sub(' +', ' ', item.strip())
|
||||
len_of_task = utf8_length(item_format)
|
||||
# get the width of current terminal
|
||||
left_white = width - 11 - 12 - 10 - 5 - len_of_task - 8
|
||||
done.write(item_format + ' ' * left_white + '\n')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
number_of_days = 0
|
||||
item_no = 1
|
||||
# 0 means the task has no priority, 1 means it does
|
||||
with open('/opt/logs/TODO/todo.txt', 'r') as todo, open('/tmp/tmp.txt', 'w') as done:
|
||||
for line in todo.readlines():
|
||||
col = 1
|
||||
auto_weekend = 0
|
||||
|
||||
if line.strip() == '':
|
||||
continue
|
||||
elif 'Happy weekend~' in line:
|
||||
auto_weekend = 1
|
||||
else:
|
||||
pass
|
||||
|
||||
for item in line.strip().split('|'):
|
||||
if col == 1:
|
||||
format_col_1(item)
|
||||
elif col == 2:
|
||||
format_col_2(item)
|
||||
elif col == 3:
|
||||
format_col_3(item)
|
||||
elif col == 4:
|
||||
format_col_4(item)
|
||||
else:
|
||||
break
|
||||
|
||||
col += 1
|
||||
item_no += 1
|
||||
|
||||
|
||||
os.system('mv /tmp/tmp.txt /opt/logs/TODO/todo.txt')
|
||||
tidy_done()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
15
todo/watch_todo.sh
Normal file
@ -0,0 +1,15 @@
|
||||
#!/bin/bash
|
||||
|
||||
tput cols > /tmp/col.log
|
||||
python3 /opt/scripts/todo/todo_format.py
|
||||
result=$?
|
||||
rm -rf /tmp/col.log
|
||||
|
||||
exit $result
|
||||
|
||||
# while :; do
|
||||
# echo $COLUMNS > /tmp/col.log
|
||||
# python3 /opt/scripts/todo/todo_format.py
|
||||
# sleep 1
|
||||
# done
|
||||
|
37
update/backups.sh
Normal file
@ -0,0 +1,37 @@
|
||||
#!/bin/bash
|
||||
|
||||
echo "========================================================================="
|
||||
echo Start Time: `date`
|
||||
set -x
|
||||
|
||||
cp -rf /var/spool/cron/crontabs/ /opt/configs/
|
||||
cp -rf /usr/local/nginx/conf/nginx.conf /opt/configs/nginx/nginx.conf
|
||||
cp -rf /usr/local/nginx/conf/domain_confs/ /opt/configs/nginx/
|
||||
cp -rf /etc/docker/daemon.json /opt/configs/conf/
|
||||
cp -rf /etc/sysctl.conf /opt/configs/conf/
|
||||
cp -rf /etc/pip.conf /opt/configs/conf/
|
||||
cp -rf /etc/apt/sources.list /opt/configs/conf/
|
||||
cp -rf /root/.acme.sh/*ecc /opt/configs/acme/
|
||||
|
||||
cd /opt && t=`date +%Y%m%dT%H%M%S`
|
||||
rsync -av apps configs logs scripts websites wd/72-Backups/CrossChain/VPS/ > /opt/logs/rsync/rsync_${t}.log
|
||||
cd /opt/logs/rsync/
|
||||
let count=`ls | wc -l`
|
||||
if [[ $count -gt 10 ]]; then
|
||||
rsync_logs=()
|
||||
for((i=1;i<=$count;i++)); do
|
||||
rsync_logs[$i]=`ls | sort -r | head -n $i | tail -1`
|
||||
done
|
||||
|
||||
for i in `seq 11 $count`; do
|
||||
rm -rf ${rsync_logs[$i]}
|
||||
done
|
||||
fi
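The array shuffle above only exists to delete everything beyond the ten newest logs; the same rotation fits in one pipeline, sketched here as an alternative (run from /opt/logs/rsync, where the script already is):
# keep only the 10 newest rsync logs (timestamped names sort chronologically)
ls | sort -r | tail -n +11 | xargs -r rm -f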
|
||||
|
||||
# tar -I pixz -cf $backup_dir/vps-${t}.tar.xz -C /opt configs data logs scripts source-code websites > /dev/null 2>&1
|
||||
set +x
|
||||
echo End Time: `date`
|
||||
echo "========================================================================="
|
||||
echo
|
||||
echo
|
||||
|
17
update/calibre.sh
Normal file
@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
|
||||
# get the pid of calibre, and write it into file /tmp/calibre_pids
|
||||
ps -ef | grep '/opt/apps/calibre/venv/bin/cps' | grep -v grep | awk '{print $2}' > /tmp/calibre_pids
|
||||
|
||||
# if the pid file is empty, there is no running calibre process
|
||||
if [[ ! -s /tmp/calibre_pids ]]; then
|
||||
echo -e "\e[1;31mCannot terminate Calibre process cause there is no such things, will run calibre later automatically.\e[0m"
|
||||
else
|
||||
for calibre_pid in `cat /tmp/calibre_pids`; do
|
||||
kill -9 $calibre_pid > /dev/null 2>&1
|
||||
done
|
||||
fi
|
||||
|
||||
nohup /opt/apps/calibre/venv/bin/python /opt/apps/calibre/venv/bin/cps > /dev/null 2>&1 &
|
||||
echo -e "\e[1;32mCalibre process started successfully\e[0m"
|
||||
|
64
update/dash_rand_logo.sh
Normal file
@ -0,0 +1,64 @@
|
||||
#!/bin/bash
|
||||
|
||||
# count the available logo images and the number needed
|
||||
IMAGE_NUM_ALL=`ls /opt/apps/localcr/A_dashboard/local/icons | wc -l`
|
||||
IMAGE_NUM_NEEDED=`cat /root/services.yaml | grep -E '[0-9]{1,3}.jpg' | wc -l`
|
||||
if [[ $IMAGE_NUM_NEEDED -gt $IMAGE_NUM_ALL ]]; then
|
||||
alarm='Dashboard:\nThere is NOT enough logos to use, please check ASAP.'
|
||||
bash /opt/scripts/alert/sendmsg.sh "$alarm"
|
||||
exit 233
|
||||
fi
|
||||
|
||||
# initialize the array
|
||||
declare -a arr=()
|
||||
|
||||
# randomly pick image names without repeats
|
||||
for ((i=0; i<$IMAGE_NUM_NEEDED; i++)); do
|
||||
while true; do
|
||||
# generate a random image name
|
||||
RAND=$((RANDOM%IMAGE_NUM_ALL))
|
||||
IMG_NAME=$RAND.jpg
|
||||
|
||||
# check whether this image name has already been chosen
|
||||
chosen=0
|
||||
for num in "${arr[@]}"; do
|
||||
if [ "$num" == "$IMG_NAME" ]; then
|
||||
chosen=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# if the image name has not been chosen yet, add it to the array
|
||||
if [ "$chosen" -eq 0 ]; then
|
||||
arr=(${arr[@]} $IMG_NAME)
|
||||
break
|
||||
fi
|
||||
done
|
||||
done
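The nested loop re-rolls until it hits an unused number; shuf can draw the whole non-repeating sample in one call, sketched here as an alternative (equivalent only while IMAGE_NUM_NEEDED does not exceed IMAGE_NUM_ALL, which the guard at the top already enforces):
# alternative: pick IMAGE_NUM_NEEDED distinct image names in one shot
mapfile -t arr < <(shuf -i 0-$((IMAGE_NUM_ALL - 1)) -n "$IMAGE_NUM_NEEDED" | sed 's/$/.jpg/')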
|
||||
|
||||
# path and name of the file to modify
|
||||
filename_origin="/opt/apps/localcr/A_dashboard/local/config/services.yaml"
|
||||
filename_copy="/opt/apps/localcr/A_dashboard/local/config/services_copy.yaml"
|
||||
# filename_origin="/root/services.yaml"
|
||||
# filename_copy="/root/services_copy.yaml"
|
||||
cp $filename_origin $filename_copy
|
||||
|
||||
# loop over every line that matches the pattern
|
||||
let count=0
|
||||
lineno=1
|
||||
while read line; do
|
||||
# get the image file name from the line
|
||||
oldname=$(echo $line | grep -Eo '[0-9]{1,3}.jpg')
|
||||
|
||||
if [[ $oldname != '' ]]; then
|
||||
newname=${arr[$count]}
|
||||
# replace the file name
|
||||
sed -i "${lineno}s/\/$oldname/\/$newname/" $filename_origin
|
||||
let count=count+1
|
||||
fi
|
||||
let lineno=lineno+1
|
||||
done < $filename_copy
|
||||
|
||||
rm $filename_copy
|
||||
|
||||
|
38
update/jekyll_content_update.sh
Normal file
@ -0,0 +1,38 @@
|
||||
#!/bin/bash
|
||||
|
||||
function bash_update() {
|
||||
echo $directory$filename $action
|
||||
rm -rf /opt/websites/just-the-docs/bash
|
||||
jekyll b -s /opt/apps/document/bash -d /opt/websites/just-the-docs/bash
|
||||
}
|
||||
|
||||
function python_update() {
|
||||
echo $directory$filename $action
|
||||
rm -rf /opt/websites/just-the-docs/python
|
||||
jekyll b -s /opt/apps/document/python -d /opt/websites/just-the-docs/python
|
||||
}
|
||||
|
||||
|
||||
function blog_update() {
|
||||
echo $directory$filename $action
|
||||
rm -rf /opt/websites/blog
|
||||
jekyll b -s /opt/apps/blog/ -d /opt/websites/blog/
|
||||
echo -e '\n'
|
||||
}
|
||||
|
||||
|
||||
echo -e '\n\n==================================================================' >> /opt/logs/jekyll_update.log
|
||||
date >> /opt/logs/jekyll_update.log
|
||||
if [[ $1 == 'blog' ]]; then
|
||||
blog_update >> /opt/logs/jekyll_update.log
|
||||
elif [[ $1 == 'python' ]]; then
|
||||
python_update >> /opt/logs/jekyll_update.log
|
||||
elif [[ $1 == 'bash' ]]; then
|
||||
bash_update >> /opt/logs/jekyll_update.log
|
||||
else
|
||||
echo Wrong >> /opt/logs/jekyll_update.log
|
||||
exit 2
|
||||
fi
|
||||
|
||||
|
||||
|
12
update/jekyll_update.sh
Normal file
@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
inotify-hookable \
|
||||
--watch-directories /opt/apps/blog \
|
||||
--watch-directories /opt/apps/document/python \
|
||||
--watch-directories /opt/apps/document/bash \
|
||||
--ignore-paths /opt/apps/blog/.git/ \
|
||||
--ignore-paths /opt/apps/blog/img/avatar.jpg \
|
||||
--on-modify-path-command "(^/opt/apps/blog/.*)=(bash /opt/scripts/update/jekyll_content_update.sh blog)" \
|
||||
--on-modify-path-command "(^/opt/apps/document/python/.*)=(bash /opt/scripts/update/jekyll_content_update.sh python)" \
|
||||
--on-modify-path-command "(^/opt/apps/document/bash/.*)=(bash /opt/scripts/update/jekyll_content_update.sh 'bash')"
|
||||
|
62
update/nav_rand_logo.sh
Normal file
@ -0,0 +1,62 @@
|
||||
#!/bin/bash
|
||||
|
||||
# count the available logo images and the number needed
|
||||
IMAGE_NUM_ALL=`ls /opt/websites/nav/assets/images/logos | wc -l`
|
||||
IMAGE_NUM_NEEDED=`cat /opt/websites/nav/index.html | grep -E '[0-9]{1,3}.jpg' | wc -l`
|
||||
if [[ $IMAGE_NUM_NEEDED -gt $IMAGE_NUM_ALL ]]; then
|
||||
alarm='Navigation:\nThere is NOT enough logos to use, please check ASAP.'
|
||||
bash /opt/scripts/alert/sendmsg.sh "$alarm"
|
||||
exit 233
|
||||
fi
|
||||
|
||||
# initialize the array
|
||||
declare -a arr=()
|
||||
|
||||
# randomly pick image names without repeats
|
||||
for ((i=0; i<$IMAGE_NUM_NEEDED; i++)); do
|
||||
while true; do
|
||||
# generate a random image name
|
||||
RAND=$((RANDOM%IMAGE_NUM_ALL))
|
||||
IMG_NAME=$RAND.jpg
|
||||
|
||||
# check whether this image name has already been chosen
|
||||
chosen=0
|
||||
for num in "${arr[@]}"; do
|
||||
if [ "$num" == "$IMG_NAME" ]; then
|
||||
chosen=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# if the image name has not been chosen yet, add it to the array
|
||||
if [ "$chosen" -eq 0 ]; then
|
||||
arr=(${arr[@]} $IMG_NAME)
|
||||
break
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
# path and name of the file to modify
|
||||
filename_origin="/opt/websites/nav/index.html"
|
||||
filename_copy="/opt/websites/nav/index_copy.html"
|
||||
cp $filename_origin $filename_copy
|
||||
|
||||
# loop over every line that matches the pattern
|
||||
let count=0
|
||||
lineno=1
|
||||
while read line; do
|
||||
# get the image file name from the line
|
||||
oldname=$(echo $line | grep -Eo '[0-9]{1,3}.jpg')
|
||||
|
||||
if [[ $oldname != '' ]]; then
|
||||
newname=${arr[$count]}
|
||||
# replace the file name
|
||||
sed -i "${lineno}s/\/$oldname/\/$newname/" $filename_origin
|
||||
let count=count+1
|
||||
fi
|
||||
let lineno=lineno+1
|
||||
done < $filename_copy
|
||||
|
||||
rm $filename_copy
|
||||
|
||||
|
22
update/renew.sh
Normal file
@ -0,0 +1,22 @@
|
||||
#!/bin/bash
|
||||
|
||||
# get latest hosts for accelerate github visiting
|
||||
curl https://raw.hellogithub.com/hosts >> /opt/logs/hosts
|
||||
|
||||
# renew images.json file for background picture of homepage
|
||||
cd /opt/websites/homepage/
|
||||
node assets/js/bing.js > /dev/null 2>&1
|
||||
|
||||
# random logo for navigation
|
||||
bash /opt/scripts/update/nav_rand_logo.sh
|
||||
|
||||
# random logo for dash
|
||||
bash /opt/scripts/update/dash_rand_logo.sh
|
||||
|
||||
# logo renew
|
||||
let numOfAvatar=`ls /opt/websites/nav/assets/images/logos/ | wc -l`
|
||||
let randNumber=$RANDOM%$numOfAvatar
|
||||
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/websites/homepage/assets/img/logo.jpg
|
||||
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/websites/nav/assets/images/logos/avatar.jpg
|
||||
cp /opt/websites/nav/assets/images/logos/${randNumber}.jpg /opt/apps/blog/img/avatar.jpg
|
||||
|
80
utool/date2n.sh
Normal file
@ -0,0 +1,80 @@
|
||||
#!/bin/bash
|
||||
|
||||
# custom colors for terminal output
|
||||
c_br='\e[1;31m' # bold red
|
||||
c_bg='\e[1;32m' # bold green
|
||||
c_by='\e[1;33m' # bold yellow
|
||||
c_bb='\e[1;34m' # bold blue
|
||||
c_bp='\e[1;35m' # bold purple
|
||||
c_bc='\e[1;36m' # bold cyan
|
||||
c_bir='\e[1;3;31m' # * bold italic red
|
||||
c_big='\e[1;3;32m' # bold italic cyan
|
||||
c_bib='\e[1;3;34m' # * bold italic cyan
|
||||
c_bic='\e[1;3;36m' # bold italic cyan
|
||||
c_e='\e[0m' # reset
|
||||
|
||||
function usage() {
|
||||
echo -e "${c_bir}将日期转换成十进制和十六进制时间戳,输入日期格式可参考如下,中间空格可替换成[a-zA-Z@#%^*:]中的任意一个单字符:${c_e}"
|
||||
echo -e " 2023/03/09 09:29:02"
|
||||
echo -e " 2023-03-09 09:29:02"
|
||||
echo -e " 09/03/2023 09:29:02"
|
||||
echo -e " 09/Mar/2023 09:29:02"
|
||||
exit 4
|
||||
}
|
||||
|
||||
|
||||
ts=$@
|
||||
|
||||
if [[ $ts =~ (([J|j]an)|([F|f]eb)|([M|m]ar)|([A|a]pr)|([M|m]ay)|([J|j]un)|([J|j]ul)|([A|a]ug)|([S|s]ep)|([O|o]ct)|([N|n]ov)|([D|d]ec)) ]]; then
|
||||
[[ ${#ts} -ne 20 ]] && usage
|
||||
else
|
||||
[[ ${#ts} -ne 19 ]] && usage
|
||||
fi
|
||||
|
||||
# 2023/03/09 09:29:02
|
||||
# 2023-03-09 09:29:02
|
||||
fmt1="^[0-9]{4}[-/][0-9]{2}[-/][0-9]{2}[a-zA-Z@#%^*:]{0,1}[[:space:]]{0,1}[0-9]{2}:[0-9]{2}:[0-9]{2}$"
|
||||
# 09/03/2023:09:29:02
|
||||
fmt2="^[0-9]{2}/[0-9]{2}/[0-9]{4}[a-zA-Z@#%^*:]{0,1}[[:space:]]{0,1}[0-9]{2}:[0-9]{2}:[0-9]{2}$"
|
||||
# 09/Mar/2023:09:29:02
|
||||
fmt3="^[0-9]{2}/(([J|j]an)|([F|f]eb)|([M|m]ar)|([A|a]pr)|([M|m]ay)|([J|j]un)|([J|j]ul)|([A|a]ug)|([S|s]ep)|([O|o]ct)|([N|n]ov)|([D|d]ec))/[0-9]{4}[a-zA-Z@#%^*:]{0,1}[[:space:]]{0,1}[0-9]{2}:[0-9]{2}:[0-9]{2}$"
|
||||
if [[ $ts =~ $fmt1 || $ts =~ $fmt2 ]]; then
|
||||
ts=${ts:0:10}' '${ts:11}
|
||||
dec=`date -d "$ts" +%s`
|
||||
hex=`echo "obase=16; $dec" | bc`
|
||||
echo "十进制的时间戳 - $dec"
|
||||
echo "十六进制时间戳 - 0x$hex - $hex"
|
||||
echo "十六进制时间戳 - 0x${hex,,} - ${hex,,}"
|
||||
|
||||
elif [[ $ts =~ $fmt3 ]]; then
|
||||
day=${ts:0:2}
|
||||
month=${ts:3:3}
|
||||
left=${ts:7}
|
||||
[[ $month =~ ^[J|j]an$ ]] && month='01'
|
||||
[[ $month =~ ^[F|f]eb$ ]] && month='02'
|
||||
[[ $month =~ ^[M|m]ar$ ]] && month='03'
|
||||
[[ $month =~ ^[A|a]pr$ ]] && month='04'
|
||||
[[ $month =~ ^[M|m]ay$ ]] && month='05'
|
||||
[[ $month =~ ^[J|j]un$ ]] && month='06'
|
||||
[[ $month =~ ^[J|j]ul$ ]] && month='07'
|
||||
[[ $month =~ ^[A|a]ug$ ]] && month='08'
|
||||
[[ $month =~ ^[S|s]ep$ ]] && month='09'
|
||||
[[ $month =~ ^[O|o]ct$ ]] && month='10'
|
||||
[[ $month =~ ^[N|n]ov$ ]] && month='11'
|
||||
[[ $month =~ ^[D|d]ec$ ]] && month='12'
|
||||
ts=$month'/'$day'/'$left
|
||||
ts=${ts:0:10}' '${ts:11}
|
||||
dec=`date -d "$ts" +%s`
|
||||
hex=`echo "obase=16; $dec" | bc`
|
||||
echo "十进制的时间 - $dec"
|
||||
echo "十六进制时间 - 0x${hex} - $hex"
|
||||
echo "十六进制时间 - 0x${hex,,} - ${hex,,}"
|
||||
else
|
||||
echo -e "${c_br}请检查输入的时间符合正常规则,退出...${c_e}"
|
||||
usage
|
||||
exit 10
|
||||
fi
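Example invocations covering the two format families recognised above (the repository path is an assumption; the resulting hex value can be fed back into utool/number2d.sh):
bash utool/date2n.sh "2023-03-09 09:29:02"
bash utool/date2n.sh "09/Mar/2023:09:29:02"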
|
||||
|
||||
|
||||
|
||||
|
28
utool/genpw.sh
Normal file
@ -0,0 +1,28 @@
#!/bin/bash

if [[ $# -ne 1 ]]; then
    echo -e "\e[1;3;31mNeed ONE and only ONE digital parameter.\e[0m"
    exit 1
fi

if ! [[ $1 -gt 0 && $1 -lt 80 ]] 2>/dev/null; then
    echo -e "\e[1;3;31mNeed ONE DIGITAL parameter, which must be greater than 0 and lower than 80.\e[0m"
    exit 2
fi

# Build an 80-character password pool from ten pwgen calls
# (pwgen defaults to 8-character passwords; -s secure, -y symbols, -n numerals, -c capitals)
pwgen_0=`pwgen -sync`
pwgen_1=`pwgen -sync`
pwgen_2=`pwgen -sync`
pwgen_3=`pwgen -sync`
pwgen_4=`pwgen -sync`
pwgen_5=`pwgen -sync`
pwgen_6=`pwgen -sync`
pwgen_7=`pwgen -sync`
pwgen_8=`pwgen -sync`
pwgen_9=`pwgen -sync`

pwgen_80=${pwgen_0}${pwgen_1}${pwgen_2}${pwgen_3}${pwgen_4}${pwgen_5}${pwgen_6}${pwgen_7}${pwgen_8}${pwgen_9}
echo ${pwgen_80:0:${1}}
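A usage sketch (illustrative; the utool.py install path is assumed to match the /opt/scripts/utool/ layout used by the other hard-coded paths in this commit):

# a 16-character password straight from the script
bash /opt/scripts/utool/genpw.sh 16
# the same through utool's -p option, which forwards its argument to genpw.sh
/opt/scripts/utool/utool.py -p 16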
13
utool/ipports.sh
Normal file
13
utool/ipports.sh
Normal file
@ -0,0 +1,13 @@
#!/bin/bash

if [[ $1 == 'port' ]]; then
    find /opt/apps/localcr/ -type f -name "*:*:*" | \
        awk -F '/' '{print $NF}' | \
        awk -F':' 'BEGIN{print "Local", "Container", "Application"} {print $1, $2, $3}' | \
        column -t | \
        sort -nk 1
elif [[ $1 == 'ip' ]]; then
    find /opt/apps/localcr/ -type f -name "10.10.0.*" | awk -F '/' '{print $(NF-1), $NF}' | column -t | sort -k2
else
    :
fi
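An invocation sketch (illustrative; utool.py runs this same script with these two arguments for its -a and -c options):

# Local/Container/Application port table built from the marker files under /opt/apps/localcr/
bash /opt/scripts/utool/ipports.sh port
# docker intranet IPs named 10.10.0.*
bash /opt/scripts/utool/ipports.sh ip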
35
utool/number2d.sh
Normal file
35
utool/number2d.sh
Normal file
@ -0,0 +1,35 @@
#!/bin/bash

# Custom colors for terminal output
c_br='\e[1;31m'    # bold red
c_bg='\e[1;32m'    # bold green
c_by='\e[1;33m'    # bold yellow
c_bb='\e[1;34m'    # bold blue
c_bp='\e[1;35m'    # bold purple
c_bc='\e[1;36m'    # bold cyan
c_bir='\e[1;3;31m' # bold italic red
c_big='\e[1;3;32m' # bold italic green
c_bib='\e[1;3;34m' # bold italic blue
c_bic='\e[1;3;36m' # bold italic cyan
c_e='\e[0m'        # reset


number=$1

if [[ $number =~ ^[0-9]{1,11}$ ]]; then
    # plain decimal timestamp
    date -d@$number +'%Y-%m-%d %H:%M:%S'
elif [[ $number =~ ^0[xX][0-9a-fA-F]{1,9}$ ]]; then
    # hexadecimal timestamp with a 0x/0X prefix
    hex=`printf "%d" $number`
    date -d@$hex +'%Y-%m-%d %H:%M:%S'
elif [[ $number =~ ^[0-9a-fA-F]{1,9}$ ]]; then
    # bare hexadecimal timestamp (no prefix)
    hex=`printf "%d" '0x'$number`
    date -d@$hex +'%Y-%m-%d %H:%M:%S'
else
    echo -e "${c_br}Please enter a valid decimal number (at most 11 digits) or hexadecimal number (at most 9 digits):${c_e}"
    echo -e "    Decimal timestamp     - 1678523385"
    echo -e "    Hexadecimal timestamp - 0x640c3bf9/0X640C3BF9/0x640C3bf9/640c3bf9"
    echo -e "${c_bb}When the input is ambiguous, use '0x' or '0X' to tell hexadecimal from decimal${c_e}"
    exit 233
fi
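A usage sketch (illustrative; the sample values are the ones printed by the script's own error message):

# decimal timestamp
bash /opt/scripts/utool/number2d.sh 1678523385
# hexadecimal timestamp, with and without the 0x prefix
bash /opt/scripts/utool/number2d.sh 0x640c3bf9
bash /opt/scripts/utool/number2d.sh 640c3bf9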
56
utool/toolbox.sh
Normal file
56
utool/toolbox.sh
Normal file
@ -0,0 +1,56 @@
# You may uncomment the following lines if you want `ls' to be colorized:
export LS_OPTIONS='--color=auto'
eval "`dircolors`"
alias ls='ls $LS_OPTIONS'
alias ll='ls $LS_OPTIONS -ahl'
alias l='ls $LS_OPTIONS -lA'

PS1='\e[1;32m[ $? \u@\h \W]\$ \e[0m'
alias cls='clear'
alias ll='ls -alh'
alias l='ll'
alias ..='cd ..; ls'
alias ...='cd ../..; ls'
alias cdblog='cd /opt/source-code/blog/_posts'
alias cdscripts='cd /opt/scripts'
alias cdconfigs='cd /opt/configs'
alias cdfrp='cd /opt/source-code/frpc/'
alias cdalist='cd /opt/webdav/alist/CTC-Client/Manford/ && ls'
alias cdone='cd /opt/webdav/onedrive/ && ls'
alias cdwd='cd /opt/webdav/wd/ && ls'
alias cdman='cd /opt/source-code/manford && ls'
alias conf='cd /usr/local/nginx/conf'
alias so='source ~/.bashrc'
alias python='python3'
alias py='python3'
alias vi='vim'
alias g='vim'
alias txl='tmux ls'
alias txn='tmux new -s'
alias txa='tmux at -t'
alias acme.sh=~/.acme.sh/acme.sh
alias tt='/opt/scripts/todo/todo.sh -d /opt/scripts/todo/todo.cfg'
alias tdate='date +%Y-%m-%d'
alias jtddate='date +"%Y-%m-%d %H:%M:%S"'
complete -F _todo tt
alias cdpython='cd /opt/source-code/document/python/ && ls'
export TIMETAGGER_CREDENTIALS='timetagger:$2a$08$s6ZkrdZGmwNADKY3K9X0jOgGWu4XMSVCGs4qbqMTupYRaUM2n4RKq'

export Ali_Key='LTAI5tMoM6J3Nzoi6JbT9waY'
export Ali_Secret='N89cC4JpxdBL1Hqr8WhefIPRVcKEAs'

export CF_Key="f0971f82ebc8d6dfffc1a4871759f6b17fd1a"
export CF_Email="xgdfmf@gmail.com"

export EDITOR='/usr/bin/vim'

if [ -f /etc/bash_completion ]; then
    . /etc/bash_completion
fi
# [[ ! -d /tmp/rclone ]] && mkdir -p /tmp/rclone

#. /opt/scripts/rclone/rclone_bash_completion.sh
. /opt/scripts/todo/todo_completion

source /opt/source-code/v2ray-4.34.0/envfile
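toolbox.sh has no shebang and only defines aliases, PS1, completions, and exported variables, so it is presumably meant to be sourced by an interactive shell rather than executed. A hypothetical hook in ~/.bashrc (the /opt/scripts/utool/ install path is assumed, matching the other utool scripts):

# hypothetical: pull the toolbox definitions into every interactive shell
[ -f /opt/scripts/utool/toolbox.sh ] && . /opt/scripts/utool/toolbox.sh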
29
utool/usage.sh
Normal file
29
utool/usage.sh
Normal file
@ -0,0 +1,29 @@
#!/bin/bash

let col=`tput cols`
if [[ $col -lt 120 ]]; then
    echo -e "\e[1;3;31mYour screen width is too small to show the usage info neatly, so please maximize the display window.\e[0m"
    read -p "Press any key to continue..."
    echo ''
fi


echo -e "\e[1;32mDESCRIPTION:\e[0m"
echo -e "\e[3;32mutool -- a self-defined command line interface used to facilitate operating the system. It supports the following options; in the description columns, a leading asterisk means the option must take an argument.\e[0m"
echo -e "\e[1;4m \e[0m"
echo -e "\e[37;40m|\e[0m\e[1;4;37;40mOption| Description |Option| Description \e[0m\e[37;40m|\e[0m"
echo -e "\e[37;40m| -a | print all local ports currently in use | -n | |\e[0m"
echo -e "\e[37;40m| -b | generate two-factor key of GITHUB | -o | |\e[0m"
echo -e "\e[37;40m| -c | print all intranet IP using in docker | -p |*generate password of length 1~79 |\e[0m"
echo -e "\e[37;40m| -d | show external IP of this machine | -q | |\e[0m"
echo -e "\e[37;40m| -e |*show IP location | -r | |\e[0m"
echo -e "\e[37;40m| -f |*generate md5 value of the input string | -s | |\e[0m"
echo -e "\e[37;40m| -g | | -t | |\e[0m"
echo -e "\e[37;40m| -h | show this help information | -u | |\e[0m"
echo -e "\e[37;40m| -i |*get domain's registration info | -v | |\e[0m"
echo -e "\e[37;40m| -j |*convert a (hexa)decimal to specific time format | -w | |\e[0m"
echo -e "\e[37;40m| -k |*convert multi-format time to a decimal | -x | |\e[0m"
echo -e "\e[37;40m| -l | | -y | |\e[0m"
echo -e "\e[37;40m|\e[0m\e[4;37;40m -m | | -z | \e[0m\e[37;40m|\e[0m\n"
178
utool/utool.py
Executable file
178
utool/utool.py
Executable file
@ -0,0 +1,178 @@
#!/usr/bin/python3
# coding: utf-8
import os
import sys
import time

c_title = '\033[1;4;31;42m'  # title color
c_br = '\033[1;31m'          # bold red
c_bg = '\033[1;32m'          # bold green
c_by = '\033[1;33m'          # bold yellow
c_bb = '\033[1;34m'          # bold blue
c_bp = '\033[1;35m'          # bold purple
c_bc = '\033[1;36m'          # bold cyan
c_bir = '\033[1;3;31m'       # bold italic red
c_bib = '\033[1;3;34m'       # bold italic blue
c_bic = '\033[1;3;36m'       # bold italic cyan
c_e = '\033[0m'              # reset

# Split the command line into the option itself and whatever follows it
if len(sys.argv) == 2:
    opt_1 = sys.argv[1]
    opt_rest = None
elif len(sys.argv) > 2:
    opt_1 = sys.argv[1]
    opt_rest = sys.argv[2:]
else:
    opt_1 = None
    opt_rest = None

opt_index = ['-do_not_use_this', '-a', '-b', '-c', '-d', '-e', '-f', '-g', '-h', '-i', '-j', '-k', '-l', '-m',
             '-n', '-o', '-p', '-q', '-r', '-s', '-t', '-u', '-v', '-w', '-x', '-y', '-z']

# Unknown option: show the help screen and bail out
if opt_1 not in opt_index:
    os.system('/usr/bin/bash /opt/scripts/utool/usage.sh')
    exit(100)

if opt_1 == '-a':
    if opt_rest is not None:
        print(f"This option({opt_1}) will print all local ports currently in use, and accepts NO parameter.")
        exit(opt_index.index(opt_1))

    os.system('/usr/bin/bash /opt/scripts/utool/ipports.sh port')

elif opt_1 == '-b':
    if opt_rest is not None:
        print(f"This option({opt_1}) will generate a two-factor auth-key for github login, and accepts NO parameter.")
        exit(opt_index.index(opt_1))

    os.system("/usr/bin/oathtool -b --totp 'G3NHHFO2L2LZ5W2R'")

elif opt_1 == '-c':
    if opt_rest is not None:
        print(f"This option({opt_1}) will print all intranet IPs used in docker, and accepts NO parameter.")
        exit(opt_index.index(opt_1))

    os.system('/usr/bin/bash /opt/scripts/utool/ipports.sh ip')

elif opt_1 == '-d':
    if opt_rest is not None:
        print(f"This option({opt_1}) will return the current IP of the local machine, and accepts NO parameter.")
        exit(opt_index.index(opt_1))

    os.system("/usr/bin/python3 /opt/scripts/roll_api/get_self_ip.py")

elif opt_1 == '-e':
    if opt_rest is None:
        print(f"This option({opt_1}) will return the IP info, and accepts at least one IP parameter.")
        exit(opt_index.index(opt_1))

    for opt_2 in opt_rest:
        os.environ['opt_2'] = opt_2
        os.system('/usr/bin/python3 /opt/scripts/roll_api/get_ip.py $opt_2')

elif opt_1 == '-f':
    if len(sys.argv) < 3:
        print(f"{c_br}At least one string is required as input, exiting...{c_e}\n")
        exit(opt_index.index(opt_1))

    param = ''
    for item in sys.argv[2:]:
        param += item

    os.environ['param'] = param
    print(f"{c_by}Warning: any whitespace inside the input string will be removed; to keep it, wrap the string in single quotes{c_e}")
    print(f"String for this MD5 calculation - {param}\nMD5 value of this calculation   - ", end='')
    # print(f"MD5 value of this calculation - ", end='')
    os.system('echo -n $param | md5sum | cut -d " " -f 1')

elif opt_1 == '-g':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-h':
    os.system('/usr/bin/bash /opt/scripts/utool/usage.sh')
    exit(opt_index.index(opt_1))
elif opt_1 == '-i':
    if opt_rest is None:
        print(f"This option({opt_1}) will return the domain registration info, and accepts at least one domain parameter.")
        exit(opt_index.index(opt_1))

    for opt_2 in opt_rest:
        os.environ['opt_2'] = opt_2
        os.system('/usr/bin/python3 /opt/scripts/roll_api/domain_reg_check.py $opt_2')

elif opt_1 == '-j':
    if len(sys.argv) == 2:
        print(f"{c_br}-j option: converts a decimal or hexadecimal number into the standard time format. Only the first argument is used, the rest are discarded; when ambiguous, use '0x' or '0X' to tell hexadecimal from decimal. Exiting...{c_e}\n")
        exit(opt_index.index(opt_1))

    os.environ['param'] = sys.argv[2]
    os.system('/usr/bin/bash /opt/scripts/utool/number2d.sh $param')

elif opt_1 == '-k':
    if len(sys.argv) < 3:
        print(f"{c_br}-k option: converts the input time into decimal and hexadecimal timestamps, and needs one time string as input. Exiting...{c_e}\n")
        exit(opt_index.index(opt_1))

    param = ''
    for item in sys.argv[2:]:
        param += item + ' '

    os.environ['param'] = param
    os.system('/usr/bin/bash /opt/scripts/utool/date2n.sh $param')

elif opt_1 == '-l':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-m':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-n':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-o':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-p':
    if opt_rest is None:
        opt_rest = ['', ]

    opt_2 = opt_rest[0]
    os.environ['opt_2'] = opt_2
    os.system('/usr/bin/bash /opt/scripts/utool/genpw.sh $opt_2')

elif opt_1 == '-q':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-r':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-s':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-t':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-u':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-v':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-w':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-x':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-y':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
elif opt_1 == '-z':
    print(f"This option({opt_1}) is reserved now, nothing is bound on it.")
    exit(opt_index.index(opt_1))
else:
    print("This line SHOULD NOT be executed, please check carefully.")
    exit(255)
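A quick end-to-end sketch (illustrative; it assumes utool.py is installed as /opt/scripts/utool/utool.py next to the shell helpers it dispatches to, which is what the hard-coded paths above suggest):

/opt/scripts/utool/utool.py -h                        # option table from usage.sh
/opt/scripts/utool/utool.py -a                        # local port mappings via ipports.sh
/opt/scripts/utool/utool.py -p 20                     # 20-character password via genpw.sh
/opt/scripts/utool/utool.py -j 1678523385             # timestamp -> date via number2d.sh
/opt/scripts/utool/utool.py -k 2023-03-09 09:29:02    # date -> timestamps via date2n.sh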