gitea 04bd1238d2 v0.2.0.5(2024/07/31)
This release contains substantial changes: the shared code has been consolidated into the new commons folder, and all custom modules now use the logging module to record important information.
1. [t_change_ui: clibs.py]
   - Reorganised the code structure: added this new module and merged the shared functions and classes into it
   - Moved a number of constants into this module
   - Introduced the logging / concurrent_log_handler modules and initialised them for the other modules to use; logs rotate at 50 MB with at most 10 backups kept (see the sketch after this list)
   - Rewrote the project-name handling in the prj_to_xcore function, fixing an issue where projects containing multiple prj files might fail to run
2. [t_change_ui: openapi.py]
   - Completely rewrote the get_from_id function to make it more accurate
   - Added a logger to the msg_storage function so that all response messages are kept
   - Removed the log-saving part of the heartbeat function
   - Changed the heartbeat interval back to 2 s...
3. [t_change_ui: aio.py]
   - Added the logging initialisation
   - In the detect_network function, changed the interval for re-instantiating HR to 4 s, matching the heartbeat
4. [t_change_ui: do_brake.py]
   - Worked around the OOM problem by keeping the curve open the whole time, and changed the data handling to take only the last 12 s (see the windowing sketch after this list)
5. [t_change_ui: do_current.py]
   - Holding current: take only the last 15 s
6. [t_change_ui: all parts]: introduced the commons package and customised the logging output; further optimisation to follow
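
For item 1, a minimal sketch of what the rotating-log initialisation could look like; the log path, logger name and message format are illustrative assumptions, not the actual clibs.py code:

import logging
from concurrent_log_handler import ConcurrentRotatingFileHandler

handler = ConcurrentRotatingFileHandler(
    "logs/t_change_ui.log",        # assumed log path
    maxBytes=50 * 1024 * 1024,     # rotate at 50 MB
    backupCount=10,                # keep at most 10 rotated files
    encoding="utf-8",
)
handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s"))
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.INFO)

Each module then obtains its own logger via logging.getLogger(...) (the file below uses getLogger(__file__)) and inherits the shared handler.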
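
Items 4 and 5 describe the windowing only in prose; a minimal sketch of "keep only the last N seconds", assuming tab-separated data files and a fixed, known sample period (tail_window and sample_period are hypothetical names, not code from the repository):

from pandas import read_csv

def tail_window(data_file, seconds, sample_period):
    # keep only the trailing window of the recording, e.g. seconds=12 for do_brake.py
    # and seconds=15 for do_current.py; sample_period is the sampling interval in seconds
    df = read_csv(data_file, sep='\t')
    rows = int(seconds / sample_period)
    return df.tail(rows)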
2024-07-31 08:05:36 +08:00


# coding: utf-8
from os.path import isfile
from sys import argv
from openpyxl import load_workbook
from time import time, sleep, strftime, localtime
from threading import Thread
from pandas import read_csv
from logging import getLogger
from commons import clibs
logger = getLogger(__file__)


def check_files(path, raw_data_dirs, result_files, w2t):
    # Purpose: check that the data directories and result files are organised correctly
    # Params: working directory, data directories, result files
    # Returns: -
    if len(result_files) != 4:
        for result_file in result_files:
            w2t(result_file)
        w2t("需要有四个文件,包括三个结果文件,以及一个配置文件,请确认!", 0, 2, 'red')

    for result_file in result_files:
        if result_file.endswith('configs.xlsx'):
            result_files.remove(result_file)
            break
    else:
        w2t("未找到配置文件,请确认!", 0, 8, 'red')

    prefix = []
    for result_file in result_files:
        prefix.append(result_file.split('\\')[-1].split('_')[0])
    if not sorted(prefix) == sorted(['reach33', 'reach66', 'reach100']):
        msg = f"""请关闭所有相关数据文件,并检查工作目录 {path} 下,有且只允许有类似如下三个文件:
1. reach33_XXX制动性能测试.xlsx
2. reach66_XXX制动性能测试.xlsx
3. reach100_XX制动性能测试.xlsx"""
        w2t(msg, 0, 3, 'red')

    for raw_data_dir in raw_data_dirs:
        # directory names follow reachAA_loadBB_speedCC, so the split keeps that order
        components = raw_data_dir.split('\\')[-1].split('_')
        if components[0] not in ['reach33', 'reach66', 'reach100'] or \
           components[1] not in ['load33', 'load66', 'load100'] or \
           components[2] not in ['speed33', 'speed66', 'speed100']:
            msg = f"""报错信息:数据目录 {raw_data_dir} 命名不合规,请参考如下形式:
命名规则:reachAA_loadBB_speedCC
规则解释:AA/BB/CC 指的是臂展/负载/速度的比例,例如 reach66_load100_speed33:66%臂展、100%负载以及33%速度情况下的测试结果文件夹"""
            w2t(msg, 0, 4, 'red')
        _, raw_data_files = clibs.traversal_files(raw_data_dir, w2t)
        if len(raw_data_files) != 3:
            msg = f"数据目录 {raw_data_dir} 下数据文件个数错误,每个数据目录下有且只能有三个以 .data 为后缀的数据文件"
            w2t(msg, 0, 5, 'red')
        for raw_data_file in raw_data_files:
            if not raw_data_file.split('\\')[-1].endswith('.data'):
                msg = f"数据文件 {raw_data_file} 后缀错误,每个数据目录下有且只能有三个以 .data 为后缀的数据文件"
                w2t(msg, 0, 6, 'red')

    w2t("数据目录合规性检查结束,未发现问题......")


def get_configs(configfile, w2t):
    axis = configfile.split('\\')[-2][-1]
    if axis not in ['1', '2', '3']:
        w2t("被处理的根文件夹命名必须是 [Jj][123] 的格式", 0, 9, 'red')
    else:
        axis = int(axis)

    _wb = load_workbook(configfile, read_only=True)
    _ws = _wb['Target']
    rr = float(_ws.cell(row=2, column=axis+1).value)
    av = float(_ws.cell(row=3, column=axis+1).value)
    return av, rr


def now_doing_msg(docs, flag, w2t):
    # Purpose: report which file or directory is currently being processed
    # Params: file or directory; flag is 'start' or 'done'
    # Returns: -
    now = strftime('%Y-%m-%d %H:%M:%S', localtime(time()))
    file_type = 'file' if isfile(docs) else 'dir'
    if flag == 'start' and file_type == 'dir':
        w2t(f"[{now}] 正在处理目录 {docs} 中的数据......")
    elif flag == 'start' and file_type == 'file':
        w2t(f"[{now}] 正在处理文件 {docs} 中的数据......")
    elif flag == 'done' and file_type == 'dir':
        w2t(f"[{now}] 目录 {docs} 数据文件已处理完毕")
    elif flag == 'done' and file_type == 'file':
        w2t(f"[{now}] 文件 {docs} 数据已处理完毕")


def w2t_local(msg, wait, w2t):
    # print progress marks until the caller sets the global stop flag
    global stop
    while True:
        if stop == 0 and wait != 0:
            sleep(1)
            w2t(msg, wait, 0, 'orange')
        else:
            break


def copy_data_to_result(df, ws_result, row_start, row_end, vel, trq, estop):
    # Purpose: copy the valid rows of the data file into the matching sheet of the result file
    # Params: as above
    # Returns: -
    data = []
    for _row in range(row_start, row_end + 1):
        data.append(df.iloc[_row, vel-1])
        data.append(df.iloc[_row, trq-1])
        data.append(df.iloc[_row, estop-1])

    # once the new data runs out, the remaining rows are overwritten with None to clear stale results
    i = 0
    row_max = 2000 if row_end-row_start < 2000 else row_end-row_start+20
    for _row in range(2, row_max):
        try:
            ws_result.cell(row=_row, column=1).value = data[i]
            ws_result.cell(row=_row, column=2).value = data[i+1]
            ws_result.cell(row=_row, column=3).value = data[i+2]
            i += 3
        except IndexError:
            ws_result.cell(row=_row, column=1).value = None
            ws_result.cell(row=_row, column=2).value = None
            ws_result.cell(row=_row, column=3).value = None


def find_row_start(data_file, df, conditions, av, rr, vel, estop, w2t):
    # Purpose: locate the valid data range, i.e. the last point where the speed starts to drop
    # Params: as above
    # Returns: the row where the speed drop starts, and the last valid data row
    ratio = float(conditions[2].removeprefix('speed'))/100
    av_max = av * ratio
    row_max = df.index[-1]
    threshold = 0.95

    # search backwards for the last row where ESTOP is non-zero
    for _row in range(row_max, -1, -1):
        if df.iloc[_row, estop-1] != 0:
            row_start = _row-20 if _row-20 > 0 else 0
            break
    else:
        w2t(f"数据文件 {data_file} 采集的数据中没有 ESTOP 为非 0 的情况,需要确认", 0, 9, 'red')

    # joint speed: rad/s * 180/pi -> deg/s, scaled by the reduction ratio rr, * 60/360 -> rev/min
    for _row in range(row_start, row_max):
        speed_row = (df.iloc[_row, vel-1] * 180) / 3.1415926 * rr * 60 / 360
        if abs(speed_row) < 1:
            row_end = _row+100 if _row+100 <= row_max else row_max
            break
    else:
        w2t(f"数据文件 {data_file} 最后的速度未降为零 ", 0, 10, 'red')

    av_estop = abs((df.iloc[row_start-10:row_start+10, vel-1].abs().mean() * 180) / 3.1415926)
    if abs(av_estop/av_max) < threshold:
        filename = data_file.split('\\')[-1]
        w2t(f"[av_estop: {av_estop:.2f} | shouldbe: {av_max:.2f}] 数据文件 {filename} 触发 ESTOP 时未采集到指定百分比的最大速度,需要检查", 0, 0, '#8A2BE2')

    return row_start, row_end


def find_result_sheet_name(conditions, count):
    # Purpose: build the exact sheet name in the result file
    # Params: the sorted condition list ['loadxx', 'reachxx', 'speedxx'] and the file counter
    # Returns: the sheet name in the result workbook, e.g. 33%负载_33%速度_1
    load = conditions[0].removeprefix('load')
    speed = conditions[2].removeprefix('speed')
    result_sheet_name = f"{load}%负载_{speed}%速度_{count}"
    return result_sheet_name


def single_file_process(data_file, wb_result, count, av, rr, vel, trq, estop, w2t):
    # Purpose: process a single data file end to end
    # Params: as above
    # Returns: -
    df = read_csv(data_file, sep='\t')
    conditions = sorted(data_file.split('\\')[-2].split('_'))  # ['loadxx', 'reachxx', 'speedxx']
    result_sheet_name = find_result_sheet_name(conditions, count)
    ws_result = wb_result[result_sheet_name]
    row_start, row_end = find_row_start(data_file, df, conditions, av, rr, vel, estop, w2t)
    copy_data_to_result(df, ws_result, row_start, row_end, vel, trq, estop)


def data_process(result_file, raw_data_dirs, av, rr, vel, trq, estop, w2t):
    # Purpose: process all the data belonging to one result file
    # Params: result file, data directories, and the pre-read parameters
    # Returns: -
    file_name = result_file.split('\\')[-1]
    w2t(f"正在打开文件 {file_name} 需要 1min 左右", 1, 0, 'orange')
    global stop
    stop = 0
    t_excel = clibs.GetThreadResult(load_workbook, args=(result_file, ))
    t_wait = Thread(target=w2t_local, args=('.', 1, w2t))
    t_excel.start()
    t_wait.start()
    t_excel.join()
    wb_result = t_excel.get_result()
    stop = 1
    sleep(1.1)
    w2t('')

    prefix = result_file.split('\\')[-1].split('_')[0]
    for raw_data_dir in raw_data_dirs:
        if raw_data_dir.split('\\')[-1].split('_')[0] == prefix:
            now_doing_msg(raw_data_dir, 'start', w2t)
            _, data_files = clibs.traversal_files(raw_data_dir, w2t)
            # serial processing of the data files ----------------
            # count = 1
            # for data_file in data_files:
            #     now_doing_msg(data_file, 'start', w2t)
            #     single_file_process(data_file, wb_result, count, av, rr, vel, trq, estop, w2t)
            #     count += 1
            #     now_doing_msg(data_file, 'done', w2t)
            # ---------------------------------------------------
            # parallel processing of the data files --------------
            threads = [
                Thread(target=single_file_process, args=(data_files[0], wb_result, 1, av, rr, vel, trq, estop, w2t)),
                Thread(target=single_file_process, args=(data_files[1], wb_result, 2, av, rr, vel, trq, estop, w2t)),
                Thread(target=single_file_process, args=(data_files[2], wb_result, 3, av, rr, vel, trq, estop, w2t))
            ]
            [t.start() for t in threads]
            [t.join() for t in threads]
            # ---------------------------------------------------
            now_doing_msg(raw_data_dir, 'done', w2t)

    now_doing_msg(result_file, 'done', w2t)
    w2t(f"正在保存文件 {file_name} 需要 1min 左右", 1, 0, 'orange')
    stop = 0
    t_excel = Thread(target=wb_result.save, args=(result_file, ))
    t_wait = Thread(target=w2t_local, args=('.', 1, w2t))
    t_excel.start()
    t_wait.start()
    t_excel.join()
    stop = 1
    sleep(1.1)
    w2t('\n')


def main(path, vel, trq, estop, w2t):
    # Purpose: drive the processing of all the data files
    # Params: the return values of the initialization function
    # Returns: -
    time_start = time()
    raw_data_dirs, result_files = clibs.traversal_files(path, w2t)
    try:
        # threads = []
        check_files(path, raw_data_dirs, result_files, w2t)
        av, rr = get_configs(path + '\\configs.xlsx', w2t)
        prefix = []
        for raw_data_dir in raw_data_dirs:
            prefix.append(raw_data_dir.split('\\')[-1].split("_")[0])

        for result_file in result_files:
            if result_file.split('\\')[-1].split('_')[0] not in set(prefix):
                continue
            else:
                now_doing_msg(result_file, 'start', w2t)
                data_process(result_file, raw_data_dirs, av, rr, vel, trq, estop, w2t)
        #         threads.append(Thread(target=data_process, args=(result_file, raw_data_dirs, av, rr, vel, trq, estop, w2t)))
        # [t.start() for t in threads]
        # [t.join() for t in threads]
    except Exception as Err:
        msg = f"出现错误:{Err}\n程序运行错误,请检查配置文件是否准确设定,以及数据文件组织是否正确,也有可能是结果文件损坏,尝试重新复制一份,再运行!"
        w2t(msg, 0, 11, 'red')

    w2t("----------------------------------------------------------")
    w2t("全部处理完毕")
    time_end = time()
    time_total = time_end - time_start
    msg = f"数据处理时间:{time_total // 3600:02.0f} h {time_total % 3600 // 60:02.0f} m {time_total % 60:02.0f} s"
    w2t(msg)


if __name__ == "__main__":
    stop = 0
    main(*argv[1:])
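
# For reference only - a hypothetical caller such as aio.py would import this module and
# call main() directly rather than going through argv; the module name, path, the column
# indices for vel/trq/estop and the w2t callback below are assumptions, not actual repository code:
#
#     import do_brake
#
#     def w2t(msg, wait=0, err=0, color='black'):
#         print(msg)  # the real w2t writes into the GUI text area
#
#     do_brake.main(r'D:\brake_test\j1', 1, 2, 3, w2t)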