Brake performance testing and data processing completed

This commit is contained in:
2025-01-16 19:28:57 +08:00
parent 2413d6d305
commit 26f01635df
55 changed files with 1588 additions and 1251 deletions

View File

@@ -1,13 +1,14 @@
import openpyxl
import json
import os.path
import time
import pandas
import threading
import openpyxl
import re
from common import clibs
def check_files(path, rawdata_dirs, result_files, w2t):
def check_files(rawdata_dirs, result_files, w2t):
msg_wrong = "需要有四个文件和若干个数据文件夹,可参考如下确认:\n"
msg_wrong += "1. reach33_XXXXXXX.xlsx\n2. reach66_XXXXXXX.xlsx\n3. reach100_XXXXXXX.xlsx\n4. *.cfg\n"
msg_wrong += "- reach33_load33_speed33\nreach33_load33_speed66\n......\nreach100_load100_speed66\nreach100_load100_speed100\n"
@@ -33,8 +34,10 @@ def check_files(path, rawdata_dirs, result_files, w2t):
reach_s = ['reach33', 'reach66', 'reach100']
load_s = ['load33', 'load66', 'load100']
speed_s = ['speed33', 'speed66', 'speed100']
prefix = []
for rawdata_dir in rawdata_dirs:
components = rawdata_dir.split("/")[-1].split('_') # reach_load_speed
prefix.append(components[0])
if components[0] not in reach_s or components[1] not in load_s or components[2] not in speed_s:
msg = f"报错信息:数据目录 {rawdata_dir} 命名不合规,请参考如下形式\n"
msg += "命名规则reachAA_loadBB_speedCCAA/BB/CC 指的是臂展/负载/速度的比例\n"
@@ -47,161 +50,139 @@ def check_files(path, rawdata_dirs, result_files, w2t):
w2t(msg, "red", "WrongDataFile")
for rawdata_file in rawdata_files:
if not rawdata_file.endswith(".data"):
msg = f"数据文件 {rawdata_file} 后缀错误,每个数据目录下有且只能有三个以 .data 为后缀的数据文件"
msg = f"数据文件 {rawdata_file} 后缀错误,每个数据目录下有且只能有三个以 .data 为后缀的数据文件\n"
w2t(msg, "red", "WrongDataFile")
w2t("数据目录合规性检查结束,未发现问题......")
result_files = []
for _ in [reach33_file, reach66_file, reach100_file]:
if _.split("/")[-1].split("_")[0] in set(prefix):
result_files.append(_)
w2t("数据目录合规性检查结束,未发现问题......\n")
return config_file, result_files
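# Sketch of the on-disk layout check_files expects, reconstructed from the messages
# above (the XXXXXXX parts and the <path> root are placeholders, not real names):
#
#   <path>/
#       reach33_XXXXXXX.xlsx          result workbook for 33% reach
#       reach66_XXXXXXX.xlsx          result workbook for 66% reach
#       reach100_XXXXXXX.xlsx         result workbook for 100% reach
#       *.cfg                         controller configuration (JSON, see get_configs)
#       reach33_load33_speed33/       one raw-data dir per reach/load/speed combination
#           *.data                    exactly three tab-separated .data files
#       ......
#       reach100_load100_speed100/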
def get_configs(configfile, w2t):
axis = configfile.split('\\')[-2][-1]
if axis not in ['1', '2', '3']:
w2t("被处理的根文件夹命名必须是 [Jj][123] 的格式", 0, 9, 'red')
else:
axis = int(axis)
def get_configs(config_file, w2t):
try:
with open(config_file, mode="r", encoding="utf-8") as f_config:
configs = json.load(f_config)
except Exception as Err:
clibs.insert_logdb("ERROR", "current", f"get_config: 无法打开 {config_file},获取配置文件参数错误 {Err}")
w2t(f"无法打开 {config_file}", color="red", desc="OpenFileError")
_wb = load_workbook(configfile, read_only=True)
_ws = _wb['Target']
rr = float(_ws.cell(row=2, column=axis + 1).value)
av = float(_ws.cell(row=3, column=axis + 1).value)
p_dir = config_file.split('/')[-2]
if not re.match("^[jJ][123]$", p_dir):
w2t("被处理的根文件夹命名必须是 [Jj][123] 的格式", "red", "DirNameError")
axis = int(p_dir[-1])
rrs = [abs(_) for _ in configs["TRANSMISSION"]["REDUCTION_RATIO_NUMERATOR"]] # reduction ratio (rr)
avs = configs["MOTION"]["JOINT_MAX_SPEED"]
rr = rrs[axis-1]
av = avs[axis-1]
return av, rr
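# Minimal sketch of the JSON shape get_configs depends on; only the keys read above
# are shown and the list contents are placeholders:
#
#   {
#       "TRANSMISSION": {"REDUCTION_RATIO_NUMERATOR": [...]},
#       "MOTION":       {"JOINT_MAX_SPEED": [...]}
#   }
#
# Both lists are indexed with axis-1, so they must cover the joint (J1/J2/J3) named
# by the parent directory.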
def now_doing_msg(docs, flag, w2t):
# Purpose: report which file or directory is currently being processed
# Args: a file or directory path; a 'start' or 'done' flag
# Returns: -
now = strftime('%Y-%m-%d %H:%M:%S', localtime(time()))
file_type = 'file' if isfile(docs) else 'dir'
now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
file_type = 'file' if os.path.isfile(docs) else 'dir'
if flag == 'start' and file_type == 'dir':
w2t(f"[{now}] 正在处理目录 {docs} 中的数据......")
w2t(f"[{now}] 正在处理目录 {docs} 中的数据......\n")
elif flag == 'start' and file_type == 'file':
w2t(f"[{now}] 正在处理文件 {docs} 中的数据......")
w2t(f"[{now}] 正在处理文件 {docs} 中的数据......\n")
elif flag == 'done' and file_type == 'dir':
w2t(f"[{now}] 目录 {docs} 数据文件已处理完毕")
w2t(f"[{now}] 目录 {docs} 数据文件已处理完毕\n")
elif flag == 'done' and file_type == 'file':
w2t(f"[{now}] 文件 {docs} 数据已处理完毕")
w2t(f"[{now}] 文件 {docs} 数据已处理完毕\n")
def w2t_local(msg, wait, w2t):
while True:
global stop
if stop == 0 and wait != 0:
sleep(1)
w2t(msg, wait, 0, 'orange')
else:
break
def copy_data_to_result(df, ws_result, row_start, row_end, vel, trq, estop):
# Purpose: copy the valid rows from the data file into the matching sheet of the result file
# Args: as above
# Returns: -
# Clear any stale data in the result file
def data2result(df, ws_result, row_start, row_end, vel, trq, estop):
data = []
for _row in range(row_start, row_end + 1):
data.append(df.iloc[_row, vel - 1])
data.append(df.iloc[_row, trq - 1])
data.append(df.iloc[_row, estop - 1])
for row in range(row_start, row_end):
data.append(df.iloc[row, vel - 1])
data.append(df.iloc[row, trq - 1])
data.append(df.iloc[row, estop - 1])
i = 0
row_max = 2000 if row_end - row_start < 2000 else row_end - row_start + 20
for _row in range(2, row_max):
row_max = 1000 if row_end - row_start < 1000 else row_end - row_start + 100
for row in range(2, row_max):
try:
ws_result.cell(row=_row, column=1).value = data[i]
ws_result.cell(row=_row, column=2).value = data[i + 1]
ws_result.cell(row=_row, column=3).value = data[i + 2]
ws_result.cell(row=row, column=1).value = data[i]
ws_result.cell(row=row, column=2).value = data[i + 1]
ws_result.cell(row=row, column=3).value = data[i + 2]
i += 3
except:
ws_result.cell(row=_row, column=1).value = None
ws_result.cell(row=_row, column=2).value = None
ws_result.cell(row=_row, column=3).value = None
except Exception:
ws_result.cell(row=row, column=1).value = None
ws_result.cell(row=row, column=2).value = None
ws_result.cell(row=row, column=3).value = None
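# Note: rows past the copied range are overwritten with None so that leftovers from an
# earlier, longer run cannot survive in the result sheet; row_max only adds head-room
# (at least 1000 rows) beyond row_end - row_start.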
def find_row_start(data_file, df, conditions, av, rr, vel, estop, w2t):
# Purpose: find the row range of the valid data in the data file, i.e. the last point at which the speed drops
# Args: as above
# Returns: the speed-drop row and the last data row
def get_row_range(data_file, df, conditions, av, rr, vel, estop, w2t):
row_start, row_end = 0, 0
ratio = float(conditions[2].removeprefix('speed')) / 100
av_max = av * ratio
row_max = df.index[-1]
threshold = 0.95
for _row in range(row_max, -1, -1):
if df.iloc[_row, estop - 1] != 0:
row_start = _row - 20 if _row - 20 > 0 else 0
for row in range(df.index[-1] - 1, -1, -10):
if df.iloc[row, estop - 1] != 0:
row_start = row - 20 if row - 20 > 0 else 0 # take 20 samples before the E-stop
break
else:
w2t(f"数据文件 {data_file} 采集的数据中没有 ESTOP 为非 0 的情况,需要确认", 0, 9, 'red')
w2t(f"数据文件 {data_file} 采集的数据中没有 ESTOP 为非 0 的情况,需要确认\n", "red", "StartNotFoundError")
for _row in range(row_start, row_max):
speed_row = (df.iloc[_row, vel - 1] * 180) / 3.1415926 * rr * 60 / 360
for row in range(row_start, df.index[-1] - 1, 10):
speed_row = df.iloc[row, vel - 1] * clibs.RADIAN * rr * 60 / 360
if abs(speed_row) < 1:
row_end = _row + 100 if _row + 100 <= row_max else row_max
row_end = row + 100 if row + 100 <= df.index[-1] - 1 else df.index[-1] - 1
break
else:
w2t(f"数据文件 {data_file} 最后的速度未降为零 ", 0, 10, 'red')
w2t(f"数据文件 {data_file} 最后的速度未降为零\n", "red", "SpeedNotZeroError")
av_estop = abs((df.iloc[row_start - 10:row_start + 10, vel - 1].abs().mean() * 180) / 3.1415926)
av_estop = abs(df.iloc[row_start - 20:row_start, vel - 1].abs().mean() * clibs.RADIAN)
if abs(av_estop / av_max) < threshold:
filename = data_file.split('\\')[-1]
w2t(f"[av_estop: {av_estop:.2f} | shouldbe: {av_max:.2f}] 数据文件 {filename} 触发 ESTOP 时未采集到指定百分比的最大速度,需要检查", 0, 0, '#8A2BE2')
filename = data_file.split("/")[-1]
w2t(f"[av_estop: {av_estop:.2f} | shouldbe: {av_max:.2f}] 数据文件 {filename} 触发 ESTOP 时未采集到指定百分比的最大速度,需要检查\n", "#8A2BE2")
return row_start, row_end
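# The speed_row expression above converts the sampled joint velocity (rad/s) into
# motor speed (rpm); clibs.RADIAN plays the role of 180/pi from the old formula:
#
#   motor_rpm = omega[rad/s] * (180/pi) * rr * 60 / 360
#
# e.g. omega = 1.5 rad/s with rr = 81 gives 1.5 * 57.2958 * 81 / 6 ≈ 1160 rpm, so the
# abs(speed_row) < 1 test effectively means the motor has come to a stop.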
def find_result_sheet_name(conditions, count):
# Purpose: get the exact sheet name in the result file
# Args: the list of reach and speed conditions
# Returns: the matching sheet name in the result file
# sheet name pattern "33%负载_33%速度_1" from ['loadxx', 'reachxx', 'speedxx']
load = conditions[0].removeprefix('load')
def get_shtname(conditions, count):
# sheet name pattern "33%负载_33%速度_1"; conditions ordered reach/load/speed
load = conditions[1].removeprefix('load')
speed = conditions[2].removeprefix('speed')
result_sheet_name = f"{load}%负载_{speed}%速度_{count}"
return result_sheet_name
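# Worked example: conditions = ['reach33', 'load66', 'speed100'] with count = 2 yields
# the sheet name "66%负载_100%速度_2"; the reach level picks the workbook (via the
# filename prefix in data_process), while load/speed/count pick the sheet inside it.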
def single_file_process(data_file, wb_result, count, av, rr, vel, trq, estop, w2t):
# Purpose: process a single data file
# Args: as above
# Returns: -
df = read_csv(data_file, sep='\t')
def single_file_process(data_file, wb, count, av, rr, vel, trq, estop, w2t):
df = pandas.read_csv(data_file, sep='\t')
conditions = data_file.split("/")[-2].split("_") # reach/load/speed
shtname = get_shtname(conditions, count)
ws = wb[shtname]
conditions = sorted(data_file.split('\\')[-2].split('_')) # ['loadxx', 'reachxx', 'speedxx']
result_sheet_name = find_result_sheet_name(conditions, count)
ws_result = wb_result[result_sheet_name]
row_start, row_end = find_row_start(data_file, df, conditions, av, rr, vel, estop, w2t)
copy_data_to_result(df, ws_result, row_start, row_end, vel, trq, estop)
row_start, row_end = get_row_range(data_file, df, conditions, av, rr, vel, estop, w2t)
data2result(df, ws, row_start, row_end, vel, trq, estop)
def data_process(result_file, raw_data_dirs, av, rr, vel, trq, estop, w2t):
# Purpose: process all the data belonging to one result file
# Args: the result file, the raw-data directories, and the pre-read parameters
# Returns: -
file_name = result_file.split('\\')[-1]
w2t(f"正在打开文件 {file_name} 需要 1min 左右", 1, 0, 'orange')
def data_process(result_file, rawdata_dirs, av, rr, vel, trq, estop, w2t):
filename = result_file.split("/")[-1]
global stop
stop = 0
t_excel = clibs.GetThreadResult(load_workbook, args=(result_file,))
t_wait = Thread(target=w2t_local, args=('.', 1, w2t))
clibs.stop = True
w2t(f"正在打开文件 {filename} 需要 1min 左右......\n", "blue")
t_excel = clibs.GetThreadResult(openpyxl.load_workbook, args=(result_file, ))
t_excel.daemon = True
t_excel.start()
t_wait.start()
t_excel.join()
wb_result = t_excel.get_result()
stop = 1
sleep(1.1)
w2t('')
t_progress = threading.Thread(target=clibs.tl_prg, args=("Processing......", ))
t_progress.daemon = True
t_progress.start()
wb = t_excel.get_result()
prefix = result_file.split('\\')[-1].split('_')[0]
for raw_data_dir in raw_data_dirs:
if raw_data_dir.split('\\')[-1].split('_')[0] == prefix:
now_doing_msg(raw_data_dir, 'start', w2t)
_, data_files = clibs.traversal_files(raw_data_dir, w2t)
prefix = filename.split('_')[0]
for rawdata_dir in rawdata_dirs:
if rawdata_dir.split("/")[-1].split('_')[0] == prefix:
now_doing_msg(rawdata_dir, 'start', w2t)
_, data_files = clibs.traversal_files(rawdata_dir, w2t)
# Serial processing mode for the data files ---------------------------------
# count = 1
# for data_file in data_files:
@@ -212,59 +193,41 @@ def data_process(result_file, raw_data_dirs, av, rr, vel, trq, estop, w2t):
# ---------------------------------------------------
# Parallel processing mode for the data files ---------------------------------
threads = [
Thread(target=single_file_process, args=(data_files[0], wb_result, 1, av, rr, vel, trq, estop, w2t)),
Thread(target=single_file_process, args=(data_files[1], wb_result, 2, av, rr, vel, trq, estop, w2t)),
Thread(target=single_file_process, args=(data_files[2], wb_result, 3, av, rr, vel, trq, estop, w2t))
threading.Thread(target=single_file_process, args=(data_files[0], wb, 1, av, rr, vel, trq, estop, w2t)),
threading.Thread(target=single_file_process, args=(data_files[1], wb, 2, av, rr, vel, trq, estop, w2t)),
threading.Thread(target=single_file_process, args=(data_files[2], wb, 3, av, rr, vel, trq, estop, w2t))
]
[t.start() for t in threads]
[t.join() for t in threads]
# ---------------------------------------------------
now_doing_msg(raw_data_dir, 'done', w2t)
now_doing_msg(rawdata_dir, 'done', w2t)
now_doing_msg(result_file, 'done', w2t)
w2t(f"正在保存文件 {file_name} 需要 1min 左右", 1, 0, 'orange')
stop = 0
t_excel = Thread(target=wb_result.save, args=(result_file,))
t_wait = Thread(target=w2t_local, args=('.', 1, w2t))
w2t(f"正在保存文件 {filename} 需要 1min 左右......\n\n", "blue")
t_excel = threading.Thread(target=wb.save, args=(result_file, ))
t_excel.daemon = True
t_excel.start()
t_wait.start()
t_excel.join()
stop = 1
sleep(1.1)
w2t('\n')
wb.close()
clibs.stop = False
t_progress.join()
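# clibs.GetThreadResult is not part of this diff; the class below is only a minimal,
# assumed sketch of the pattern data_process relies on (a Thread that keeps its
# target's return value and hands it back through get_result(), so load_workbook can
# run without freezing the UI). The class name is hypothetical.
class _GetThreadResultSketch(threading.Thread):
    def __init__(self, func, args=()):
        super().__init__()
        self.func, self.args, self.result = func, args, None

    def run(self):
        # run the wrapped callable and remember whatever it returns
        self.result = self.func(*self.args)

    def get_result(self):
        self.join()  # make sure run() has finished before reading the result
        return self.result
# clibs.stop is likewise assumed to be the shared flag that clibs.tl_prg polls to
# decide when the progress indicator should stop.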
def main():
# path, vel, trq, estop, w2t
time_start = time.time()
path = clibs.data_dp["_path"]
vel = int(clibs.data_dp["_vel"])
trq = int(clibs.data_dp["_trq"])
estop = int(clibs.data_dp["_estop"])
w2t = clibs.w2t
insert_logdb = clibs.insert_logdb
time_start = time.time()
rawdata_dirs, result_files = clibs.traversal_files(path, w2t)
config_file, result_files = check_files(rawdata_dirs, result_files, w2t)
av, rr = get_configs(config_file, w2t)
# threads = []
check_files(path, rawdata_dirs, result_files, w2t)
# av, rr = get_configs(path + '\\configs.xlsx', w2t)
#
# prefix = []
# for raw_data_dir in rawdata_dirs:
# prefix.append(raw_data_dir.split('\\')[-1].split("_")[0])
#
# for result_file in result_files:
# if result_file.split('\\')[-1].split('_')[0] not in set(prefix):
# continue
# else:
# now_doing_msg(result_file, 'start', w2t)
# data_process(result_file, raw_data_dirs, av, rr, vel, trq, estop, w2t)
# # threads.append(Thread(target=data_process, args=(result_file, raw_data_dirs, av, rr, vel, trq, estop, w2t)))
# # [t.start() for t in threads]
# # [t.join() for t in threads]
for result_file in result_files:
data_process(result_file, rawdata_dirs, av, rr, vel, trq, estop, w2t)
w2t("----------------------------------------------------------\n")
w2t("全部处理完毕\n")
w2t("-"*60 + "\n全部处理完毕\n")
time_end = time.time()
time_total = time_end - time_start
msg = f"数据处理时间:{time_total // 3600:02.0f} h {time_total % 3600 // 60:02.0f} m {time_total % 60:02.0f} s\n"

View File

@@ -4,6 +4,7 @@ import openpyxl
import pandas
import re
import csv
import time
from common import clibs
@@ -161,12 +162,10 @@ def current_cycle(data_files, vel, trq, trqh, sensor, rrs, rcs, params, w2t, ins
t_excel.daemon = True
t_excel.start()
t_excel.join()
wb.close()
clibs.stop = False
t_progress.join()
w2t("----------------------------------------------------------\n")
w2t("全部处理完毕")
def find_point(data_file, df, flag, row_s, row_e, threshold, step, end_point, skip_scale, axis, seq, w2t, insert_logdb):
if flag == "lt":
@@ -255,7 +254,7 @@ def p_single(wb, single, vel, trq, sensor, rrs, w2t, insert_logdb):
row_e = df.index[-1]
row_s = row_e - end_point
speed_avg = df.iloc[row_s:row_e].abs().mean()
if speed_avg < 2:
if speed_avg < threshold:
# First pass: discard the zero-speed data and find the rising edge where the speed is about to exceed zero
row_s, row_e = find_point(data_file, df, "lt", row_s, row_e, threshold, step, end_point, skip_scale, axis, "pre-1", w2t, insert_logdb)
row_e -= end_point*skip_scale
@@ -281,7 +280,7 @@ def p_single(wb, single, vel, trq, sensor, rrs, w2t, insert_logdb):
# Formal third pass: discard the data with speed above zero and find the falling edge where the speed approaches zero
row_s, row_e = find_point(data_file, df, "gt", row_s, row_e, threshold, step, end_point, skip_scale, axis, 3, w2t, insert_logdb)
row_start = get_row_number(threshold, "start", df, row_s, row_e, axis, insert_logdb)
elif speed_avg > 2:
elif speed_avg > threshold:
# First pass: discard the data with speed above zero and find the falling edge where the speed approaches zero
row_s, row_e = find_point(data_file, df, "gt", row_s, row_e, threshold, step, end_point, skip_scale, axis, "pre-1", w2t, insert_logdb)
row_e -= end_point*skip_scale
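# find_point itself is only partially visible in this diff; the helper below is a
# purely illustrative sketch (hypothetical name, simplified signature) of the kind of
# threshold scan the comments above describe: walk through the speed column and
# report the first row where |speed| crosses the threshold in the wanted direction.
def _first_crossing_sketch(df, col, row_s, row_e, threshold, step, rising=True):
    for row in range(row_s, row_e, step):
        above = abs(df.iloc[row, col]) > threshold
        if above == rising:  # rising: first |v| > threshold; falling: first |v| <= threshold
            return row
    return None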
@@ -409,6 +408,7 @@ def get_configs(config_file, w2t, insert_logdb):
def main():
time_start = time.time()
sub = clibs.data_dp["_sub"]
path = clibs.data_dp["_path"]
vel = int(clibs.data_dp["_vel"])
@@ -429,6 +429,12 @@ def main():
elif sub == "cycle":
current_cycle(data_files, vel, trq, trqh, sensor, rrs, rcs, params, w2t, insert_logdb)
w2t("-"*60 + "\n全部处理完毕\n")
time_end = time.time()
time_total = time_end - time_start
msg = f"数据处理时间:{time_total // 3600:02.0f} h {time_total % 3600 // 60:02.0f} m {time_total % 60:02.0f} s\n"
w2t(msg)
if __name__ == '__main__':
main()