basically done
This commit is contained in:
0
codes/analysis/__init__.py
Normal file
0
codes/analysis/__init__.py
Normal file
227
codes/analysis/brake.py
Normal file
227
codes/analysis/brake.py
Normal file
@ -0,0 +1,227 @@
|
||||
import json
|
||||
import os.path
|
||||
import time
|
||||
import pandas
|
||||
from PySide6.QtCore import Signal, QThread
|
||||
import openpyxl
|
||||
import re
|
||||
from codes.common import clibs
|
||||
|
||||
|
||||
class BrakeDataProcess(QThread):
    """Worker thread that validates and post-processes brake-test data.

    ``dir_path`` is expected to contain exactly four result files (three
    ``reachXX_*.xlsx`` workbooks and one ``*.cfg`` robot-model config) plus a
    set of raw-data directories named ``reachAA_loadBB_speedCC``.
    """

    # (message, color) pairs consumed by the GUI log widget.
    output = Signal(str, str)

    def __init__(self, dir_path, /):
        super().__init__()
        self.dir_path = dir_path  # root directory selected by the user
        self.idx = 0  # slot in clibs.running used to mark this task active

    def logger(self, level, module, content, color="black", error="", flag="both"):
        """Relay a log record to clibs and abort the task on ERROR.

        ``flag`` is accepted for interface compatibility but always
        recomputed: DEBUG goes to the cursor pane only, all else to both.
        Raises a plain ``Exception`` tagged with *error* when level is ERROR.
        """
        flag = "cursor" if level.upper() == "DEBUG" else "both"
        clibs.logger(level, module, content, color, flag, signal=self.output)
        if level.upper() == "ERROR":
            raise Exception(f"{error} | {content}")

    @clibs.handle_exception
    def check_files(self, rawdata_dirs, result_files):
        """Validate the result files / raw-data directory layout.

        Returns ``(config_file, result_files)`` where ``result_files`` keeps
        only the reach workbooks that have matching raw-data directories.
        Aborts (via ERROR log, which raises) on any naming violation.
        """
        msg_wrong = "需要有四个文件和若干个数据文件夹,可参考如下确认:<br>"
        msg_wrong += "- reach33/66/100_XXXXXXX.xlsx<br>- *.cfg<br>"
        msg_wrong += "- reach33_load33_speed33<br>- reach33_load33_speed66<br>...<br>- reach100_load100_speed66<br>- reach100_load100_speed100<br>"

        if len(result_files) != 4 or len(rawdata_dirs) == 0:
            self.logger("ERROR", "brake", msg_wrong, "red", "InitFileError")

        # Identify the four mandatory result files by name pattern.
        config_file, reach33_file, reach66_file, reach100_file = None, None, None, None
        for result_file in result_files:
            filename = result_file.split("/")[-1]
            if re.match(".*\\.cfg", filename):
                config_file = result_file
            elif filename.startswith("reach33_") and filename.endswith(".xlsx"):
                reach33_file = result_file
            elif filename.startswith("reach66_") and filename.endswith(".xlsx"):
                reach66_file = result_file
            elif filename.startswith("reach100_") and filename.endswith(".xlsx"):
                reach100_file = result_file

        # All four files must exist; checked once, after the whole scan.
        if not (config_file and reach33_file and reach66_file and reach100_file):
            self.logger("ERROR", "brake", msg_wrong, "red", "InitFileError")

        reach_s = ['reach33', 'reach66', 'reach100']
        load_s = ['load33', 'load66', 'load100']
        speed_s = ['speed33', 'speed66', 'speed100']
        prefix = []
        for rawdata_dir in rawdata_dirs:
            components = rawdata_dir.split("/")[-1].split('_')  # reach_load_speed
            prefix.append(components[0])
            if components[0] not in reach_s or components[1] not in load_s or components[2] not in speed_s:
                msg = f"报错信息:数据目录 {rawdata_dir} 命名不合规,请参考如下形式<br>"
                msg += "命名规则:reachAA_loadBB_speedCC,AA/BB/CC 指的是臂展/负载/速度的比例<br>"
                msg += "规则解释:reach66_load100_speed33,表示 66% 臂展,100% 负载以及 33% 速度情况下的测试结果文件夹<br>"
                self.logger("ERROR", "brake", msg, "red", "WrongDataFolder")

            # Each raw-data directory must hold exactly three *.data files.
            _, rawdata_files = clibs.traversal_files(rawdata_dir, self.output)
            if len(rawdata_files) != 3:
                msg = f"数据目录 {rawdata_dir} 下数据文件个数错误,每个数据目录下有且只能有三个以 .data 为后缀的数据文件"
                self.logger("ERROR", "brake", msg, "red", "WrongDataFile")

            for rawdata_file in rawdata_files:
                if not rawdata_file.endswith(".data"):
                    msg = f"数据文件 {rawdata_file} 后缀错误,每个数据目录下有且只能有三个以 .data 为后缀的数据文件"
                    self.logger("ERROR", "brake", msg, "red", "WrongDataFile")

        # Keep only workbooks whose reach prefix has at least one data folder.
        result_files = []
        for _ in [reach33_file, reach66_file, reach100_file]:
            if _.split("/")[-1].split("_")[0] in set(prefix):
                result_files.append(_)

        self.logger("INFO", "brake", "数据目录合规性检查结束,未发现问题......", "green")
        return config_file, result_files

    @clibs.handle_exception
    def get_configs(self, config_file):
        """Read the robot-model config and return ``(av, rr)`` for the axis.

        The axis under test (1-3) is taken from the parent directory name,
        which must match ``[Jj][123]``.  ``av`` is the joint max speed and
        ``rr`` the (absolute) reduction ratio for that axis.
        """
        try:
            with open(config_file, mode="r", encoding="utf-8") as f_config:
                configs = json.load(f_config)

            p_dir = config_file.split('/')[-2]
            if not re.match("^[jJ][123]$", p_dir):
                self.logger("ERROR", "brake", "被处理的根文件夹命名必须是 [Jj][123] 的格式", "red", "DirNameError")

            axis = int(p_dir[-1])  # axis under test
            # rr for reduction ratio; stored signed in the config, use |value|
            rrs = [abs(_) for _ in configs["TRANSMISSION"]["REDUCTION_RATIO_NUMERATOR"]]
            avs = configs["MOTION"]["JOINT_MAX_SPEED"]
            rr = rrs[axis-1]
            av = avs[axis-1]
            return av, rr
        except Exception as Err:
            self.logger("ERROR", "brake", f"无法打开 {config_file},或者使用了错误的机型配置文件,需检查<br>{Err}", "red", "OpenFileError")

    def now_doing_msg(self, docs, flag):
        """Log a start/done progress message for a file or directory."""
        now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        file_type = 'file' if os.path.isfile(docs) else 'dir'
        if flag == 'start' and file_type == 'dir':
            self.logger("INFO", "brake", f"[{now}] 正在处理目录 {docs} 中的数据......")
        elif flag == 'start' and file_type == 'file':
            self.logger("INFO", "brake", f"[{now}] 正在处理文件 {docs} 中的数据......")
        elif flag == 'done' and file_type == 'dir':
            self.logger("INFO", "brake", f"[{now}] 目录 {docs} 数据文件已处理完毕")
        elif flag == 'done' and file_type == 'file':
            self.logger("INFO", "brake", f"[{now}] 文件 {docs} 数据已处理完毕")

    @staticmethod
    @clibs.handle_exception
    def data2result(df, ws_result, row_start, row_end):
        """Copy the first three df columns of rows [row_start, row_end) into
        the result sheet (three values per sheet row), blanking leftovers.
        """
        data = []
        for row in range(row_start, row_end):
            data.append(df.iloc[row, 0])
            data.append(df.iloc[row, 1])
            data.append(df.iloc[row, 2])

        i = 0
        # Write a margin past the copied range so stale cells are cleared.
        row_max = 1000 if row_end - row_start < 1000 else row_end - row_start + 100
        for row in range(2, row_max):
            try:
                ws_result.cell(row=row, column=1).value = data[i]
                ws_result.cell(row=row, column=2).value = data[i + 1]
                ws_result.cell(row=row, column=3).value = data[i + 2]
                i += 3
            except Exception:
                # Ran past the copied data: blank the remaining cells.
                ws_result.cell(row=row, column=1).value = None
                ws_result.cell(row=row, column=2).value = None
                ws_result.cell(row=row, column=3).value = None

    @clibs.handle_exception
    def get_row_range(self, data_file, df, conditions, av, rr):
        """Locate the E-stop window in *df* and return ``(row_start, row_end)``.

        Scans backwards for the last nonzero ESTOP sample, then forwards for
        the point where speed has decayed to ~zero.  Warns when the speed at
        E-stop is below 95% of the expected percentage of max speed.
        """
        row_start, row_end = 0, 0
        ratio = float(conditions[2].removeprefix('speed')) / 100
        av_max = av * ratio
        threshold = 0.95

        for row in range(df.index[-1] - 1, -1, -10):
            if df.iloc[row, 2] != 0:
                row_start = row - 20 if row - 20 > 0 else 0  # 20 samples before the E-stop
                break
        else:
            self.logger("ERROR", "brake", f"数据文件 {data_file} 采集的数据中没有 ESTOP 为非 0 的情况,需要确认", "red", "StartNotFoundError")

        for row in range(row_start, df.index[-1] - 1, 10):
            # Convert joint speed (rad-based feedback) to motor rpm.
            speed_row = df.iloc[row, 0] * clibs.RADIAN * rr * 60 / 360
            if abs(speed_row) < 1:
                row_end = row + 100 if row + 100 <= df.index[-1] - 1 else df.index[-1] - 1
                break
        else:
            self.logger("ERROR", "brake", f"数据文件 {data_file} 最后的速度未降为零", "red", "SpeedNotZeroError")

        av_estop = abs(df.iloc[row_start - 20:row_start, 0].abs().mean() * clibs.RADIAN)
        if abs(av_estop / av_max) < threshold:
            # BUGFIX: message previously printed the literal "(unknown)"
            # while the computed filename went unused.
            filename = data_file.split("/")[-1]
            msg = f"[av_estop: {av_estop:.2f} | shouldbe: {av_max:.2f}] 数据文件 {filename} 触发 ESTOP 时未采集到指定百分比的最大速度,需要检查"
            self.logger("WARNING", "brake", msg, "#8A2BE2")

        return row_start, row_end

    @staticmethod
    @clibs.handle_exception
    def get_shtname(conditions, count):
        """Build the result sheet name, e.g. ``33%负载_33%速度_1``."""
        # conditions = [reach, load, speed]
        load = conditions[1].removeprefix('load')
        speed = conditions[2].removeprefix('speed')
        result_sheet_name = f"{load}%负载_{speed}%速度_{count}"

        return result_sheet_name

    @clibs.handle_exception
    def single_file_process(self, data_file, wb, count, av, rr):
        """Process one *.data file into its sheet of workbook *wb*."""
        df = pandas.read_csv(data_file, sep='\t')
        conditions = data_file.split("/")[-2].split("_")  # reach/load/speed
        shtname = self.get_shtname(conditions, count)
        ws = wb[shtname]

        row_start, row_end = self.get_row_range(data_file, df, conditions, av, rr)
        self.data2result(df, ws, row_start, row_end)

    @clibs.handle_exception
    def data_process(self, result_file, rawdata_dirs, av, rr):
        """Fill one reach workbook from its matching raw-data directories."""
        filename = result_file.split("/")[-1]
        # BUGFIX: messages below previously printed "(unknown)" instead of
        # the computed filename.
        self.logger("INFO", "brake", f"正在打开文件 {filename},这可能需要一些时间......", "blue")
        try:
            wb = openpyxl.load_workbook(result_file)
        except Exception as Err:
            self.logger("ERROR", "brake", f"{filename}文件打开失败,可能是文件已损坏,确认后重新执行!<br>{Err}", "red", "CannotOpenFile")

        prefix = filename.split('_')[0]
        for rawdata_dir in rawdata_dirs:
            if rawdata_dir.split("/")[-1].split('_')[0] == prefix:
                self.now_doing_msg(rawdata_dir, 'start')
                _, data_files = clibs.traversal_files(rawdata_dir, self.output)
                for idx in range(3):
                    self.single_file_process(data_files[idx], wb, idx+1, av, rr)
                self.now_doing_msg(rawdata_dir, 'done')

        self.logger("INFO", "brake", f"正在保存文件 {filename},这可能需要一些时间......<br>", "blue")
        wb.save(result_file)
        wb.close()

    @clibs.handle_exception
    def processing(self):
        """Entry point: validate, load config, process every workbook."""
        time_start = time.time()
        clibs.running[self.idx] = 1

        rawdata_dirs, result_files = clibs.traversal_files(self.dir_path, self.output)
        config_file, result_files = self.check_files(rawdata_dirs, result_files)
        av, rr = self.get_configs(config_file)

        for result_file in result_files:
            self.data_process(result_file, rawdata_dirs, av, rr)

        self.logger("INFO", "brake", "-"*60 + "<br>全部处理完毕<br>", "purple")
        time_total = time.time() - time_start
        msg = f"处理时间:{time_total // 3600:02.0f} h {time_total % 3600 // 60:02.0f} m {time_total % 60:02.0f} s"
        self.logger("INFO", "brake", msg)
|
436
codes/analysis/current.py
Normal file
436
codes/analysis/current.py
Normal file
@ -0,0 +1,436 @@
|
||||
import json
|
||||
import openpyxl
|
||||
import pandas
|
||||
import re
|
||||
import csv
|
||||
from PySide6.QtCore import Signal, QThread
|
||||
import time
|
||||
from codes.common import clibs
|
||||
|
||||
|
||||
class CurrentDataProcess(QThread):
    """Worker thread that post-processes motor-current (torque) test data.

    ``proc`` selects the pipeline: "最大值" (max torque), "平均值" (average
    torque) or "周期" (cycle/scenario processing into the Excel template).
    """

    # (message, color) pairs consumed by the GUI log widget.
    output = Signal(str, str)

    def __init__(self, dir_path, proc, /):
        super().__init__()
        self.dir_path = dir_path  # root directory selected by the user
        self.proc = proc  # "最大值" | "平均值" | "周期"
        self.idx = 1  # slot in clibs.running used to mark this task active

    def logger(self, level, module, content, color="black", error="", flag="both"):
        """Relay a log record to clibs and abort the task on ERROR.

        ``flag`` is accepted for interface compatibility but always
        recomputed: DEBUG goes to the cursor pane only, all else to both.
        """
        flag = "cursor" if level.upper() == "DEBUG" else "both"
        clibs.logger(level, module, content, color, flag, signal=self.output)
        if level.upper() == "ERROR":
            raise Exception(f"{error} | {content}")

    @clibs.handle_exception
    def initialization(self):
        """Scan dir_path; return ``(data_files, config_file)``.

        Requires exactly one ``*.cfg`` and one ``T_电机电流.xlsx``; every
        other file must match ``j[1-7]*.data``.
        """
        _, data_files = clibs.traversal_files(self.dir_path, self.output)
        count, config_file = 0, None
        for data_file in data_files:
            filename = data_file.split("/")[-1]
            if re.match(".*\\.cfg", filename):
                config_file = data_file
                count += 1
            elif filename == "T_电机电流.xlsx":
                count += 1
            else:
                if not re.match("^j[1-7].*\\.data$", filename):
                    msg = f"不合规 {data_file}<br>"
                    msg += "所有数据文件必须以 j[1-7]_ 开头,以 .data 结尾,比如j1_abcdef.data,请检查整改后重新运行"
                    self.logger("ERROR", "current", msg, "red", "FilenameIllegal")

        if count != 2:
            msg = "需要有一个机型配置文件\"*.cfg\",以及一个数据处理文件\"T_电机电流.xlsx\"表格,请检查整改后重新运行"
            self.logger("ERROR", "current", msg, "red", "FilenameIllegal")

        return data_files, config_file

    @clibs.handle_exception
    def current_max(self, data_files, rts):
        """Per-axis max torque from each *.data file; appends the value as an
        extra row to the file and returns ``{axis: [values...]}``.
        """
        self.logger("INFO", "current", f"正在处理最大转矩值逻辑......")
        # BUGFIX: filenames matching j[1-7] are legal (see initialization),
        # so axis 7 needs a slot too — the original {1..6} dict raised
        # KeyError for j7 files.
        current = {axis: [] for axis in range(1, 8)}
        for data_file in data_files:
            if data_file.endswith(".data"):
                df = pandas.read_csv(data_file, sep="\t")
            else:
                continue

            self.logger("DEBUG", "current", f"正在处理 {data_file} ...")
            cols = len(df.columns)
            axis = int(data_file.split("/")[-1].split("_")[0].removeprefix("j"))
            rt = rts[axis-1]
            self.logger("DEBUG", "current", f"最大列数为 {cols},{axis} 轴的额定转矩为 {rt}")

            col = df.columns.values[clibs.c_servo_trq-1]  # "device_servo_trq_feedback"
            c_max = df[col].abs().max()

            # Feedback is in thousandths of rated torque.
            scale = 1000
            _ = abs(c_max/scale*rt)
            current[axis].append(_)
            self.logger("DEBUG", "current", f"{data_file}: {_:.2f}")
            self.logger("DEBUG", "current", f"获取到的列名为 {col},最大转矩为 {_}")

            # Append the result as one extra (mostly empty) row of the file.
            with open(data_file, "a+") as f_data:
                csv_writer = csv.writer(f_data, delimiter="\t")
                csv_writer.writerow([""] * (cols-1) + [_])

        for axis, cur in current.items():
            if not cur:
                continue
            else:
                _ = ""
                for value in cur:
                    _ += f"{value:.4f} "
                self.logger("INFO", "current", f"{axis}轴最大转矩数据:{_}")

        self.logger("DEBUG", "current", f"获取最大转矩值结束 current_max = {current}")
        self.logger("INFO", "current", f"最大转矩数据处理完毕......")
        return current

    @clibs.handle_exception
    def current_avg(self, data_files, rts):
        """Per-axis average torque (|mean| + 3·std) from each *.data file;
        appends the value to the file and returns ``{axis: [values...]}``.
        """
        self.logger("INFO", "current", f"正在处理平均转矩值逻辑......")
        # BUGFIX: include axis 7 (same reason as current_max).
        current = {axis: [] for axis in range(1, 8)}
        for data_file in data_files:
            if data_file.endswith(".data"):
                df = pandas.read_csv(data_file, sep="\t")
            else:
                continue

            self.logger("DEBUG", "current", f"正在处理 {data_file} ...")
            cols = len(df.columns)
            axis = int(data_file.split("/")[-1].split("_")[0].removeprefix("j"))
            rt = rts[axis-1]
            self.logger("DEBUG", "current", f"最大列数为 {cols},{axis} 轴的额定转矩为 {rt}")

            col = df.columns.values[clibs.c_servo_trq-1]
            c_std = df[col].std()
            c_avg = df[col].mean()

            # Feedback is in thousandths of rated torque; 3σ margin on |mean|.
            scale = 1000
            _ = (abs(c_avg)+c_std*3)/scale*rt
            current[axis].append(_)
            self.logger("DEBUG", "current", f"{data_file}: {_:.2f}")
            self.logger("DEBUG", "current", f"获取到的列名为 {col},平均转矩为 {_}")

            with open(data_file, "a+") as f_data:
                csv_writer = csv.writer(f_data, delimiter="\t")
                csv_writer.writerow([""] * (cols-1) + [_])

        for axis, cur in current.items():
            if not cur:
                continue
            else:
                _ = ""
                for value in cur:
                    _ += f"{value:.4f} "
                self.logger("INFO", "current", f"{axis}轴平均转矩数据:{_}")

        self.logger("DEBUG", "current", f"获取平均转矩值结束 current_avg = {current}")
        self.logger("INFO", "current", f"平均转矩数据处理完毕......")
        return current

    @clibs.handle_exception
    def current_cycle(self, data_files, rrs, rts, params):
        """Cycle pipeline: sort files into hold/single/scenario groups and
        fill the ``T_电机电流.xlsx`` template workbook.
        """
        result, hold, single, scenario, dur_time = None, [], [], [], 0
        for data_file in data_files:
            filename = data_file.split("/")[-1]
            if filename == "T_电机电流.xlsx":
                result = data_file
            elif re.match("j[1-7]_hold_.*\\.data", filename):
                hold.append(data_file)
            elif re.match("j[1-7]_s_.*\\.data", filename):
                scenario.append(data_file)
                dur_time = float(filename.split("_")[3])  # cycle length in s
            elif re.match("j[1-7]_.*\\.data", filename):
                single.append(data_file)

        clibs.stop, filename = True, result.split("/")[-1]
        # BUGFIX: messages below previously printed "(unknown)" instead of
        # the computed filename.
        self.logger("INFO", "current", f"正在打开文件 {filename},这可能需要一些时间......", "blue")
        try:
            wb = openpyxl.load_workbook(result)
        except Exception as Err:
            self.logger("ERROR", "current", f"{filename}文件打开失败,可能是文件已损坏,确认后重新执行!<br>{Err}", "red", "CannotOpenFile")

        # Dump configuration parameters (six per-axis values per row) into
        # the summary sheet; the last element of params is the robot type.
        ws = wb["统计"]
        for idx in range(len(params)-1):
            row = idx + 2
            for col in range(2, 8):
                ws.cell(row=row, column=col).value = params[idx][col-2]
        ws.cell(row=1, column=1).value = params[-1]

        if hold:
            avg = self.current_avg(hold, rts)
            for axis, cur_value in avg.items():
                sht_name = f"J{axis}"
                wb[sht_name]["P4"].value = float(cur_value[0])

        # dur_time == 0 means no scenario files were found.
        if dur_time == 0:
            self.p_single(wb, single, rrs)
        else:
            self.p_scenario(wb, scenario, rrs, dur_time)

        self.logger("INFO", "current", f"正在保存文件 {filename},这可能需要一些时间......", "blue")
        wb.save(result)
        wb.close()

    @clibs.handle_exception
    def find_point(self, data_file, df, flag, row_s, row_e, threshold, step, end_point, skip_scale, axis, seq):
        """Slide a window of ``end_point`` samples backwards until the mean
        |speed| crosses *threshold* ("lt": rising edge, "gt": falling edge).

        Returns ``(row_s, row_e)`` at the crossing; logs ERROR (raises) when
        the crossing cannot be confirmed.  NOTE: if the window reaches the
        start of the data without crossing, the method falls through and
        returns None — callers rely on the data containing the edge.
        """
        if flag == "lt":
            while row_e > end_point:
                speed_avg = df.iloc[row_s:row_e].abs().mean()
                if speed_avg < threshold:
                    row_e -= step
                    row_s -= step
                    continue
                else:
                    # One more confirmation window: only if two consecutive
                    # windows exceed the threshold do we accept the edge
                    # (slightly safer than accepting the first hit; rarely
                    # triggered on real data).
                    speed_avg = df.iloc[row_s-end_point*skip_scale:row_e-end_point*skip_scale].abs().mean()
                    if speed_avg < threshold:
                        self.logger("WARNING", "current", f"【lt】{axis} 轴第 {seq} 次查找数据可能有异常,row_s = {row_s}, row_e = {row_e}!", "purple")
                    return row_s, row_e
            else:
                self.logger("ERROR", "current", f"{data_file} 数据有误,需要检查,无法找到第 {seq} 个有效点......", "red", "AnchorNotFound")
        elif flag == "gt":
            while row_e > end_point:
                speed_avg = df.iloc[row_s:row_e].abs().mean()
                if speed_avg > threshold:
                    row_e -= step
                    row_s -= step
                    continue
                else:
                    # Same two-window confirmation as above, inverted sense.
                    speed_avg = df.iloc[row_s-end_point*skip_scale:row_e-end_point*skip_scale].abs().mean()
                    if speed_avg > threshold:
                        self.logger("WARNING", "current", f"【gt】{axis} 轴第 {seq} 次查找数据可能有异常,row_s = {row_s}, row_e = {row_e}!", "purple")
                    return row_s, row_e
            else:
                self.logger("ERROR", "current", f"{data_file} 数据有误,需要检查,无法找到第 {seq} 个有效点......", "red", "AnchorNotFound")

    @clibs.handle_exception
    def get_row_number(self, threshold, flag, df, row_s, row_e, axis):
        """Return the first row in [row_s, row_e) where ten consecutive
        samples are above ("start"/"end") or below ("middle") *threshold*;
        falls back to row_e with a DEBUG note when no such run exists.
        """
        count_1, count_2 = 0, 0
        if flag == "start" or flag == "end":
            for number in df.iloc[row_s:row_e].abs():
                count_2 += 1
                if number > threshold:
                    count_1 += 1
                    if count_1 == 10:
                        return row_s + count_2 - 10
                else:
                    count_1 = 0
        elif flag == "middle":
            for number in df.iloc[row_s:row_e].abs():
                count_2 += 1
                if number < threshold:  # the only difference vs. start/end
                    count_1 += 1
                    if count_1 == 10:
                        return row_s + count_2 - 10
                else:
                    count_1 = 0

        places = {"start": "起点", "middle": "中间点", "end": "终点"}
        self.logger("DEBUG", "current", f"{axis} 轴获取{places[flag]}数据 {row_e} 可能有异常,需关注!", "purple")
        return row_e

    @clibs.handle_exception
    def p_single(self, wb, single, rrs):
        """Process single-axis *.data files into their J<axis> sheets.

        Strategy (scanning backwards from the file end):
        1. find the first zero-speed point (ignoring a leading zero segment),
        2. find a second zero-speed point further back the same way,
        3. copy the rows between the two anchors into the sheet.
        """
        for data_file in single:
            axis = int(data_file.split("/")[-1].split("_")[0].removeprefix("j"))
            sht_name = f"J{axis}"
            ws = wb[sht_name]
            pandas.set_option("display.precision", 2)
            df_origin = pandas.read_csv(data_file, sep="\t")
            rr = rrs[axis-1]
            # rad/s joint feedback -> motor rpm.
            addition = 180 / 3.1415926 * 60 / 360 * rr

            col_names = list(df_origin.columns)
            df = df_origin[col_names[clibs.c_joint_vel-1]].multiply(addition)

            step = 50  # window slide step
            end_point = 200  # number of samples per window
            threshold = 5  # mean-|speed| threshold for a 200-sample window
            skip_scale = 2
            row_start, row_middle, row_end = 0, 0, 0
            row_e = df.index[-1]
            row_s = row_e - end_point
            speed_avg = df.iloc[row_s:row_e].abs().mean()
            if speed_avg < threshold:
                # Trailing segment is zero-speed: three pre-filter passes to
                # get past it, then the three real anchor searches.
                # Pre-1: skip the zero segment, find the rising edge.
                row_s, row_e = self.find_point(data_file, df, "lt", row_s, row_e, threshold, step, end_point, skip_scale, axis, "pre-1")
                row_e -= end_point*skip_scale
                row_s -= end_point*skip_scale
                # Pre-2: skip the moving segment, find the falling edge.
                row_s, row_e = self.find_point(data_file, df, "gt", row_s, row_e, threshold, step, end_point, skip_scale, axis, "pre-2")
                row_e -= end_point*skip_scale
                row_s -= end_point*skip_scale
                # Pre-3: skip the zero segment, find the rising edge.
                row_s, row_e = self.find_point(data_file, df, "lt", row_s, row_e, threshold, step, end_point, skip_scale, axis, "pre-3")
                row_e -= end_point*skip_scale
                row_s -= end_point*skip_scale
                # Pass 1: falling edge -> end anchor.
                row_s, row_e = self.find_point(data_file, df, "gt", row_s, row_e, threshold, step, end_point, skip_scale, axis, 1)
                row_end = self.get_row_number(threshold, "end", df, row_s, row_e, axis)
                row_e -= end_point*skip_scale
                row_s -= end_point*skip_scale
                # Pass 2: rising edge -> middle anchor.
                row_s, row_e = self.find_point(data_file, df, "lt", row_s, row_e, threshold, step, end_point, skip_scale, axis, 2)
                row_middle = self.get_row_number(threshold, "middle", df, row_s, row_e, axis)
                row_e -= end_point*skip_scale
                row_s -= end_point*skip_scale
                # Pass 3: falling edge -> start anchor.
                row_s, row_e = self.find_point(data_file, df, "gt", row_s, row_e, threshold, step, end_point, skip_scale, axis, 3)
                row_start = self.get_row_number(threshold, "start", df, row_s, row_e, axis)
            elif speed_avg > threshold:
                # Trailing segment is moving: two pre-filter passes, then the
                # same three anchor searches.
                # Pre-1: skip the moving segment, find the falling edge.
                row_s, row_e = self.find_point(data_file, df, "gt", row_s, row_e, threshold, step, end_point, skip_scale, axis, "pre-1")
                row_e -= end_point*skip_scale
                row_s -= end_point*skip_scale
                # Pre-2: skip the zero segment, find the rising edge.
                row_s, row_e = self.find_point(data_file, df, "lt", row_s, row_e, threshold, step, end_point, skip_scale, axis, "pre-2")
                row_e -= end_point*skip_scale
                row_s -= end_point*skip_scale
                # Pass 1: falling edge -> end anchor.
                row_s, row_e = self.find_point(data_file, df, "gt", row_s, row_e, threshold, step, end_point, skip_scale, axis, 1)
                row_end = self.get_row_number(threshold, "end", df, row_s, row_e, axis)
                row_e -= end_point*skip_scale
                row_s -= end_point*skip_scale
                # Pass 2: rising edge -> middle anchor.
                row_s, row_e = self.find_point(data_file, df, "lt", row_s, row_e, threshold, step, end_point, skip_scale, axis, 2)
                row_middle = self.get_row_number(threshold, "middle", df, row_s, row_e, axis)
                row_e -= end_point*skip_scale
                row_s -= end_point*skip_scale
                # Pass 3: falling edge -> start anchor.
                row_s, row_e = self.find_point(data_file, df, "gt", row_s, row_e, threshold, step, end_point, skip_scale, axis, 3)
                row_start = self.get_row_number(threshold, "start", df, row_s, row_e, axis)

            self.logger("DEBUG", "current", f"{axis} 轴起点:{row_start}")
            self.logger("DEBUG", "current", f"{axis} 轴中间点:{row_middle}")
            self.logger("DEBUG", "current", f"{axis} 轴终点:{row_end}")
            self.logger("DEBUG", "current", f"{axis} 轴数据非零段点数:{row_middle-row_start+1}")
            self.logger("DEBUG", "current", f"{axis} 轴数据为零段点数:{row_end-row_middle+1}")
            if abs(row_end+row_start-2*row_middle) > 1000:
                self.logger("DEBUG", "current", f"{axis} 轴数据占空比异常!", "purple")

            # Copy the four observed columns into the sheet, 4 values/row.
            data, first_c, second_c, third_c, fourth_c = [], clibs.c_joint_vel-1, clibs.c_servo_trq-1, clibs.c_sensor_trq-1, clibs.c_estimate_trans_trq-1
            for row in range(row_start, row_end+1):
                data.append(df_origin.iloc[row, first_c])
                data.append(df_origin.iloc[row, second_c])
                data.append(df_origin.iloc[row, third_c])
                data.append(df_origin.iloc[row, fourth_c])

            i = 0
            for row in ws.iter_rows(min_row=2, min_col=2, max_row=150000, max_col=5):
                for cell in row:
                    try:
                        if i % 4 == 0:
                            # Column A carries a 1 ms time base.
                            ws.cell((i//4)+2, 1).value = float(((i//4)+1)/1000)
                        _ = f"{data[i]:.2f}"
                        cell.value = float(_)
                        i += 1
                    except Exception:
                        # Ran past the copied data: blank the remaining cells.
                        if i % 4 == 0:
                            ws.cell((i//4)+2, 1).value = None
                        cell.value = None
                        i += 1

    @clibs.handle_exception
    def p_scenario(self, wb, scenario, rrs, dur_time):
        """Process scenario *.data files: copy exactly one motion cycle
        (``dur_time`` seconds starting at a fixed offset) into each sheet.
        """
        self.logger("INFO", "current", f"本次处理的是电机电流场景数据,场景运动周期为 {dur_time}s", "blue")
        for data_file in scenario:
            cycle = 0.001  # sampling period in seconds
            axis = int(data_file.split("/")[-1].split("_")[0].removeprefix("j"))
            sht_name = f"J{axis}"
            ws = wb[sht_name]
            pandas.set_option("display.precision", 2)
            df_origin = pandas.read_csv(data_file, sep="\t")
            rr = rrs[axis-1]
            # rad/s joint feedback -> motor rpm (kept for parity; df below
            # is only used for its length check).
            addition = 180 / 3.1415926 * 60 / 360 * rr

            col_names = list(df_origin.columns)
            df = df_origin[col_names[clibs.c_joint_vel-1]].multiply(addition)

            row_start = 3000  # skip the initial settling samples
            row_end = row_start + int(dur_time/cycle)
            if row_end > df.index[-1]:
                self.logger("ERROR", "current", f"位置超限:{data_file} 共有 {df.index[-1]} 条数据,无法取到第 {row_end} 条数据,需要确认场景周期时间...", "blue", "DataOverLimit")

            data, first_c, second_c, third_c, fourth_c = [], clibs.c_joint_vel-1, clibs.c_servo_trq-1, clibs.c_sensor_trq-1, clibs.c_estimate_trans_trq-1
            for row in range(row_start, row_end+1):
                data.append(df_origin.iloc[row, first_c])
                data.append(df_origin.iloc[row, second_c])
                data.append(df_origin.iloc[row, third_c])
                data.append(df_origin.iloc[row, fourth_c])

            i = 0
            for row in ws.iter_rows(min_row=2, min_col=2, max_row=250000, max_col=5):
                for cell in row:
                    try:
                        if i % 4 == 0:
                            # Column A carries a 1 ms time base.
                            ws.cell((i//4)+2, 1).value = float(((i//4)+1)/1000)
                        _ = f"{data[i]:.2f}"
                        cell.value = float(_)
                        i += 1
                    except Exception:
                        cell.value = None
                        if i % 4 == 0:
                            ws.cell((i//4)+2, 1).value = None
                        i += 1

    @clibs.handle_exception
    def get_configs(self, config_file):
        """Read the robot-model config; return the parameter tuple
        ``(sc, r_rrs, m_avs, m_stall_ts, m_rts, m_max_ts, m_r_rpms,
        m_max_rpms, r_max_sst, r_max_t, r_avg_t, robot_type)``.
        """
        try:
            # Industrial models are prefixed N/X/E/C + B; others are cobots.
            if re.match("^[NXEC]B.*", config_file.split("/")[-1]):
                robot_type = "工业"
            else:
                robot_type = "协作"

            with open(config_file, mode="r", encoding="utf-8") as f_config:
                configs = json.load(f_config)

            version = configs["VERSION"]
            sc = [0.001, 0.001, 0.001, 0.001, 0.001, 0.001]  # sample cycle per axis
            r_rrs = configs["TRANSMISSION"]["REDUCTION_RATIO_NUMERATOR"]  # reduction ratios
            m_avs = configs["MOTION"]["JOINT_MAX_SPEED"]
            m_stall_ts = configs["MOTOR"]["STALL_TORQUE"]  # motor stall torque
            m_rts = configs["MOTOR"]["RATED_TORQUE"]  # motor rated torque
            m_max_ts = configs["MOTOR"]["PEAK_TORQUE"]  # motor peak torque
            m_r_rpms = configs["MOTOR"]["RATED_SPEED"]  # motor rated speed
            m_max_rpms = configs["MOTOR"]["MAX_SPEED"]  # motor max speed
            r_max_sst = configs["TRANSMISSION"]["MAX_TORQUE_FOR_START_AND_STOP"]  # gearbox max start/stop torque
            r_max_t = configs["TRANSMISSION"]["MAX_PEAK_TORQUE"]  # gearbox instantaneous max torque
            r_avg_t = configs["TRANSMISSION"]["MAX_AVERAGE_TORQUE"]  # gearbox max average load torque

            self.logger("INFO", "current", f"get_configs: 机型文件版本 {config_file}_{version}")
            self.logger("INFO", "current", f"get_configs: 减速比 {r_rrs}")
            self.logger("INFO", "current", f"get_configs: 额定转矩 {m_rts}")
            self.logger("INFO", "current", f"get_configs: 最大角速度 {m_avs}")
            return sc, r_rrs, m_avs, m_stall_ts, m_rts, m_max_ts, m_r_rpms, m_max_rpms, r_max_sst, r_max_t, r_avg_t, robot_type
        except Exception as Err:
            self.logger("ERROR", "current", f"get_config: 无法打开 {config_file},或获取配置文件参数错误 {Err}", "red", "OpenFileError")

    @clibs.handle_exception
    def processing(self):
        """Entry point: scan, read config, dispatch on ``self.proc``."""
        time_start = time.time()
        clibs.running[self.idx] = 1

        data_files, config_file = self.initialization()
        params = self.get_configs(config_file)
        rts, rrs = params[4], params[1]
        if self.proc == "最大值":
            self.current_max(data_files, rts)
        elif self.proc == "平均值":
            self.current_avg(data_files, rts)
        elif self.proc == "周期":
            self.current_cycle(data_files, rrs, rts, params)

        self.logger("INFO", "current", "-"*60 + "<br>全部处理完毕<br>", "purple")
        time_total = time.time() - time_start
        msg = f"数据处理时间:{time_total // 3600:02.0f} h {time_total % 3600 // 60:02.0f} m {time_total % 60:02.0f} s\n"
        self.logger("INFO", "current", msg)
|
214
codes/analysis/iso.py
Normal file
214
codes/analysis/iso.py
Normal file
@ -0,0 +1,214 @@
|
||||
import pdfplumber
|
||||
import openpyxl
|
||||
import os
|
||||
import time
|
||||
from PySide6.QtCore import Signal, QThread
|
||||
from codes.common import clibs
|
||||
|
||||
|
||||
class IsoDataProcess(QThread):
    """Worker thread that extracts ISO 9283 figures from report PDFs."""

    # (message, color) pairs consumed by the GUI log widget.
    output = Signal(str, str)

    def __init__(self, dir_path, /):
        super().__init__()
        self.dir_path = dir_path  # root directory selected by the user
        self.idx = 2  # slot in clibs.running used to mark this task active

    def logger(self, level, module, content, color="black", error="", flag="both"):
        """Forward a log record to clibs; abort the task on ERROR level.

        ``flag`` is accepted for interface compatibility but always
        recomputed: DEBUG is cursor-only, everything else goes to both.
        """
        flag = "both" if level.upper() != "DEBUG" else "cursor"
        clibs.logger(level, module, content, color, flag, signal=self.output)
        if level.upper() == "ERROR":
            raise Exception(f"{error} | {content}")
||||
|
||||
def p_iso(self, file, p_files, ws, tmpfile):
    """Parse a full ISO 9283 report PDF and fill column G of *ws*.

    The PDF text is dumped into *tmpfile* (which doubles as a debug
    artifact), then scanned line by line for known section headers; the
    figures a fixed number of lines below each header are written into
    fixed rows of the result sheet.  *file* is recorded in *p_files*.
    """
    p_files.append(file)

    # Context manager guarantees the PDF is closed even if extraction fails
    # (the original called pdf.close() manually at the end).
    with pdfplumber.open(file) as pdf:
        with open(tmpfile, mode="w", encoding="utf-8") as fb:
            for page in pdf.pages:
                fb.write(page.extract_text())
    with open(tmpfile, mode="r", encoding="utf-8") as fb:
        lines = fb.readlines()
    lines = [line for line in lines if not line.startswith("Page ")]

    # BUGFIX: the original used lines.index(line), which returns the FIRST
    # occurrence — a repeated header (or any duplicate line) mis-anchored
    # the offsets below.  enumerate() gives the true position of each line.
    for index, line in enumerate(lines):
        if line.strip() == "Pose Accuracy and Repeatability":
            ws.cell(row=3, column=7).value = float(lines[index+4].split()[1])
            ws.cell(row=4, column=7).value = float(lines[index+5].split()[1])
            ws.cell(row=5, column=7).value = float(lines[index+6].split()[1])
            ws.cell(row=6, column=7).value = float(lines[index+7].split()[1])
            ws.cell(row=7, column=7).value = float(lines[index+8].split()[1])

            ws.cell(row=8, column=7).value = float(lines[index+4].split()[2])
            ws.cell(row=9, column=7).value = float(lines[index+5].split()[2])
            ws.cell(row=10, column=7).value = float(lines[index+6].split()[2])
            ws.cell(row=11, column=7).value = float(lines[index+7].split()[2])
            ws.cell(row=12, column=7).value = float(lines[index+8].split()[2])
        elif line.strip() == "Pose Accuracy Variation":
            ws.cell(row=13, column=7).value = float(lines[index+4].split()[1])
            ws.cell(row=14, column=7).value = float(lines[index+5].split()[1])
            ws.cell(row=15, column=7).value = float(lines[index+6].split()[1])
        elif line.strip() == "Distance Accuracy":
            ws.cell(row=16, column=7).value = float(lines[index + 4].split()[1])
            ws.cell(row=17, column=7).value = float(lines[index + 4].split()[2])
        elif line.strip() == "Stabilisation Time and Overshoot":
            ws.cell(row=18, column=7).value = float(lines[index + 7].split()[3])
            ws.cell(row=19, column=7).value = float(lines[index + 7].split()[2])
        elif line.strip() == "Velocity Accuracy and Repeatability":
            ws.cell(row=20, column=7).value = float(lines[index + 4].split()[1])
            ws.cell(row=21, column=7).value = float(lines[index + 4].split()[2])
            ws.cell(row=22, column=7).value = float(lines[index + 4].split()[3])
        elif line.strip()[:31] == "Path Accuracy and Repeatability":
            ws.cell(row=29, column=7).value = float(lines[index + 4].split()[1])
            ws.cell(row=30, column=7).value = float(lines[index + 4].split()[2])
        elif line.strip() == "Corner Overshoot and Roundoff":
            ws.cell(row=35, column=7).value = float(lines[index + 4].split()[1])
            ws.cell(row=36, column=7).value = float(lines[index + 4].split()[2])
        elif line.strip() == "Robot Weaving":
            ws.cell(row=41, column=7).value = float(lines[index + 4].split()[2])
            ws.cell(row=42, column=7).value = float(lines[index + 4].split()[3])
            ws.cell(row=43, column=7).value = float(lines[index + 4].split()[4])
        else:
            pass
|
||||
|
||||
def p_iso_100(self, file, p_files, ws, tmpfile):
    """Extract the ISO-V100 test metrics from a pdf report into the result sheet.

    The pdf text is dumped to *tmpfile*, scanned line by line for the known
    section titles, and the numeric values found a fixed number of lines
    below each title are written into fixed cells of column G.

    :param file: path of the ISO-V100 pdf report
    :param p_files: list of processed files; *file* is appended to it
    :param ws: target openpyxl worksheet (results go to column 7)
    :param tmpfile: path of a scratch text file holding the extracted pdf text
    """
    p_files.append(file)

    # Dump the whole pdf as plain text so it can be scanned line by line.
    pdf = pdfplumber.open(file)
    with open(tmpfile, mode="w", encoding="utf-8") as fb:
        for page in pdf.pages:
            fb.write(page.extract_text())
    with open(tmpfile, mode="r", encoding="utf-8") as fb:
        lines = fb.readlines()

    # Drop page-footer lines so the fixed offsets below each title stay stable.
    lines = [line for line in lines if not line.startswith("Page ")]
    # NOTE: enumerate replaces the original `lines.index(line)` lookup, which
    # rescanned the list on every hit (O(n^2)) and would return the FIRST
    # occurrence of a duplicated line instead of the current one.
    for index, line in enumerate(lines):
        if line.strip() == "Velocity Accuracy and Repeatability":
            ws.cell(row=26, column=7).value = float(lines[index + 4].split()[1])
            ws.cell(row=27, column=7).value = float(lines[index + 4].split()[2])
            ws.cell(row=28, column=7).value = float(lines[index + 4].split()[3])
        elif line.strip()[:31] == "Path Accuracy and Repeatability":
            ws.cell(row=33, column=7).value = float(lines[index + 4].split()[1])
            ws.cell(row=34, column=7).value = float(lines[index + 4].split()[2])
        elif line.strip() == "Corner Overshoot and Roundoff":
            ws.cell(row=39, column=7).value = float(lines[index + 4].split()[1])
            ws.cell(row=40, column=7).value = float(lines[index + 4].split()[2])
        elif line.strip() == "Robot Weaving":
            ws.cell(row=47, column=7).value = float(lines[index + 4].split()[2])
            ws.cell(row=48, column=7).value = float(lines[index + 4].split()[3])
            ws.cell(row=49, column=7).value = float(lines[index + 4].split()[4])
    pdf.close()
|
||||
|
||||
def p_iso_1000(self, file, p_files, ws, tmpfile):
    """Extract the ISO-V1000 test metrics from a pdf report into the result sheet.

    Same parsing scheme as :meth:`p_iso_100`, but the values land in the
    ISO-V1000 rows of column G.

    :param file: path of the ISO-V1000 pdf report
    :param p_files: list of processed files; *file* is appended to it
    :param ws: target openpyxl worksheet (results go to column 7)
    :param tmpfile: path of a scratch text file holding the extracted pdf text
    """
    p_files.append(file)

    # Dump the whole pdf as plain text so it can be scanned line by line.
    pdf = pdfplumber.open(file)
    with open(tmpfile, mode="w", encoding="utf-8") as fb:
        for page in pdf.pages:
            fb.write(page.extract_text())
    with open(tmpfile, mode="r", encoding="utf-8") as fb:
        lines = fb.readlines()

    # Drop page-footer lines so the fixed offsets below each title stay stable.
    lines = [line for line in lines if not line.startswith("Page ")]
    # NOTE: enumerate replaces the original `lines.index(line)` lookup, which
    # rescanned the list on every hit (O(n^2)) and would return the FIRST
    # occurrence of a duplicated line instead of the current one.
    for index, line in enumerate(lines):
        if line.strip() == "Velocity Accuracy and Repeatability":
            ws.cell(row=23, column=7).value = float(lines[index + 4].split()[1])
            ws.cell(row=24, column=7).value = float(lines[index + 4].split()[2])
            ws.cell(row=25, column=7).value = float(lines[index + 4].split()[3])
        elif line.strip()[:31] == "Path Accuracy and Repeatability":
            ws.cell(row=31, column=7).value = float(lines[index + 4].split()[1])
            ws.cell(row=32, column=7).value = float(lines[index + 4].split()[2])
        elif line.strip() == "Corner Overshoot and Roundoff":
            ws.cell(row=37, column=7).value = float(lines[index + 4].split()[1])
            ws.cell(row=38, column=7).value = float(lines[index + 4].split()[2])
        elif line.strip() == "Robot Weaving":
            ws.cell(row=44, column=7).value = float(lines[index + 4].split()[2])
            ws.cell(row=45, column=7).value = float(lines[index + 4].split()[3])
            ws.cell(row=46, column=7).value = float(lines[index + 4].split()[4])
    pdf.close()
|
||||
|
||||
def initialization(self):
    """Validate the working directory layout and return the files to process.

    The directory must contain no sub-directories, and every file must be
    one of the four allowed names (compared case-insensitively):
    iso-results.xlsx, ISO.pdf, ISO-V100.pdf, ISO-V1000.pdf.

    :return: list of file paths found in the working directory
    :raises Exception: via ``self.logger("ERROR", ...)`` when the layout is invalid
    """
    dirs, files = clibs.traversal_files(self.dir_path, self.output)
    if len(dirs) != 0:
        self.logger("ERROR", "iso", "init: 工作目录下不可以有文件夹!", "red", "InitFileError")

    # Only these four names (any case) are allowed; str.endswith accepts a
    # tuple, which replaces the original elif/pass ladder with one check.
    allowed = ("iso-results.xlsx", "iso-v1000.pdf", "iso-v100.pdf", "iso.pdf")
    for file in files:
        if not file.lower().endswith(allowed):
            self.logger("ERROR", "iso", "init: 工作目录下只允许有如下四个文件,不区分大小写,pdf文件最少有一个!<br>1. iso-results.xlsx<br>2. ISO.pdf<br>3. ISO-V100.pdf<br>4. ISO-V1000.pdf", "red", "InitFileError")

    return files
|
||||
|
||||
def processing(self):
    """Entry point: process every ISO pdf report in the working directory.

    Opens iso-results.xlsx, clears the previous results in column G,
    dispatches each pdf (ISO.pdf / ISO-V100.pdf / ISO-V1000.pdf, matched
    case-insensitively) to its parser, saves the workbook and removes the
    scratch text file.  Raises (via logger) when nothing was processed.
    """
    time_start = time.time()
    clibs.running[self.idx] = 1  # mark this worker slot as running (shared state in clibs)

    files = self.initialization()
    filename = f"{self.dir_path}/iso-results.xlsx"
    tmpfile = f"{self.dir_path}/data.txt"  # scratch file reused by every p_iso* parser
    wb, ws = None, None
    try:
        wb = openpyxl.load_workbook(filename)
        ws = wb.active
        # Clear previous results (rows 3..49 of column G) before writing new ones.
        for i in range(3, 50):
            ws.cell(row=i, column=7).value = None
    except Exception as Err:
        self.logger("ERROR", "iso", f"main: 无法打开文件 (unknown)<br>{Err}", "red", "FileOpenError")

    # Dispatch each report to the parser matching its (case-insensitive) name;
    # every parser appends the file it handled to p_files.
    p_files = []
    for file in files:
        if file.split("/")[-1].lower() == "iso.pdf":
            self.logger("INFO", "iso", f"正在处理{file}......")
            self.p_iso(file, p_files, ws, tmpfile)
            self.logger("INFO", "iso", f"文件{file}已处理完毕。")

        elif file.split("/")[-1].lower() == "iso-v100.pdf":
            self.logger("INFO", "iso", f"正在处理{file}......")
            self.p_iso_100(file, p_files, ws, tmpfile)
            self.logger("INFO", "iso", f"文件{file}已处理完毕。")

        elif file.split("/")[-1].lower() == "iso-v1000.pdf":
            self.logger("INFO", "iso", f"正在处理{file}......")
            self.p_iso_1000(file, p_files, ws, tmpfile)
            self.logger("INFO", "iso", f"文件{file}已处理完毕。")

        else:
            pass
    wb.save(filename)
    wb.close()

    if len(p_files) == 0:
        self.logger("ERROR", "iso", f"目录 {self.dir_path} 下没有需要处理的文件,需要确认......", "red", "FileNotFound")
    else:
        os.remove(tmpfile)  # scratch file only exists when at least one pdf was parsed

    self.logger("INFO", "current-processing", "-" * 60 + "<br>全部处理完毕<br>", "purple")
    time_total = time.time() - time_start
    msg = f"数据处理时间:{time_total // 3600:02.0f} h {time_total % 3600 // 60:02.0f} m {time_total % 60:02.0f} s\n"
    self.logger("INFO", "current-processing", msg)
|
161
codes/analysis/wavelogger.py
Normal file
161
codes/analysis/wavelogger.py
Normal file
@ -0,0 +1,161 @@
|
||||
import pandas
|
||||
import csv
|
||||
import openpyxl
|
||||
import chardet
|
||||
import time
|
||||
from PySide6.QtCore import Signal, QThread
|
||||
from codes.common import clibs
|
||||
|
||||
|
||||
class WaveloggerDataProcess(QThread):
    """Worker thread that converts wavelogger csv captures into result.xlsx.

    Each csv capture becomes one worksheet in the output workbook; within a
    sheet, column i+1 holds the accuracy values of test round i.
    """

    output = Signal(str, str)  # log records forwarded to the UI

    def __init__(self, dir_path, /):
        super().__init__()
        self.dir_path = dir_path  # working directory containing the csv captures
        self.idx = 3  # slot of this worker in clibs.running

    def logger(self, level, module, content, color="black", error="", flag="both"):
        """Forward one log record to the UI; an ERROR record additionally raises."""
        flag = "cursor" if level.upper() == "DEBUG" else "both"
        clibs.logger(level, module, content, color, flag, signal=self.output)
        if level.upper() == "ERROR":
            raise Exception(f"{error} | {content}")

    def find_point(self, bof, step, margin, threshold, pos, data_file, flag, df, row):
        """Scan *df* from *row* in steps of *step* until column 2 crosses *threshold*.

        :param bof: scan direction, "backward" or "forward"
        :param pos: short tag used only in error messages (debug aid)
        :param flag: "gt" skips values above the threshold, "lt" values below it
        :return: index of the first row past the crossing, or ``row + margin``
                 when a forward scan runs off the end (ends the caller's loop)
        :raises Exception: when a backward scan exhausts the data
        """
        row_target = None
        row_origin = len(df) - margin + 1
        if flag == "gt":
            while 0 < row < row_origin:
                value = float(df.iloc[row, 2])
                if value > threshold:
                    row = row - step if bof == "backward" else row + step
                    continue
                else:
                    row_target = row - step if bof == "backward" else row + step
                    break
            else:
                if bof == "backward":
                    self.logger("ERROR", "wavelogger", f"find_point-gt: [{pos}] 在 {data_file} 中,无法正确识别数据,需要确认...", "red", "DataError")
                elif bof == "forward":
                    row_target = row + margin  # to end while loop in function `single_file_proc`
        elif flag == "lt":
            while 0 < row < row_origin:
                value = float(df.iloc[row, 2])
                if value < threshold:
                    row = row - step if bof == "backward" else row + step
                    continue
                else:
                    row_target = row - step if bof == "backward" else row + step
                    break
            else:
                if bof == "backward":
                    self.logger("ERROR", "wavelogger", f"find_point-lt: [{pos}] 在 {data_file} 中,无法正确识别数据,需要确认...", "red", "DataError")
                elif bof == "forward":
                    row_target = row + margin  # to end while loop in function `single_file_proc`
        return row_target

    def get_cycle_info(self, data_file, step, margin, threshold):
        """Return ``(low_len, high_len, cycle_len, df)`` for one capture file.

        Scanning backwards from the end of the data:
        1. discard the trailing run (whichever side of the threshold it is on)
           and locate the start of the opposite run;
        2. keep walking backwards to the opposite middle point;
        3. walk once more to the end point — this yields the lengths of the
           high and low spans and of one full cycle.
        """
        # Sniff the file encoding from its first bytes.
        with open(data_file, "rb") as f:
            raw_data = f.read(1000)
        result = chardet.detect(raw_data)
        encoding = result['encoding']

        # The second field of the first csv row gives the 1-based line number
        # of the data header.  (Fix: the original passed a bare open() to
        # csv.reader and leaked the file handle; a `with` block closes it.)
        with open(data_file, encoding=encoding) as f:
            begin = int(next(csv.reader(f))[1])
        df = pandas.read_csv(data_file, sep=",", encoding=encoding, skip_blank_lines=False, header=begin - 1, on_bad_lines="skip")

        row = len(df) - margin
        if float(df.iloc[row, 2]) < threshold:
            row = self.find_point("backward", step, margin, threshold, "a1", data_file, "lt", df, row)

        _row = self.find_point("backward", step, margin, threshold, "a2", data_file, "gt", df, row)
        _row = self.find_point("backward", step, margin, threshold, "a3", data_file, "lt", df, _row)
        row_end = self.find_point("backward", step, margin, threshold, "a4", data_file, "gt", df, _row)
        row_middle = self.find_point("backward", step, margin, threshold, "a5", data_file, "lt", df, row_end)
        row_start = self.find_point("backward", step, margin, threshold, "a6", data_file, "gt", df, row_middle)
        return row_end - row_middle, row_middle - row_start, row_end - row_start, df

    def initialization(self):
        """Validate that the working directory contains only .csv files.

        :return: list of csv file paths
        :raises Exception: via logger when a non-csv file is found
        """
        _, data_files = clibs.traversal_files(self.dir_path, self.output)

        for data_file in data_files:
            if not data_file.lower().endswith(".csv"):
                self.logger("ERROR", "wavelogger", f"init: {data_file} 文件后缀错误,只允许 .csv 文件,需要确认!", "red", "FileTypeError")

        return data_files

    def preparation(self, data_file, step, margin, threshold, wb):
        """Create the worksheet for *data_file* and measure its cycle timing."""
        shtname = data_file.split("/")[-1].split(".")[0]  # sheet named after the file stem
        ws = wb.create_sheet(shtname)
        low, high, cycle, df = self.get_cycle_info(data_file, step, margin, threshold)

        return ws, df, low, high, cycle

    def single_file_proc(self, ws, data_file, step, threshold, margin, data_length, df, cycle):
        """Walk one capture forward, compute per-round accuracy values and fill *ws*.

        A value is mean + 3*std over a centred window of *data_length* samples
        between a rising and a falling threshold crossing; a gap longer than
        two cycles starts a new test round.
        """
        row, row_lt, row_gt, count, count_i, data = 1, 1, 1, 1, 1, {}
        row_max = len(df) - margin
        while row < row_max:
            if count not in data:
                data[count] = []

            value = float(df.iloc[row, 2])
            if value < threshold:
                # Inside a low span: find its end, then average the centred window.
                row_lt = self.find_point("forward", step, margin, threshold, "c" + str(row), data_file, "lt", df, row)
                start = int(row_gt + (row_lt - row_gt - data_length) / 2)
                end = start + data_length
                value = df.iloc[start:end, 2].astype(float).mean() + 3 * df.iloc[start:end, 2].astype(float).std()
                if value > 1:
                    # Suspicious sample — flag for manual review.  (Fix: the
                    # original also assigned an unused `msg = f"\n"` here.)
                    self.logger("WARNING", "wavelogger", f"{data_file} 文件第 {count} 轮 第 {count_i} 个数据可能有问题,需人工手动确认,确认有问题可删除,无问题则保留", "purple")

                data[count].append(value)
                count_i += 1
            else:
                # Inside a high span: a gap longer than two cycles starts a new round.
                row_gt = self.find_point("forward", step, margin, threshold, "c" + str(row), data_file, "gt", df, row)
                if row_gt - row_lt > cycle * 2:
                    count += 1
                    count_i = 1
            row = max(row_gt, row_lt)

        # Header row and header column for up to 8 rounds.
        for i in range(2, 10):
            ws.cell(row=1, column=i).value = f"第{i-1}次测试"
            ws.cell(row=i, column=1).value = f"第{i-1}次精度变化"

        # One column per round, values listed downwards from row 2.
        for i in sorted(data.keys()):
            row, column = 2, i + 1
            for value in data[i]:
                ws.cell(row=row, column=column).value = float(value)
                row += 1

    def execution(self, data_files):
        """Process every capture file and save the combined result workbook."""
        self.logger("INFO", "wavelogger", "正在处理中......", "blue")
        wb = openpyxl.Workbook()
        step, margin, data_length, threshold = 5, 50, 50, 5
        for data_file in data_files:
            ws, df, low, high, cycle = self.preparation(data_file, step, margin, threshold, wb)
            self.single_file_proc(ws, data_file, step, threshold, margin, data_length, df, cycle)

        # result.xlsx lands next to the capture files.
        wd = "/".join(data_files[0].split("/")[:-1])
        filename = wd + "/result.xlsx"
        wb.save(filename)
        wb.close()

    def processing(self):
        """Entry point: validate the directory, process all captures, report timing."""
        time_start = time.time()
        clibs.running[self.idx] = 1  # mark this worker slot as running (shared state in clibs)

        data_files = self.initialization()
        self.execution(data_files)

        self.logger("INFO", "wavelogger", "-" * 60 + "<br>全部处理完毕<br>", "purple")
        time_total = time.time() - time_start
        msg = f"数据处理时间:{time_total // 3600:02.0f} h {time_total % 3600 // 60:02.0f} m {time_total % 60:02.0f} s\n"
        self.logger("INFO", "wavelogger", msg)
|
Reference in New Issue
Block a user