Switch to reading parameters from the robot-model configuration file
@@ -1,3 +1,4 @@
+import json
 import threading
 import openpyxl
 import pandas
@@ -8,26 +9,26 @@ from common import clibs

 def initialization(path, w2t, insert_logdb):
     _, data_files = clibs.traversal_files(path, w2t)
-    count = 0
+    count, config_file = 0, None
     for data_file in data_files:
         filename = data_file.split("/")[-1]
-        if filename == "configs.xlsx":
+        if re.match(".*\\.cfg", filename):
+            config_file = filename
             count += 1
         elif filename == "T_电机电流.xlsx":
             ...
             count += 1
         else:
             if not re.match("j[1-7].*\\.data", filename):
                 msg = f"不合规 {data_file}\n"
-                msg += "所有数据文件必须以 j[1-7]_ 开头,以 .data 结尾,比如j1_abcdef.data\n配置文件需要命名为\"configs.xlsx\",结果文件需要命名为\"T_电机电流.xlsx\"\n"
-                msg += "需要有配置文件\"configs.xlsx\"表格,以及数据处理文件\"T_电机电流.xlsx\"表格,请检查整改后重新运行\n"
+                msg += "所有数据文件必须以 j[1-7]_ 开头,以 .data 结尾,比如j1_abcdef.data,请检查整改后重新运行\n"
                 w2t(msg, "red", "FilenameIllegal")

-    if count != 1:
-        msg = "需要有配置文件\"configs.xlsx\"表格,以及数据处理文件\"T_电机电流.xlsx\"表格,请检查整改后重新运行\n"
+    if count != 2:
+        msg = "需要有一个机型配置文件\"*.cfg\",以及一个数据处理文件\"T_电机电流.xlsx\"表格,请检查整改后重新运行\n"
         w2t(msg, "red", "FilenameIllegal")

     insert_logdb("INFO", "current", f"current: 获取必要文件:{data_files}")
-    return data_files
+    return data_files, config_file


 def current_max(data_files, rcs, trq, w2t, insert_logdb):
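For reference, a minimal standalone sketch of the filename rules the new initialization enforces. The regular expressions are the ones used in this hunk; the sample file names are invented:

    import re

    # Exactly one robot-model config (*.cfg) plus the result workbook must be present;
    # every data file must look like j<axis>_*.data with axis in 1-7.
    samples = ["XB7.cfg", "T_电机电流.xlsx", "j1_abcdef.data", "j8_bad.data", "readme.txt"]
    for name in samples:
        is_cfg = re.match(".*\\.cfg", name) is not None
        is_result = name == "T_电机电流.xlsx"
        is_data = re.match("j[1-7].*\\.data", name) is not None
        print(name, is_cfg, is_result, is_data)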
@@ -71,7 +72,7 @@ def current_max(data_files, rcs, trq, w2t, insert_logdb):
     return current


-def current_avg(data_files, rcs, trq, w2t, insert_logdb):
+def current_avg(data_files, rcs, trqh, w2t, insert_logdb):
     insert_logdb("INFO", "current", "AVG: 正在处理平均电流值逻辑...")
     current = {1: [], 2: [], 3: [], 4: [], 5: [], 6: []}
     for data_file in data_files:
@@ -86,7 +87,7 @@ def current_avg(data_files, rcs, trq, w2t, insert_logdb):
         rca = rcs[axis-1]
         insert_logdb("INFO", "current", f"AVG: 最大列数为 {cols},{axis} 轴的额定电流为 {rca}")

-        col = df.columns.values[trq-1]
+        col = df.columns.values[trqh-1]
         c_std = df[col].std()
         c_avg = df[col].mean()

@@ -113,7 +114,7 @@ def current_avg(data_files, rcs, trq, w2t, insert_logdb):
     return current


-def current_cycle(data_files, vel, trq, trqh, rrs, rcs, rpms, w2t, insert_logdb):
+def current_cycle(data_files, vel, trq, trqh, sensor, rrs, rcs, params, w2t, insert_logdb):
     result, hold, single, scenario, dur_time = None, [], [], [], 0
     for data_file in data_files:
         filename = data_file.split("/")[-1]
@@ -137,6 +138,12 @@ def current_cycle(data_files, vel, trq, trqh, rrs, rcs, rpms, w2t, insert_logdb)
     t_progress.start()
     wb = t_excel.get_result()
+
+    ws = wb["统计"]
+    for idx in range(len(params)):
+        row = idx + 2
+        for col in range(2, 8):
+            ws.cell(row=row, column=col).value = params[idx][col-2]

     if hold:
         avg = current_avg(hold, rcs, trqh, w2t, insert_logdb)
         for axis, cur_value in avg.items():
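The new block above copies `params` into the 统计 worksheet: parameter row `idx` lands on worksheet row `idx + 2`, and its six per-axis values fill columns B through G. A small sketch of that mapping, assuming `params` is a sequence of six-value rows (the numbers are placeholders):

    import openpyxl

    params = [
        [8.2, 8.2, 4.1, 2.4, 2.4, 2.4],   # e.g. rated currents per axis (placeholder numbers)
        [121, 121, 101, 81, 81, 51],      # e.g. reduction ratios per axis (placeholder numbers)
    ]
    wb = openpyxl.Workbook()
    ws = wb.create_sheet("统计")
    for idx in range(len(params)):
        row = idx + 2                      # data starts on worksheet row 2
        for col in range(2, 8):            # columns B..G
            ws.cell(row=row, column=col).value = params[idx][col - 2]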
@@ -144,9 +151,9 @@ def current_cycle(data_files, vel, trq, trqh, rrs, rcs, rpms, w2t, insert_logdb)
             wb[sht_name]["O4"].value = float(cur_value[0])

     if dur_time == 0:
-        p_single(wb, single, vel, rrs, w2t, insert_logdb)
+        p_single(wb, single, vel, trq, sensor, rrs, w2t, insert_logdb)
     else:
-        p_scenario(wb, scenario, vel, rrs, dur_time, w2t, insert_logdb)
+        p_scenario(wb, scenario, vel, trq, sensor, rrs, dur_time, w2t)

     clibs.stop = True
     w2t(f"正在保存文件 {result},需要 10s 左右......\n")
@@ -227,12 +234,12 @@ def get_row_number(threshold, flag, df, row_s, row_e, axis, insert_logdb):
         else:
             count_1 = 0

-    places = {"start": "起点", "middle": "中间点", "end": "终点"}
+    places = {"start": "起点", "middle": "中间点", "end": "终点"}  # 因为是终点数据,所以可能有异常
     insert_logdb("WARNING", "current", f"{axis} 轴获取{places[flag]}数据 {row_e} 可能有异常,需关注!")
     return row_e


-def p_single(wb, single, vel, rrs, w2t, insert_logdb):
+def p_single(wb, single, vel, trq, sensor, rrs, w2t, insert_logdb):
     # 1. 先找到第一个速度为零的点,数据从后往前找,一开始就是零的情况不予考虑
     # 2. 记录第一个点的位置,继续向前查找第二个速度为零的点,同理,一开始为零的点不予考虑
     # 3. 记录第二个点的位置,并将其中的数据拷贝至对应位置
@@ -313,11 +320,11 @@ def p_single(wb, single, vel, rrs, w2t, insert_logdb):
         if abs(row_end+row_start-2*row_middle) > 1000:
             insert_logdb("WARNING", "current", f"{axis} 轴数据占空比异常!")

-        data = []
+        data, first_c, second_c, third_c = [], vel-1, trq-1, sensor-1
         for row in range(row_start, row_end+1):
-            data.append(df_origin.iloc[row, 0])
-            data.append(df_origin.iloc[row, 1])
-            data.append(df_origin.iloc[row, 2])
+            data.append(df_origin.iloc[row, first_c])
+            data.append(df_origin.iloc[row, second_c])
+            data.append(df_origin.iloc[row, third_c])

         i = 0
         for row in ws.iter_rows(min_row=2, min_col=2, max_row=150000, max_col=4):
@@ -328,14 +335,14 @@ def p_single(wb, single, vel, rrs, w2t, insert_logdb):
                     _ = f"{data[i]:.2f}"
                     cell.value = float(_)
                     i += 1
-                except Exception as Err:
+                except Exception:
                     if i % 3 == 0:
                         ws.cell((i//3)+2, 1).value = None
                     cell.value = None
                     i += 1


-def p_scenario(wb, scenario, vel, rrs, dur_time, w2t, insert_logdb):
+def p_scenario(wb, scenario, vel, trq, sensor, rrs, dur_time, w2t):
     for data_file in scenario:
         cycle = 0.001
         axis = int(data_file.split("/")[-1].split("_")[0].removeprefix("j"))
@@ -354,11 +361,11 @@ def p_scenario(wb, scenario, vel, rrs, dur_time, w2t, insert_logdb):
         if row_end > df.index[-1]:
             w2t(f"位置超限:{data_file} 共有 {df.index[-1]} 条数据,无法取到第 {row_end} 条数据,需要确认场景周期时间...", "red", "DataOverLimit")

-        data = []
+        data, first_c, second_c, third_c = [], vel-1, trq-1, sensor-1
         for row in range(row_start, row_end+1):
-            data.append(df_origin.iloc[row, 0])
-            data.append(df_origin.iloc[row, 1])
-            data.append(df_origin.iloc[row, 2])
+            data.append(df_origin.iloc[row, first_c])
+            data.append(df_origin.iloc[row, second_c])
+            data.append(df_origin.iloc[row, third_c])

         i = 0
         for row in ws.iter_rows(min_row=2, min_col=2, max_row=250000, max_col=4):
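In both p_single and p_scenario the copied columns are no longer hard-coded to the first three DataFrame columns; they come from the 1-based column settings `vel`, `trq` and `sensor`, converted to 0-based `iloc` indices. A minimal illustration of that conversion (the column layout below is invented):

    import pandas

    # pretend data file: 1-based column 2 = velocity, 3 = torque, 4 = sensor torque
    df_origin = pandas.DataFrame({
        "time": [0.0, 0.001, 0.002],
        "velocity": [0.0, 10.5, 20.1],
        "torque": [0.1, 0.4, 0.9],
        "sensor": [0.1, 0.5, 1.0],
    })
    vel, trq, sensor = 2, 3, 4                         # 1-based positions, as configured
    first_c, second_c, third_c = vel - 1, trq - 1, sensor - 1
    data = []
    for row in range(len(df_origin)):
        data.append(df_origin.iloc[row, first_c])
        data.append(df_origin.iloc[row, second_c])
        data.append(df_origin.iloc[row, third_c])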
@@ -369,34 +376,44 @@ def p_scenario(wb, scenario, vel, rrs, dur_time, w2t, insert_logdb):
                     _ = f"{data[i]:.2f}"
                     cell.value = float(_)
                     i += 1
-                except Exception as Err:
+                except Exception:
                     cell.value = None
                     if i % 3 == 0:
                         ws.cell((i//3)+2, 1).value = None
                     i += 1


-def get_configs(configfile, w2t, insert_logdb):
+def get_configs(config_file, w2t, insert_logdb):
     try:
-        wb = openpyxl.load_workbook(configfile, read_only=True)
-        ws = wb["Target"]
+        with open(config_file, mode="r", encoding="utf-8") as f_config:
+            configs = json.load(f_config)
     except Exception as Err:
-        insert_logdb("ERROR", "current", f"无法打开 {configfile},获取配置文件参数错误 {Err}")
-        w2t(f"无法打开 {configfile}", color="red", desc="OpenFileError")
+        insert_logdb("ERROR", "current", f"get_config: 无法打开 {config_file},获取配置文件参数错误 {Err}")
+        w2t(f"无法打开 {config_file}", color="red", desc="OpenFileError")

-    # 最大角速度,额定电流,减速比,额定转速
-    rrs, avs, rcs, rpms = [], [], [], []
-    for i in range(2, 8):
-        rrs.append(abs(float(ws.cell(row=2, column=i).value)))
-        avs.append(abs(float(ws.cell(row=3, column=i).value)))
-        rpms.append(abs(float(ws.cell(row=4, column=i).value)))
-        rcs.append(abs(float(ws.cell(row=6, column=i).value)))
+    version = configs["VERSION"]
+    rcs = [abs(_) for _ in configs["MOTOR"]["RATED_TORQUE"]]  # 电机额定电流,rc for rated current
+    m_max_rcs = []  # 电机最大电流
+    m_hold_rcs = []  # 电机堵转电流
+    m_rts = []  # 电机额定转矩rt for rated torque
+    m_max_rts = []  # 电机峰值转矩
+    m_r_rpms = []  # 电机额定转速
+    m_max_rpms = []  # 电机最大转速
+    m_tcs = []  # 电机转矩常数,tc for torque constant
+    rrs = [abs(_) for _ in configs["TRANSMISSION"]["REDUCTION_RATIO_NUMERATOR"]]  # 减速比,rr for reduction ratio
+    r_max_sst = []  # 减速器最大启停转矩,sst for start and stop torque
+    r_max_t = []  # 减速器瞬时最大转矩
+    sc = []  # 采样周期,sc for sample cycle
+    r_rts = []  # 减速器额定转矩
+    r_r_rpms = []  # 减速器额定转速
+    r_life_cycle = []  # 减速器L10寿命
+    r_avg_t = []  # 减速器平均负载转矩允许最大值

-    insert_logdb("INFO", "current", f"current: 获取减速比:{rrs}")
-    insert_logdb("INFO", "current", f"current: 获取角速度:{avs}")
-    insert_logdb("INFO", "current", f"current: 获取额定电流:{rcs}")
-    insert_logdb("INFO", "current", f"current: 获取额定转速:{rpms}")
-    return rrs, avs, rcs, rpms
+    insert_logdb("INFO", "current", f"get_configs: 机型文件版本 {config_file}_{version}")
+    insert_logdb("INFO", "current", f"get_configs: 减速比 {rrs}")
+    insert_logdb("INFO", "current", f"get_configs: 额定电流 {rcs}")
+    return rcs, m_max_rcs, m_hold_rcs, m_rts, m_max_rts, m_r_rpms, m_max_rpms, m_tcs, rrs, r_max_sst, r_max_t, sc, r_rts, r_r_rpms, r_life_cycle, r_avg_t


 def main():
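get_configs now reads the robot-model `.cfg` file as JSON instead of an Excel "Target" sheet. Only three keys are actually referenced in this commit (`VERSION`, `MOTOR.RATED_TORQUE`, `TRANSMISSION.REDUCTION_RATIO_NUMERATOR`); a minimal sketch of a config carrying just those keys and of how the new code consumes it (the numbers are placeholders, and a real .cfg presumably contains many more fields):

    import json

    cfg_text = """
    {
        "VERSION": "v1.0.0",
        "MOTOR": {"RATED_TORQUE": [8.2, 8.2, 4.1, 2.4, 2.4, 2.4]},
        "TRANSMISSION": {"REDUCTION_RATIO_NUMERATOR": [121, 121, 101, -81, 81, 51]}
    }
    """
    configs = json.loads(cfg_text)          # the commit uses json.load(f_config) on the file
    version = configs["VERSION"]
    rcs = [abs(_) for _ in configs["MOTOR"]["RATED_TORQUE"]]
    rrs = [abs(_) for _ in configs["TRANSMISSION"]["REDUCTION_RATIO_NUMERATOR"]]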
@@ -405,18 +422,20 @@ def main():
     vel = int(clibs.data_dp["_vel"])
     trq = int(clibs.data_dp["_trq"])
     trqh = int(clibs.data_dp["_trqh"])
+    sensor = int(clibs.data_dp["_sensor"])
     w2t = clibs.w2t
     insert_logdb = clibs.insert_logdb
     insert_logdb("INFO", "current", "current: 参数初始化成功")

-    data_files = initialization(path, w2t, insert_logdb)
-    rrs, avs, rcs, rpms = get_configs(path + "\\configs.xlsx", w2t, insert_logdb)
+    data_files, config_file = initialization(path, w2t, insert_logdb)
+    params = get_configs(f"{path}/{config_file}", w2t, insert_logdb)
+    rcs, rrs = params[0], params[8]
     if sub == "max":
         current_max(data_files, rcs, trq, w2t, insert_logdb)
     elif sub == "avg":
-        current_avg(data_files, rcs, trq, w2t, insert_logdb)
+        current_avg(data_files, rcs, trqh, w2t, insert_logdb)
     elif sub == "cycle":
-        current_cycle(data_files, vel, trq, trqh, rrs, rcs, rpms, w2t, insert_logdb)
+        current_cycle(data_files, vel, trq, trqh, sensor, rrs, rcs, params, w2t, insert_logdb)


 if __name__ == '__main__':
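main() unpacks the get_configs result by position: `params[0]` is `rcs` (rated currents) and `params[8]` is `rrs` (reduction ratios), matching the 16-element return order above; the other slots are still empty lists in this commit. Purely as an illustration of that ordering, not something the commit does, the tuple could also be unpacked by name:

    # placeholder 16-tuple in the same order as the return statement of get_configs
    params = ([8.2] * 6, [], [], [], [], [], [], [],
              [121] * 6, [], [], [], [], [], [], [])
    (rcs, m_max_rcs, m_hold_rcs, m_rts, m_max_rts, m_r_rpms, m_max_rpms, m_tcs,
     rrs, r_max_sst, r_max_t, sc, r_rts, r_r_rpms, r_life_cycle, r_avg_t) = params
    assert rcs == params[0] and rrs == params[8]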
@@ -1,5 +1,183 @@

+import pdfplumber
+import openpyxl
+import os
 from common import clibs
-def main():
-    print("iso")


+def p_iso(file, p_files, ws, tmpfile):
+    p_files.append(file)
+
+    pdf = pdfplumber.open(file)
+    with open(tmpfile, mode="w", encoding="utf-8") as fb:
+        for page in pdf.pages:
+            fb.write(page.extract_text())
+    with open(tmpfile, mode="r", encoding="utf-8") as fb:
+        lines = fb.readlines()
+    lines = [line for line in lines if not line.startswith("Page ")]
+    for line in lines:
+        if line.strip() == "Pose Accuracy and Repeatability":
+            index = lines.index(line)
+            ws.cell(row=3, column=7).value = float(lines[index+4].split()[1])
+            ws.cell(row=4, column=7).value = float(lines[index+5].split()[1])
+            ws.cell(row=5, column=7).value = float(lines[index+6].split()[1])
+            ws.cell(row=6, column=7).value = float(lines[index+7].split()[1])
+            ws.cell(row=7, column=7).value = float(lines[index+8].split()[1])
+
+            ws.cell(row=8, column=7).value = float(lines[index+4].split()[2])
+            ws.cell(row=9, column=7).value = float(lines[index+5].split()[2])
+            ws.cell(row=10, column=7).value = float(lines[index+6].split()[2])
+            ws.cell(row=11, column=7).value = float(lines[index+7].split()[2])
+            ws.cell(row=12, column=7).value = float(lines[index+8].split()[2])
+        elif line.strip() == "Pose Accuracy Variation":
+            index = lines.index(line)
+            ws.cell(row=13, column=7).value = float(lines[index+4].split()[1])
+            ws.cell(row=14, column=7).value = float(lines[index+5].split()[1])
+            ws.cell(row=15, column=7).value = float(lines[index+6].split()[1])
+        elif line.strip() == "Distance Accuracy":
+            index = lines.index(line)
+            ws.cell(row=16, column=7).value = float(lines[index + 4].split()[1])
+            ws.cell(row=17, column=7).value = float(lines[index + 4].split()[2])
+        elif line.strip() == "Stabilisation Time and Overshoot":
+            index = lines.index(line)
+            ws.cell(row=18, column=7).value = float(lines[index + 7].split()[3])
+            ws.cell(row=19, column=7).value = float(lines[index + 7].split()[2])
+        elif line.strip() == "Velocity Accuracy and Repeatability":
+            index = lines.index(line)
+            ws.cell(row=20, column=7).value = float(lines[index + 4].split()[1])
+            ws.cell(row=21, column=7).value = float(lines[index + 4].split()[2])
+            ws.cell(row=22, column=7).value = float(lines[index + 4].split()[3])
+        elif line.strip()[:31] == "Path Accuracy and Repeatability":
+            index = lines.index(line)
+            ws.cell(row=29, column=7).value = float(lines[index + 4].split()[1])
+            ws.cell(row=30, column=7).value = float(lines[index + 4].split()[2])
+        elif line.strip() == "Corner Overshoot and Roundoff":
+            index = lines.index(line)
+            ws.cell(row=35, column=7).value = float(lines[index + 4].split()[1])
+            ws.cell(row=36, column=7).value = float(lines[index + 4].split()[2])
+        elif line.strip() == "Robot Weaving":
+            index = lines.index(line)
+            ws.cell(row=41, column=7).value = float(lines[index + 4].split()[2])
+            ws.cell(row=42, column=7).value = float(lines[index + 4].split()[3])
+            ws.cell(row=43, column=7).value = float(lines[index + 4].split()[4])
+        else:
+            pass
+    pdf.close()
+
+
+def p_iso_100(file, p_files, ws, tmpfile):
+    p_files.append(file)
+
+    pdf = pdfplumber.open(file)
+    with open(tmpfile, mode="w", encoding="utf-8") as fb:
+        for page in pdf.pages:
+            fb.write(page.extract_text())
+    with open(tmpfile, mode="r", encoding="utf-8") as fb:
+        lines = fb.readlines()
+    lines = [line for line in lines if not line.startswith("Page ")]
+    for line in lines:
+        if line.strip() == "Velocity Accuracy and Repeatability":
+            index = lines.index(line)
+            ws.cell(row=26, column=7).value = float(lines[index + 4].split()[1])
+            ws.cell(row=27, column=7).value = float(lines[index + 4].split()[2])
+            ws.cell(row=28, column=7).value = float(lines[index + 4].split()[3])
+        elif line.strip()[:31] == "Path Accuracy and Repeatability":
+            index = lines.index(line)
+            ws.cell(row=33, column=7).value = float(lines[index + 4].split()[1])
+            ws.cell(row=34, column=7).value = float(lines[index + 4].split()[2])
+        elif line.strip() == "Corner Overshoot and Roundoff":
+            index = lines.index(line)
+            ws.cell(row=39, column=7).value = float(lines[index + 4].split()[1])
+            ws.cell(row=40, column=7).value = float(lines[index + 4].split()[2])
+        elif line.strip() == "Robot Weaving":
+            index = lines.index(line)
+            ws.cell(row=47, column=7).value = float(lines[index + 4].split()[2])
+            ws.cell(row=48, column=7).value = float(lines[index + 4].split()[3])
+            ws.cell(row=49, column=7).value = float(lines[index + 4].split()[4])
+        else:
+            pass
+    pdf.close()
+
+
+def p_iso_1000(file, p_files, ws, tmpfile):
+    p_files.append(file)
+
+    pdf = pdfplumber.open(file)
+    with open(tmpfile, mode="w", encoding="utf-8") as fb:
+        for page in pdf.pages:
+            fb.write(page.extract_text())
+    with open(tmpfile, mode="r", encoding="utf-8") as fb:
+        lines = fb.readlines()
+    lines = [line for line in lines if not line.startswith("Page ")]
+    for line in lines:
+        if line.strip() == "Velocity Accuracy and Repeatability":
+            index = lines.index(line)
+            ws.cell(row=23, column=7).value = float(lines[index + 4].split()[1])
+            ws.cell(row=24, column=7).value = float(lines[index + 4].split()[2])
+            ws.cell(row=25, column=7).value = float(lines[index + 4].split()[3])
+        elif line.strip()[:31] == "Path Accuracy and Repeatability":
+            index = lines.index(line)
+            ws.cell(row=31, column=7).value = float(lines[index + 4].split()[1])
+            ws.cell(row=32, column=7).value = float(lines[index + 4].split()[2])
+        elif line.strip() == "Corner Overshoot and Roundoff":
+            index = lines.index(line)
+            ws.cell(row=37, column=7).value = float(lines[index + 4].split()[1])
+            ws.cell(row=38, column=7).value = float(lines[index + 4].split()[2])
+        elif line.strip() == "Robot Weaving":
+            index = lines.index(line)
+            ws.cell(row=44, column=7).value = float(lines[index + 4].split()[2])
+            ws.cell(row=45, column=7).value = float(lines[index + 4].split()[3])
+            ws.cell(row=46, column=7).value = float(lines[index + 4].split()[4])
+        else:
+            pass
+    pdf.close()
+
+
+def main():
+    path = clibs.data_dp["_path"]
+    w2t = clibs.w2t
+    dirs, files = clibs.traversal_files(path, 1)
+
+    filename = f"{path}/iso-results.xlsx"
+    tmpfile = f"{path}/data.txt"
+    wb, ws = None, None
+    try:
+        wb = openpyxl.load_workbook(filename)
+        ws = wb.active
+        for i in range(3, 50):
+            ws.cell(row=i, column=7).value = None
+    except Exception as Err:
+        clibs.insert_logdb("ERROR", "iso", f"main: 无法打开文件 {filename}")
+        w2t(f"发生错误:{Err}", "red", "FileOpenError")
+
+    p_files = []
+    for file in files:
+        if file.endswith(".pdf") and file.split("/")[-1] == "ISO.pdf":
+            w2t(f"正在处理{file}......\n")
+            p_iso(file, p_files, ws, tmpfile)
+            w2t(f"文件{file}已处理完毕。\n")
+
+        elif file.endswith(".pdf") and file.split("/")[-1] == "ISO-V100.pdf":
+            w2t(f"正在处理{file}......\n")
+            p_iso_100(file, p_files, ws, tmpfile)
+            w2t(f"文件{file}已处理完毕。\n")
+
+        elif file.endswith(".pdf") and file.split("/")[-1] == "ISO-V1000.pdf":
+            w2t(f"正在处理{file}......\n")
+            p_iso_1000(file, p_files, ws, tmpfile)
+            w2t(f"文件{file}已处理完毕。\n")
+
+        else:
+            pass
+    wb.save(filename)
+    wb.close()
+
+    if len(p_files) == 0:
+        w2t(f"目录 {path} 下没有需要处理的文件,需要确认......", "red")
+    else:
+        os.remove(tmpfile)
+        w2t("------------------------------------------\n")
+        w2t("所有文件均已处理完毕!\n")
+
+
+if __name__ == "__main__":
+    main()
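The three p_iso* helpers follow one pattern: dump the report text with pdfplumber (pdfplumber.open and page.extract_text, as in the diff), drop the "Page " header lines, locate a section title, then read numbers at fixed line and field offsets below it. A condensed sketch of that lookup on an in-memory text blob; the layout below is an invented stand-in for the real PDF report:

    # the extracted text is faked here so the parsing step can run on its own
    extracted = """Distance Accuracy
    header line
    unit line
    column titles
    AD  0.123  0.456
    """
    lines = [line for line in extracted.splitlines(keepends=True) if not line.startswith("Page ")]
    for line in lines:
        if line.strip() == "Distance Accuracy":
            index = lines.index(line)
            accuracy = float(lines[index + 4].split()[1])        # -> 0.123
            repeatability = float(lines[index + 4].split()[2])   # -> 0.456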
@@ -1,5 +1,148 @@

+import pandas
+import csv
+import openpyxl
 from common import clibs
-def main():
-    print("wavelogger")


+def find_point(bof, step, margin, threshold, pos, data_file, flag, df, row, w2t):
+    # bof: backward or forward
+    # pos: used for debug
+    # flag: greater than or lower than
+    row_target = None
+    row_origin = df.index[-1] - margin + 1
+    if flag == "gt":
+        while 0 < row < row_origin:
+            value = float(df.iloc[row, 2])
+            if value > threshold:
+                row = row - step if bof == "backward" else row + step
+                continue
+            else:
+                row_target = row - step if bof == "backward" else row + step
+                break
+        else:
+            if bof == "backward":
+                clibs.insert_logdb("ERROR", "wavelogger", f"find_point-gt: [{pos}] 在 {data_file} 中,无法正确识别数据,需要确认...")
+                w2t(f"[{pos}] 在 {data_file} 中,无法正确识别数据,需要确认...", "red", "DataError")
+            elif bof == "forward":
+                row_target = row + margin  # to end while loop in function `single_file_proc`
+    elif flag == "lt":
+        while 0 < row < row_origin:
+            value = float(df.iloc[row, 2])
+            if value < threshold:
+                row = row - step if bof == "backward" else row + step
+                continue
+            else:
+                row_target = row - step if bof == "backward" else row + step
+                break
+        else:
+            if bof == "backward":
+                clibs.insert_logdb("ERROR", "wavelogger", f"find_point-lt: [{pos}] 在 {data_file} 中,无法正确识别数据,需要确认...")
+                w2t(f"[{pos}] 在 {data_file} 中,无法正确识别数据,需要确认...", "red", "DataError")
+            elif bof == "forward":
+                row_target = row + margin  # to end while loop in function `single_file_proc`
+    return row_target
+
+
+def get_cycle_info(data_file, step, margin, threshold, w2t):
+    # end -> middle: low
+    # middle -> start: high
+    # 1. 从最后读取数据,无论是大于1还是小于1,都舍弃,找到相反的值的起始点
+    # 2. 从起始点,继续往前寻找,找到与之数值相反的中间点
+    # 3. 从中间点,继续往前寻找,找到与之数值相反的结束点,至此,得到了高低数值的时间区间以及一轮的周期时间
+    csv_reader = csv.reader(open(data_file))
+    begin = int(next(csv_reader)[1])
+    df = pandas.read_csv(data_file, sep=",", encoding="gbk", skip_blank_lines=False, header=begin - 1, on_bad_lines="skip")
+    row = df.index[-1] - margin
+    if float(df.iloc[row, 2]) < threshold:
+        row = find_point("backward", step, margin, threshold, "a1", data_file, "lt", df, row, w2t)
+
+    _row = find_point("backward", step, margin, threshold, "a2", data_file, "gt", df, row, w2t)
+    _row = find_point("backward", step, margin, threshold, "a3", data_file, "lt", df, _row, w2t)
+    row_end = find_point("backward", step, margin, threshold, "a4", data_file, "gt", df, _row, w2t)
+    row_middle = find_point("backward", step, margin, threshold, "a5", data_file, "lt", df, row_end, w2t)
+    row_start = find_point("backward", step, margin, threshold, "a6", data_file, "gt", df, row_middle, w2t)
+    # print(f"row_end = {row_end}")
+    # print(f"row_middle = {row_middle}")
+    # print(f"row_start = {row_start}")
+    return row_end-row_middle, row_middle-row_start, row_end-row_start, df
+
+
+def initialization(path, w2t):
+    _, data_files = clibs.traversal_files(path, w2t)
+
+    for data_file in data_files:
+        if not data_file.lower().endswith(".csv"):
+            clibs.insert_logdb("ERROR", "wavelogger", f"init: {data_file} 文件后缀错误,只允许 .csv 文件,需要确认!")
+            w2t(f"{data_file} 文件后缀错误,只允许 .csv 文件,需要确认!", "red", "FileTypeError")
+
+    return data_files
+
+
+def preparation(data_file, step, margin, threshold, wb, w2t):
+    shtname = data_file.split("/")[-1].split(".")[0]
+    ws = wb.create_sheet(shtname)
+    low, high, cycle, df = get_cycle_info(data_file, step, margin, threshold, w2t)
+
+    return ws, df, low, high, cycle
+
+
+def single_file_proc(ws, data_file, step, threshold, margin, data_length, df, cycle, w2t):
+    row, row_lt, row_gt, count, count_i, data = 1, 1, 1, 1, 1, {}
+    row_max = df.index[-1] - margin
+    while row < row_max:
+        if count not in data.keys():
+            data[count] = []
+
+        value = float(df.iloc[row, 2])
+        if value < threshold:
+            row_lt = find_point("forward", step, margin, threshold, "c"+str(row), data_file, "lt", df, row, w2t)
+            start = int(row_gt + (row_lt - row_gt - data_length) / 2)
+            end = start + data_length
+            value = df.iloc[start:end, 2].mean() + 3 * df.iloc[start:end, 2].std()
+            if value > 1:
+                msg = f"{data_file} 文件第 {count} 轮 第 {count_i} 个数据可能有问题,需人工手动确认,确认有问题可删除,无问题则保留"
+                clibs.insert_logdb("WARNING", "wavelogger", msg)
+                w2t(msg, "orange")
+            data[count].append(value)
+            count_i += 1
+        else:
+            row_gt = find_point("forward", step, margin, threshold, "c"+str(row), data_file, "gt", df, row, w2t)
+            if row_gt - row_lt > cycle * 2:
+                count += 1
+                count_i = 1
+        row = max(row_gt, row_lt)
+    for i in range(2, 10):
+        ws.cell(row=1, column=i).value = f"第{i-1}次测试"
+        ws.cell(row=i, column=1).value = f"第{i-1}次精度变化"
+
+    for i in sorted(data.keys()):
+        row, column = 2, i + 1
+        for value in data[i]:
+            ws.cell(row=row, column=column).value = float(value)
+            row += 1
+
+
+def execution(data_files, w2t):
+    wb = openpyxl.Workbook()
+    step, margin, data_length, threshold = 5, 50, 50, 5
+    for data_file in data_files:
+        ws, df, low, high, cycle = preparation(data_file, step, margin, threshold, wb, w2t)
+        single_file_proc(ws, data_file, step, threshold, margin, data_length, df, cycle, w2t)
+
+    wd = "/".join(data_files[0].split("/")[:-1])
+    filename = wd + "/result.xlsx"
+    wb.save(filename)
+    wb.close()
+    w2t("----------------------------------------\n")
+    w2t("所有文件均已处理完毕\n")
+
+
+def main():
+    path = clibs.data_dp["_path"]
+    w2t = clibs.w2t
+    data_files = initialization(path, w2t)
+    execution(data_files, w2t)
+
+
+if __name__ == "__main__":
+    main()
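single_file_proc flags a measurement window when its statistic `mean + 3 * std` exceeds 1, roughly a 3-sigma check on the samples between two threshold crossings. A tiny self-contained illustration of that statistic; the sample values are made up:

    import pandas

    window = pandas.Series([0.20, 0.22, 0.21, 0.95])   # made-up samples from one window
    value = window.mean() + 3 * window.std()
    if value > 1:
        print(f"possible outlier window, statistic = {value:.3f}")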