fix merge while merging from main
@@ -1 +0,0 @@
-__all__ = ['brake', 'current', 'iso', 'wavelogger']
@@ -1,49 +1,12 @@
 # coding: utf-8
-from os import scandir
-from os.path import isfile, exists
+from os.path import isfile
 from sys import argv
 from openpyxl import load_workbook
 from time import time, sleep, strftime, localtime
 from threading import Thread
 from pandas import read_csv
 from commons import clibs
 
 
-class GetThreadResult(Thread):
-    def __init__(self, func, args=()):
-        super(GetThreadResult, self).__init__()
-        self.func = func
-        self.args = args
-        self.result = 0
-
-    def run(self):
-        sleep(1)
-        self.result = self.func(*self.args)
-
-    def get_result(self):
-        Thread.join(self)  # wait for the worker thread to finish
-        try:
-            return self.result
-        except Exception as Err:
-            return None
-
-
-def traversal_files(path, w2t):
-    # Purpose: return the folders and files directly under the given path as two lists (subdirectories are not traversed)
-    # Parameter: the path to scan
-    # Returns: the list of folders under the path, and the list of files under the path
-    if not exists(path):
-        msg = f'数据文件夹{path}不存在,请确认后重试......'
-        w2t(msg, 0, 1, 'red')
-    else:
-        dirs = []
-        files = []
-        for item in scandir(path):
-            if item.is_dir():
-                dirs.append(item.path)
-            elif item.is_file():
-                files.append(item.path)
-
-        return dirs, files
+logger = clibs.log_prod
 
 
 def check_files(path, raw_data_dirs, result_files, w2t):
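The duplicated helpers removed above now live in the shared commons.clibs module: the new call sites in this commit reference clibs.traversal_files, clibs.GetThreadResult and clibs.log_prod. The diff does not show clibs itself; judging only from the removed copies and those call sites, a minimal sketch of what it presumably provides looks like this (the log_prod setup in particular is an assumption):

# commons/clibs.py -- sketch inferred from this commit, not the actual module
import logging
from os import scandir
from os.path import exists
from threading import Thread
from time import sleep

log_prod = logging.getLogger('prod')   # assumption: a logger configured elsewhere in commons


class GetThreadResult(Thread):
    # run func(*args) on a worker thread and keep its return value
    def __init__(self, func, args=()):
        super().__init__()
        self.func = func
        self.args = args
        self.result = 0

    def run(self):
        sleep(1)
        self.result = self.func(*self.args)

    def get_result(self):
        Thread.join(self)   # wait for the worker thread to finish
        return self.result


def traversal_files(path, w2t):
    # return the folders and files directly under path, without recursing
    if not exists(path):
        w2t(f'数据文件夹{path}不存在,请确认后重试......', 0, 1, 'red')
    else:
        dirs, files = [], []
        for item in scandir(path):
            if item.is_dir():
                dirs.append(item.path)
            elif item.is_file():
                files.append(item.path)
        return dirs, files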
@@ -83,7 +46,7 @@ def check_files(path, raw_data_dirs, result_files, w2t):
 规则解释:AA/BB/CC 指的是臂展/负载/速度的比例,例如reach66_load100_speed33:66%臂展,100%负载以及33%速度情况下的测试结果文件夹"""
         w2t(msg, 0, 4, 'red')
 
-        _, raw_data_files = traversal_files(raw_data_dir, w2t)
+        _, raw_data_files = clibs.traversal_files(raw_data_dir, w2t)
         if len(raw_data_files) != 3:
             msg = f"数据目录 {raw_data_dir} 下数据文件个数错误,每个数据目录下有且只能有三个以 .data 为后缀的数据文件"
             w2t(msg, 0, 5, 'red')
@@ -109,6 +72,7 @@ def get_configs(configfile, w2t):
 
     return av, rr
 
 
 def now_doing_msg(docs, flag, w2t):
     # Purpose: report which file or directory is currently being processed
     # Parameters: the file or directory, and a 'start' or 'done' flag
@@ -228,7 +192,7 @@ def data_process(result_file, raw_data_dirs, av, rr, vel, trq, estop, w2t):
 
     global stop
     stop = 0
-    t_excel = GetThreadResult(load_workbook, args=(result_file, ))
+    t_excel = clibs.GetThreadResult(load_workbook, args=(result_file, ))
     t_wait = Thread(target=w2t_local, args=('.', 1, w2t))
     t_excel.start()
     t_wait.start()
@@ -242,7 +206,7 @@ def data_process(result_file, raw_data_dirs, av, rr, vel, trq, estop, w2t):
     for raw_data_dir in raw_data_dirs:
         if raw_data_dir.split('\\')[-1].split('_')[0] == prefix:
             now_doing_msg(raw_data_dir, 'start', w2t)
-            _, data_files = traversal_files(raw_data_dir, w2t)
+            _, data_files = clibs.traversal_files(raw_data_dir, w2t)
             # Serial processing mode for the data files ---------------------------------
             # count = 1
             # for data_file in data_files:
@@ -280,7 +244,7 @@ def main(path, vel, trq, estop, w2t):
     # Parameters: the values returned by the initialization function
     # Returns: -
     time_start = time()
-    raw_data_dirs, result_files = traversal_files(path, w2t)
+    raw_data_dirs, result_files = clibs.traversal_files(path, w2t)
 
     try:
         # threads = []
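The data_process hunks keep the existing pattern and only change where GetThreadResult comes from: the slow load_workbook call runs on a result-capturing thread while a second thread (w2t_local) prints progress dots until the shared stop flag is raised. A self-contained sketch of that pattern with generic names (slow_task and print_dots are stand-ins, not functions from this code base):

from threading import Thread
from time import sleep


class GetThreadResult(Thread):
    # same idea as clibs.GetThreadResult: run func(*args) and keep its return value
    def __init__(self, func, args=()):
        super().__init__()
        self.func, self.args, self.result = func, args, None

    def run(self):
        self.result = self.func(*self.args)

    def get_result(self):
        self.join()
        return self.result


stop = 0


def print_dots():
    # stand-in for w2t_local: emit a dot every second until the main flow sets stop
    while not stop:
        print('.', end='', flush=True)
        sleep(1)


def slow_task(seconds):
    sleep(seconds)
    return f"done after {seconds}s"


t_work = GetThreadResult(slow_task, args=(3,))
t_wait = Thread(target=print_dots)
t_work.start()
t_wait.start()
result = t_work.get_result()   # blocks until slow_task returns
stop = 1                       # let the dot-printer exit
t_wait.join()
print('\n', result)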
@@ -1,31 +1,13 @@
 from openpyxl import load_workbook
-from os import scandir
-from os.path import exists
 from sys import argv
 from pandas import read_csv, concat, set_option
 from re import match
 from threading import Thread
 from time import sleep
 from csv import reader, writer
 from commons import clibs
 
 
-class GetThreadResult(Thread):
-    def __init__(self, func, args=()):
-        super(GetThreadResult, self).__init__()
-        self.func = func
-        self.args = args
-        self.result = 0
-
-    def run(self):
-        sleep(1)
-        self.result = self.func(*self.args)
-
-    def get_result(self):
-        Thread.join(self)  # wait for the worker thread to finish
-        try:
-            return self.result
-        except Exception as Err:
-            return None
+logger = clibs.log_prod
 
 
 def w2t_local(msg, wait, w2t):
@@ -38,27 +20,8 @@ def w2t_local(msg, wait, w2t):
             break
 
 
-def traversal_files(path, w2t):
-    # Purpose: return the folders and files directly under the given path as two lists (subdirectories are not traversed)
-    # Parameter: the path to scan
-    # Returns: the list of folders under the path, and the list of files under the path
-    if not exists(path):
-        msg = f'数据文件夹{path}不存在,请确认后重试......'
-        w2t(msg, 0, 8, 'red')
-    else:
-        dirs = []
-        files = []
-        for item in scandir(path):
-            if item.is_dir():
-                dirs.append(item.path)
-            elif item.is_file():
-                files.append(item.path)
-
-        return dirs, files
-
-
 def initialization(path, sub, w2t):
-    _, data_files = traversal_files(path, w2t)
+    _, data_files = clibs.traversal_files(path, w2t)
     count = 0
 
     for data_file in data_files:
@@ -69,8 +32,8 @@ def initialization(path, sub, w2t):
            count += 1
        else:
            if not (match('j[1-7].*\\.data', filename) or match('j[1-7].*\\.csv', filename)):
-                print(f"不合规 {data_file}")
-                msg = f"所有文件必须以 jx_ 开头,以 .data/csv 结尾(x取值1-7),请检查后重新运行。"
+                msg = f"不合规 {data_file}\n"
+                msg += f"所有文件必须以 jx_ 开头,以 .data/csv 结尾(x取值1-7),请检查后重新运行。"
                w2t(msg, 0, 6, 'red')
 
     if not ((sub == 'cycle' and count == 2) or (sub != 'cycle' and count == 1)):
@@ -79,7 +42,7 @@ def initialization(path, sub, w2t):
     return data_files
 
 
-def current_max(data_files, rcs, trqh, w2t):
+def current_max(data_files, rcs, trq, w2t):
     current = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: []}
     for data_file in data_files:
         if data_file.endswith('.data'):
@@ -93,8 +56,8 @@ def current_max(data_files, rcs, trqh, w2t):
         axis = int(data_file.split('\\')[-1].split('_')[0].removeprefix('j'))
         rca = rcs[axis-1]
 
-        col = df.columns.values[trqh-1]
-        c_max = df[col].max()
+        col = df.columns.values[trq-1]
+        c_max = df[col].abs().max()
 
         scale = 1 if data_file.endswith('.csv') else 1000
         _ = abs(c_max/scale*rca)
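Two things change in current_max: the peak is now taken over the absolute value of the column (df[col].abs().max()), so a large negative excursion is no longer missed, and the column index comes from trq instead of trqh. The final value abs(c_max/scale*rca) divides the raw reading by 1000 for .data files and multiplies by rcs[axis-1], which appears to be the axis's rated current. A rough worked example with made-up numbers (the column semantics are an assumption, not stated in the diff):

# Hypothetical values, only to illustrate abs(c_max / scale * rca) from the hunk above.
import pandas as pd

rca = 8.5                                                      # assumed rated current of the axis (rcs[axis-1])
torque_ratio = pd.Series([0.12, -0.95, 0.40, -0.30]) * 1000    # .data-style readings, stored x1000

scale = 1000                               # 1 for .csv files, 1000 for .data files
c_max = torque_ratio.abs().max()           # 950.0 -- the sign no longer hides the peak
peak_current = abs(c_max / scale * rca)    # 0.95 * 8.5 = 8.075
print(peak_current)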
@@ -118,7 +81,7 @@ def current_max(data_files, rcs, trqh, w2t):
     return current
 
 
-def current_avg(data_files, rcs, trqh, w2t):
+def current_avg(data_files, rcs, trq, w2t):
     current = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: []}
     for data_file in data_files:
         if data_file.endswith('.data'):
@@ -132,7 +95,7 @@ def current_avg(data_files, rcs, trqh, w2t):
         axis = int(data_file.split('\\')[-1].split('_')[0].removeprefix('j'))
         rca = rcs[axis-1]
 
-        col = df.columns.values[trqh - 1]
+        col = df.columns.values[trq-1]
         c_std = df[col].std()
         c_avg = df[col].mean()
 
@@ -158,7 +121,7 @@ def current_avg(data_files, rcs, trqh, w2t):
     return current
 
 
-def current_cycle(dur, data_files, rcs, vel, trq, trqh, rpms, w2t):
+def current_cycle(dur, data_files, rcs, rrs, vel, trq, trqh, rpms, w2t):
     result = None
     hold = []
     single = []
@@ -174,7 +137,7 @@ def current_cycle(dur, data_files, rcs, vel, trq, trqh, rpms, w2t):
     w2t(f"正在打开文件 {result},需要 10s 左右", 1, 0, 'orange')
     global stop
     stop = 0
-    t_excel = GetThreadResult(load_workbook, args=(result, ))
+    t_excel = clibs.GetThreadResult(load_workbook, args=(result, ))
     t_wait = Thread(target=w2t_local, args=('.', 1, w2t))
     t_excel.start()
     t_wait.start()
@@ -189,14 +152,14 @@ def current_cycle(dur, data_files, rcs, vel, trq, trqh, rpms, w2t):
     for axis, cur_value in avg.items():
         try:
             shtname = f"J{axis}"
-            wb[shtname]["J4"].value = float(cur_value)
+            wb[shtname]["J4"].value = float(cur_value[0])
         except:
             pass
 
     if dur == 0:
-        p_single(wb, single, vel, trq, rpms, w2t)
+        p_single(wb, single, vel, trq, rpms, rrs, w2t)
     else:
-        p_scenario(wb, single, vel, trq, rpms, dur, w2t)
+        p_scenario(wb, single, vel, trq, rpms, rrs, dur, w2t)
 
     w2t(f"正在保存文件 {result},需要 10s 左右", 1, 0, 'orange')
     stop = 0
@@ -239,7 +202,7 @@ def find_point(data_file, pos, flag, df, _row_s, _row_e, w2t, exitcode, threshol
         w2t(f"[{pos}] {data_file}数据有误,需要检查,无法找到有效起始点或结束点...", 0, 0, 'red')
 
 
-def p_single(wb, single, vel, trq, rpms, w2t):
+def p_single(wb, single, vel, trq, rpms, rrs, w2t):
     # 1. First locate the first zero-velocity point, scanning the data backwards from the end; samples that are zero right at the end are not considered
     # 2. Record that point's position, then keep scanning backwards for a second zero-velocity point; likewise, samples that are zero from the start are not considered
     # 3. Record the second point's position and copy the data between the two points to the corresponding cells
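The renamed p_single keeps the algorithm spelled out in its comments: scan the velocity trace backwards from the end for the first zero-velocity sample (skipping the run of zeros at the very end), keep scanning for a second one, and copy the data lying between the two points. The sketch below implements just that backwards scan with a plain tolerance check; it is a simplified illustration, not the module's find_point logic:

import pandas as pd


def last_two_zero_points(vel, tol=1e-6):
    # Scan vel from the end; skip the trailing run of zeros, then return the row
    # positions of the first and second zero-velocity samples found going backwards.
    i = len(vel) - 1
    while i >= 0 and abs(vel.iloc[i]) < tol:   # the trace ends at rest, so ignore these
        i -= 1
    zeros = []
    while i >= 0 and len(zeros) < 2:
        if abs(vel.iloc[i]) < tol:
            zeros.append(i)
            while i >= 0 and abs(vel.iloc[i]) < tol:   # step past this zero run
                i -= 1
        else:
            i -= 1
    return zeros


vel = pd.Series([0, 0, 1.2, 2.0, 0.5, 0, 0, 1.0, 0.8, 0, 0])
print(last_two_zero_points(vel))   # [6, 1] -> the last motion segment lies between rows 1 and 6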
@@ -253,7 +216,7 @@ def p_single(wb, single, vel, trq, rpms, w2t):
         set_option("display.precision", 2)
         if data_file.endswith('.data'):
             df = read_csv(data_file, sep='\t')
-            rr = float(wb['统计'].cell(row=2, column=axis+1).value)
+            rr = rrs[axis-1]
             addition = 180 / 3.1415926 * 60 / 360 * rr
         elif data_file.endswith('.csv'):
             df = read_csv(data_file, sep=',', encoding='gbk', header=8)
@@ -270,6 +233,7 @@ def p_single(wb, single, vel, trq, rpms, w2t):
         col_names = list(df.columns)
         df_1 = df[col_names[vel-1]].multiply(rpm*addition)
         df_2 = df[col_names[trq-1]].multiply(scale)
+        # print(df_1.abs().max())
         df = concat([df_1, df_2], axis=1)
 
         _step = 5 if data_file.endswith('.csv') else 50
@@ -320,7 +284,7 @@ def p_single(wb, single, vel, trq, rpms, w2t):
             cell.value = None
 
 
-def p_scenario(wb, single, vel, trq, rpms, dur, w2t):
+def p_scenario(wb, single, vel, trq, rpms, rrs, dur, w2t):
     for data_file in single:
         cycle = 0.001
         axis = int(data_file.split('\\')[-1].split('_')[0].removeprefix('j'))
@@ -332,7 +296,7 @@ def p_scenario(wb, single, vel, trq, rpms, dur, w2t):
         set_option("display.precision", 2)
         if data_file.endswith('.data'):
             df = read_csv(data_file, sep='\t')
-            rr = float(wb['统计'].cell(row=2, column=axis+1).value)
+            rr = rrs[axis-1]
             addition = 180 / 3.1415926 * 60 / 360 * rr
         elif data_file.endswith('.csv'):
             df = read_csv(data_file, sep=',', encoding='gbk', header=8)
@@ -376,6 +340,7 @@ def get_configs(configfile, w2t):
     _wb = load_workbook(configfile, read_only=True)
     _ws = _wb['Target']
     rcs = []
+    rrs = []
     rpms = []
     for i in range(2, 9):
         try:
@@ -388,18 +353,23 @@ def get_configs(configfile, w2t):
         except:
             rcs.append(0.0)
 
-    return rpms, rcs
+        try:
+            rrs.append(float(_ws.cell(row=2, column=i).value))
+        except:
+            rrs.append(0.0)
+
+    return rpms, rcs, rrs
 
 
 def main(path, sub, dur, vel, trq, trqh, w2t):
     data_files = initialization(path, sub, w2t)
-    rpms, rcs = get_configs(path + '\\configs.xlsx', w2t)
+    rpms, rcs, rrs = get_configs(path + '\\configs.xlsx', w2t)
     if sub == 'max':
-        current_max(data_files, rcs, trqh, w2t)
+        current_max(data_files, rcs, trq, w2t)
     elif sub == 'avg':
-        current_avg(data_files, rcs, trqh, w2t)
+        current_avg(data_files, rcs, trq, w2t)
     elif sub == 'cycle':
-        current_cycle(dur, data_files, rcs, vel, trq, trqh, rpms, w2t)
+        current_cycle(dur, data_files, rcs, rrs, vel, trq, trqh, rpms, w2t)
     else:
         pass
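get_configs now also collects rrs from row 2 of the Target sheet and returns rpms, rcs, rrs, and p_single/p_scenario read the ratio from rrs[axis-1] instead of re-opening the workbook's 统计 sheet. The constant they build, addition = 180 / 3.1415926 * 60 / 360 * rr, is the rad/s-to-rev/min conversion multiplied by rr; assuming rr is a gear reduction ratio and the velocity column is in rad/s (neither is stated in the diff), the factor works out as follows:

import math

rr = 101.0        # assumed reduction ratio for one axis (rrs[axis-1])
joint_vel = 1.5   # assumed joint velocity sample in rad/s

# rad/s -> deg/s -> rev/s -> rev/min, then up to the motor side via the reduction ratio
addition = 180 / 3.1415926 * 60 / 360 * rr
motor_rpm = joint_vel * addition

# the same conversion written out step by step
deg_per_s = joint_vel * 180 / math.pi
rev_per_min = deg_per_s / 360 * 60
print(motor_rpm, rev_per_min * rr)   # both ≈ 1446.7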
@@ -1,27 +1,10 @@
 # _*_ encoding:utf-8 _*_
 import pdfplumber
 from openpyxl import load_workbook
-from os import scandir, remove
-from os.path import exists
+from os import remove
 from sys import argv
 from commons import clibs
 
 
-def traversal_files(path, w2t):
-    # Purpose: return the folders and files directly under the given path as two lists (subdirectories are not traversed)
-    # Parameter: the path to scan
-    # Returns: the list of folders under the path, and the list of files under the path
-    if not exists(path):
-        msg = f'数据文件夹{path}不存在,请确认后重试......'
-        w2t(msg, 0, 1, 'red')
-    else:
-        dirs = files = []
-        for item in scandir(path):
-            if item.is_dir():
-                dirs.append(item.path)
-            elif item.is_file():
-                files.append(item.path)
-
-        return dirs, files
+logger = clibs.log_prod
 
 
 def p_iso(file, p_files, ws, tmpfile):
@@ -153,7 +136,7 @@ def p_iso_1000(file, p_files, ws, tmpfile):
 
 
 def main(path, w2t):
-    dirs, files = traversal_files(path, 1)
+    dirs, files = clibs.traversal_files(path, 1)
 
     try:
         wb = load_workbook(path + "/iso-results.xlsx")
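One detail worth noting about the local copy removed above: it initialised its lists as dirs = files = [], which binds both names to the same list object, so every directory and file ended up in both returned lists. The other removed copies (and, presumably, clibs.traversal_files) keep two separate lists. A short illustration of the aliasing pitfall:

dirs = files = []           # one list, two names -- the bug in the removed copy above
dirs.append('a_dir')
files.append('a_file')
print(dirs is files, dirs)  # True ['a_dir', 'a_file']

dirs, files = [], []        # two independent lists, as in the other copies
dirs.append('a_dir')
print(files)                # []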
@@ -1,31 +1,10 @@
 import os
 import random
 
 from pandas import read_csv
 from csv import reader
 from sys import argv
-from os.path import exists
-from os import scandir, remove
 from openpyxl import Workbook
 from random import randint
 from commons import clibs
 
-
-def traversal_files(path, w2t):
-    # Purpose: return the folders and files directly under the given path as two lists (subdirectories are not traversed)
-    # Parameter: the path to scan
-    # Returns: the list of folders under the path, and the list of files under the path
-    if not exists(path):
-        msg = f'数据文件夹{path}不存在,请确认后重试......'
-        w2t(msg, 0, 1, 'red')
-    else:
-        dirs = []
-        files = []
-        for item in scandir(path):
-            if item.is_dir():
-                dirs.append(item.path)
-            elif item.is_file():
-                files.append(item.path)
-
-        return dirs, files
+logger = clibs.log_prod
 
 
 def find_point(bof, step, pos, data_file, flag, df, row, w2t):
@@ -95,7 +74,7 @@ def get_cycle_info(data_file, df, row, step, w2t):
 
 
 def initialization(path, w2t):
-    _, data_files = traversal_files(path, w2t)
+    _, data_files = clibs.traversal_files(path, w2t)
 
     for data_file in data_files:
         if not data_file.lower().endswith('.csv'):
@@ -126,7 +105,7 @@ def single_file_proc(ws, data_file, df, low, high, cycle, w2t):
     _step = 5
     _data = {}
     row_max = df.index[-1]-100
-    print(data_file)
+    # print(data_file)
     while _row < row_max:
         if count not in _data.keys():
             _data[count] = []
@@ -149,7 +128,7 @@ def single_file_proc(ws, data_file, df, low, high, cycle, w2t):
         ws.cell(row=1, column=i).value = f"第{i-1}次测试"
         ws.cell(row=i, column=1).value = f"第{i-1}次精度变化"
 
-    print(_data)
+    # print(_data)
     for i in sorted(_data.keys()):
         _row = 2
         _column = i + 1
@@ -162,9 +141,9 @@ def execution(data_files, w2t):
     wb = Workbook()
     for data_file in data_files:
         ws, df, low, high, cycle = preparation(data_file, wb, w2t)
-        print(f"low = {low}")
-        print(f"high = {high}")
-        print(f"cycle = {cycle}")
+        # print(f"low = {low}")
+        # print(f"high = {high}")
+        # print(f"cycle = {cycle}")
         single_file_proc(ws, data_file, df, low, high, cycle, w2t)
 
     wd = data_files[0].split('\\')