import json
import threading
import time
import math
import csv

import pandas
import numpy
from PySide6.QtCore import Signal, QThread

from codes.common import clibs


class DoFactoryTest(QThread):
    output = Signal(str, str)
    curve_map = {
        "周期内平均转矩": ["device_servo_trq_feedback", ],
        "周期内最大速度": ["hw_joint_vel_feedback", ],
    }

    def __init__(self, dir_path, interval, procs, /):
        super().__init__()
        self.dir_path = dir_path
        self.interval = int(interval) if interval != "" else clibs.CYCLE
        self.procs = procs
        self.idx = 6
        self.curves = []

    def logger(self, level, module, content, color="black", error="", flag="both"):
        flag = "cursor" if level.upper() == "DEBUG" else "both"
        clibs.logger(level, module, content, color, flag, signal=self.output)
        if level.upper() == "ERROR":
            raise Exception(f"{error} | {content}")

    def initialization(self, data_dirs, data_files):
        @clibs.handle_exception
        def check_files():
            # Collect the curve names for every enabled procedure
            for proc_name, is_enabled in self.procs.items():
                if is_enabled:
                    self.curves.extend(self.curve_map[proc_name])
            if len(self.curves) == 0:
                self.logger("ERROR", "factory", "未查询到需要记录数据的曲线,至少选择一个!", "red", "CurveNameError")

            # The data directory must contain exactly one project file (*.zip) and no sub-directories
            if len(data_dirs) != 0 or len(data_files) != 1:
                self.logger("ERROR", "factory", "初始路径下不允许有文件夹,且初始路径下只能存在一个工程文件 —— *.zip,确认后重新运行!", "red", "InitFileError")
            if not data_files[0].endswith(".zip"):
                self.logger("ERROR", "factory", f"{data_files[0]} 不是一个有效的工程文件,需确认!", "red", "ProjectFileError")

            return data_files[0]

        @clibs.handle_exception
        def get_configs():
            robot_type = None
            msg_id = clibs.c_hr.execution("controller.get_params")
            records = clibs.c_hr.get_from_id(msg_id)
            for record in records:
                if "请求发送成功" not in record[0]:
                    robot_type = eval(record[0])["data"]["robot_type"]

            # Pull the robot configuration file from the controller to the local data directory
            server_file = f"/home/luoshi/bin/controller/robot_cfg/{robot_type}/{robot_type}.cfg"
            local_file = self.dir_path + f"/{robot_type}.cfg"
            clibs.c_pd.pull_file_from_server(server_file, local_file)

            try:
                with open(local_file, mode="r", encoding="utf-8") as f_config:
                    configs = json.load(f_config)
            except Exception as Err:
                self.logger("ERROR", "factory", f"无法打开 {local_file}\n{Err}", "red", "OpenFileError")

            # Maximum angular speed, rated current, reduction ratio, rated speed
            version = configs["VERSION"]
            m_avs = configs["MOTION"]["JOINT_MAX_SPEED"]
            self.logger("INFO", "factory", f"get_configs: 机型文件版本 {robot_type}_{version}")
            self.logger("INFO", "factory", f"get_configs: 各关节角速度 {m_avs}")

            return m_avs

        self.logger("INFO", "factory", "正在做初始化校验和配置,这可能需要一点时间......", "green")
        prj_file = check_files()
        if prj_file is None:
            return
        avs = get_configs()

        params = {
            "prj_file": prj_file,
            "interval": self.interval,
            "avs": avs,
        }
        self.logger("INFO", "factory", "数据目录合规性检查结束,未发现问题......", "green")
        return params

    @clibs.handle_exception
    def change_curve_state(self, stat):
        # Enable/disable the diagnosis curves for all six channels of every selected curve
        display_pdo_params = [{"name": name, "channel": chl} for name in self.curves for chl in range(6)]
        clibs.c_hr.execution("diagnosis.open", open=stat, display_open=stat)
        clibs.c_hr.execution("diagnosis.set_params", display_pdo_params=display_pdo_params)

    @clibs.handle_exception
    def run_rl(self, params):
        prj_file, interval = params["prj_file"], params["interval"]
        # 1. Close the diagnosis curves, trigger and then release the soft e-stop so a possibly
        #    running robot stops; switch to manual mode and power off
        self.change_curve_state(False)
        clibs.c_md.r_soft_estop(0)
        clibs.c_md.r_soft_estop(1)
        clibs.c_md.r_reset_estop()
        clibs.c_md.r_clear_alarm()
        clibs.c_md.write_act(False)
        time.sleep(1)  # give the curves time to close completely

        # 2. Reload the project, pp-to-main, then switch to auto mode and power on
        prj_name = ".".join(prj_file.split("/")[-1].split(".")[:-1])
        prj_path = f"{prj_name}/_build/{prj_name}.prj"
        clibs.c_hr.execution("overview.reload", prj_path=prj_path, tasks=["factory"])
        clibs.c_hr.execution("rl_task.pp_to_main", tasks=["factory"])
        clibs.c_hr.execution("state.switch_auto")
        clibs.c_hr.execution("state.switch_motor_on")

        # 3. Start running the program
        self.logger("INFO", "factory", "正在采集场景工程的周期,大概1min左右......", "blue")
        clibs.c_hr.execution("rl_task.set_run_params", loop_mode=True, override=1.0)
        clibs.c_hr.execution("rl_task.run", tasks=["factory"])
        t_start = time.time()
        while True:
            if clibs.c_md.read_ready_to_go() == 1:
                clibs.c_md.write_act(True)
                break
            if (time.time() - t_start) > 15:
                self.logger("ERROR", "factory", "15s 内未收到机器人的运行信号,需要确认RL程序编写正确并正常执行...", "red", "ReadySignalTimeoutError")
            time.sleep(clibs.INTERVAL)

        # 4. Get the initial data (cycle time, first per-axis averages), then open the diagnosis curves and collect
        time.sleep(clibs.INTERVAL * 10)  # wait for scenario_time to be initialized in the RL program
        t_start = time.time()
        while True:
            scenario_time = float(f"{float(clibs.c_md.read_scenario_time()):.2f}")
            if scenario_time != 0:
                self.logger("INFO", "factory", f"耐久工程的周期时间:{scenario_time}s | 单轮次执行时间:{scenario_time+interval}~{scenario_time*2+interval}")
                break
            if (time.time() - t_start) > 900:
                self.logger("ERROR", "factory", "900s内未收到耐久工程的周期时间,需要确认RL程序和工具通信交互是否正常执行(支持最长工程周期时间为300s)......", "red", "GetScenarioTimeError")
            time.sleep(clibs.INTERVAL)

        # 6. Prepare the data output files (one CSV per enabled procedure, header row first)
        for proc_name, is_enabled in self.procs.items():
            if not is_enabled:
                continue
            with open(f"{self.dir_path}/{proc_name}.csv", mode="a+", newline="") as f_csv:
                csv_writer = csv.writer(f_csv)
                for curve in self.curve_map[proc_name]:
                    titles = [f"{curve}_{i}" for i in range(6)]
                    titles.insert(0, "time")
                    csv_writer.writerow(titles)
        # 7. Start collecting
        count = 0
        while clibs.running[self.idx]:
            this_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
            next_time_1 = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()+scenario_time+interval+1))
            next_time_2 = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()+scenario_time+interval+1+scenario_time))
            self.logger("INFO", "factory", f"[{this_time}] 当前次数:{count:09d} | 预计下次数据更新时间:{next_time_1}~{next_time_2}", "#008B8B")
            count += 1

            # At a fixed interval, update the data once: wait for the capture flag, open the curves,
            # record one scenario cycle, then close the curves
            time.sleep(interval)
            while True:
                capture_start = clibs.c_md.read_capture_start()
                if capture_start == 1:
                    break
                time.sleep(clibs.INTERVAL / 10)

            self.change_curve_state(True)
            time.sleep(scenario_time)
            end_time = time.time()
            start_time = end_time - scenario_time
            self.change_curve_state(False)

            # Save the data and process the output
            self.gen_results(params, start_time, end_time)
        else:
            self.change_curve_state(False)
            self.logger("INFO", "factory", "后台数据清零完成,现在可以重新运行其他程序。", "green")

    @clibs.handle_exception
    def gen_results(self, params, start_time, end_time):
        s_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(start_time))
        e_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(end_time))
        try:
            clibs.lock.acquire(True)
            clibs.cursor.execute(f"SELECT content FROM logs WHERE timestamp BETWEEN '{s_time}' AND '{e_time}' AND content LIKE '%diagnosis.result%' ORDER BY id ASC")
            records = clibs.cursor.fetchall()
        finally:
            clibs.lock.release()
        self.data_proc(records, params)

    @clibs.handle_exception
    def data_proc(self, records, params):
        # Each enabled procedure is processed in its own daemon thread
        for proc_name, is_enabled in self.procs.items():
            if not is_enabled:
                continue
            if proc_name == "周期内平均转矩":
                # get_avg_trq(records, params, w2t)
                t = threading.Thread(target=self.get_avg_trq, args=(records, params, proc_name))
                t.daemon = True
                t.start()
            elif proc_name == "周期内最大速度":
                # get_joint_max_vel(records, params, w2t)
                t = threading.Thread(target=self.get_joint_max_vel, args=(records, params, proc_name))
                t.daemon = True
                t.start()

    @clibs.handle_exception
    def get_avg_trq(self, records, params, proc_name):
        d_trq, results = [[], [], [], [], [], []], [time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))]
        for record in records:
            data = eval(record[0])["data"]
            for item in data:
                d_item = reversed(item["value"])
                for axis in range(6):
                    if item.get("channel", None) == axis and item.get("name", None) == "device_servo_trq_feedback":
                        d_trq[axis].extend(d_item)

        # Per-axis RMS torque over the cycle: sqrt(mean((trq * 1.27 / 1000) ** 2))
        for axis in range(6):
            df = pandas.DataFrame.from_dict({"device_servo_trq_feedback": d_trq[axis]})
            _ = math.sqrt(numpy.square(df[df.columns[0]].values * 1.27 / 1000).sum() / len(df))
            results.append(_)

        path = "/".join(params["prj_file"].split("/")[:-1])
        with open(f"{path}/{proc_name}.csv", mode="a+", newline="") as f_csv:
            csv_writer = csv.writer(f_csv)
            csv_writer.writerow(results)

    @clibs.handle_exception
    def get_joint_max_vel(self, records, params, proc_name):
        d_vel, results = [[], [], [], [], [], []], [time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))]
        for record in records:
            data = eval(record[0])["data"]
            for item in data:
                d_item = reversed(item["value"])
                for axis in range(6):
                    if item.get("channel", None) == axis and item.get("name", None) == "hw_joint_vel_feedback":
                        d_vel[axis].extend(d_item)

        # Per-axis maximum joint velocity over the cycle
        for axis in range(6):
            df = pandas.DataFrame.from_dict({"hw_joint_vel_feedback": d_vel[axis]})
            _ = df.max().iloc[0]
            results.append(_)

        path = "/".join(params["prj_file"].split("/")[:-1])
        with open(f"{path}/{proc_name}.csv", mode="a+", newline="") as f_csv:
            csv_writer = csv.writer(f_csv)
            csv_writer.writerow(results)
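    # Shape of one `diagnosis.result` record assumed by get_avg_trq/get_joint_max_vel above
    # (inferred from the field accesses in this module; values are illustrative only):
    #   {"data": [{"name": "device_servo_trq_feedback", "channel": 0, "value": [...]}, ...]}
    # `value` appears to hold one channel's samples newest-first, hence the reversed() call.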
    @staticmethod
    def detect_db_size():
        @clibs.db_lock
        @clibs.handle_exception
        def release_memory():
            # Keep the logs table from growing without bound: once it exceeds line_number rows,
            # keep only the newest `leftover` rows and re-number the ids
            line_number = 20000
            leftover = 4000  # roughly 200 s of records
            clibs.cursor.execute("SELECT COUNT(id) FROM logs")
            len_records = clibs.cursor.fetchone()[0]
            if len_records > line_number:
                del_num = len_records - leftover + 1
                clibs.cursor.execute(f"DELETE FROM logs WHERE id < {del_num}")
                clibs.cursor.execute(f"UPDATE logs SET id=(id-{del_num - 1}) WHERE id > {del_num - 1}")
                clibs.cursor.execute(f"UPDATE sqlite_sequence SET seq = {leftover + 1} WHERE name = 'logs' ")
                clibs.cursor.execute("VACUUM")

        while True:
            release_memory()
            time.sleep(clibs.INTERVAL * 10)

    @clibs.handle_exception
    def processing(self):
        time_start = time.time()
        clibs.running[self.idx] = 1
        if clibs.status["hmi"] != 1 or clibs.status["md"] != 1:
            self.logger("ERROR", "factory", "processing: 需要在网络设置中连接HMI以及Modbus通信!", "red", "NetworkError")

        t = threading.Thread(target=self.detect_db_size)
        t.daemon = True
        t.start()

        data_dirs, data_files = clibs.traversal_files(self.dir_path, self.output)
        params = self.initialization(data_dirs, data_files)
        clibs.c_pd.push_prj_to_server(params["prj_file"])
        self.run_rl(params)

        self.logger("INFO", "factory", "-"*60 + "\n全部处理完毕\n", "purple")
        time_total = time.time() - time_start
        msg = f"处理时间:{time_total // 3600:02.0f} h {time_total % 3600 // 60:02.0f} m {time_total % 60:02.0f} s"
        self.logger("INFO", "factory", msg)