Commit 769fc826 by dongshufeng

refactor(all): remove unused files

parent 40a0f0f2
use std::collections::HashMap;
#[cfg(feature = "tools")]
use std::collections::HashSet;
#[cfg(feature = "tools")]
use eig_aoe::aoe::AoeModel;
#[cfg(feature = "tools")]
use eig_aoe::check_loop_in_computing_points;
use eig_domain::{get_csv_str, PbAlarmDefine, PbAlarmDefine_AlarmLevel, PbAlarmDefines};
#[cfg(feature = "tools")]
use eig_domain::{Measurement, MINIMUM_AOE_ID, MINIMUM_POINT_ID, Transport};
pub use model::*;
pub mod model;
/// HTTP header name that carries the client's access token.
pub const HEADER_TOKEN: &str = "access-token";
// ============= 对eig包中的对象进行apidoc注释-开始
/**
* @api {枚举_文件操作类型} /PbFile_FileOperation PbFile_FileOperation
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} UPDATE 更新
* @apiSuccess {String} DELETE 删除
*/
/**
* @api {枚举_告警等级} /PbAlarmDefine_AlarmLevel PbAlarmDefine_AlarmLevel
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} Emergency 紧急
* @apiSuccess {String} Important 严重
* @apiSuccess {String} Common 普通
*/
/**
* @api {枚举_告警类型} /PbEigAlarm_AlarmType PbEigAlarm_AlarmType
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} invalidPoints 无效测点
* @apiSuccess {String} invalidTransport 无效通道
* @apiSuccess {String} invalidAOE 无效AOE
* @apiSuccess {String} alarmLevel1 告警等级1
* @apiSuccess {String} alarmLevel2 告警等级2
* @apiSuccess {String} badData 坏数据
* @apiSuccess {String} userDefine 用户自定义
*/
/**
* @api {枚举_告警状态} /PbEigAlarm_AlarmStatus PbEigAlarm_AlarmStatus
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} occur occur
* @apiSuccess {String} disappear disappear
*/
/**
* @api {枚举_设点状态} /PbSetPointResult_SetPointStatus PbSetPointResult_SetPointStatus
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} YkCreated YkCreated
* @apiSuccess {String} YtCreated YtCreated
* @apiSuccess {String} YkSuccess YkSuccess
* @apiSuccess {String} YtSuccess YtSuccess
* @apiSuccess {String} YkFailTimeout YkFailTimeout
* @apiSuccess {String} YtFailTimeout YtFailTimeout
* @apiSuccess {String} YkFailTooBusy YkFailTooBusy
* @apiSuccess {String} YtFailTooBusy YtFailTooBusy
* @apiSuccess {String} YkFailProtocol YkFailProtocol
* @apiSuccess {String} YtFailProtocol YtFailProtocol
*/
/**
* @api {枚举_事件结果} /PbEventResult_EventEvalResult PbEventResult_EventEvalResult
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} Happen 已发生
* @apiSuccess {String} NotHappen 未发生
* @apiSuccess {String} Canceled 取消
* @apiSuccess {String} Error 错误
*/
/**
* @api {枚举_动作结果} /PbActionResult_ActionExeResult PbActionResult_ActionExeResult
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} NotRun 未执行
* @apiSuccess {String} Success 执行成功
* @apiSuccess {String} Failed 执行失败
*/
/**
* @api {枚举_HTTP方法} /PbRequest_RequestType PbRequest_RequestType
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} Get Get
* @apiSuccess {String} Post Post
* @apiSuccess {String} Put Put
* @apiSuccess {String} Delete Delete
* @apiSuccess {String} Test Test
*/
/**
* @api {PbRequest} /PbRequest PbRequest
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} [id] 请求id
* @apiSuccess {String} url 请求url
* @apiSuccess {PbRequest_RequestType} [function] 请求方法
* @apiSuccess {String} content 请求体,base64
*/
/**
* @api {PbResponse} /PbResponse PbResponse
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} [request_id] 请求id
* @apiSuccess {bool} [is_ok] 是否成功
* @apiSuccess {String} content 返回内容
*/
/**
* @api {PbFile} /PbFile PbFile
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {String} fileName 文件名
* @apiSuccess {u8[]} fileContent 文件内容
* @apiSuccess {PbFile_FileOperation} [op] 操作类型
* @apiSuccess {bool} [is_zip] 是否是压缩文件
*/
/**
* @api {PbPointValues} /PbPointValues PbPointValues
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {PbDiscreteValue[]} dValues dValues
* @apiSuccess {PbAnalogValue[]} aValues aValues
*/
/**
* @api {PbDiscreteValue} /PbDiscreteValue PbDiscreteValue
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} [pointId] 测点id
* @apiSuccess {i64} [measValue] 量测值
* @apiSuccess {u64} [timestamp] 时间戳
* @apiSuccess {i64} [origValue] 原始值
*/
/**
* @api {PbAnalogValue} /PbAnalogValue PbAnalogValue
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} [pointId] 测点id
* @apiSuccess {f64} [measValue] 量测值
* @apiSuccess {u64} [timestamp] 时间戳
* @apiSuccess {f64} [origValue] 原始值
*/
/**
* @api {PbEigPingRes} /PbEigPingRes PbEigPingRes
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {String} id id
* @apiSuccess {String} name 名称
* @apiSuccess {String} ip ip
* @apiSuccess {String} desc 描述
* @apiSuccess {bool} [is_ems] is_ems
*/
/**
* @api {告警定义集} /PbAlarmDefines PbAlarmDefines
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {PbAlarmDefine[]} defines 告警定义列表
*/
/**
* @api {告警定义} /PbAlarmDefine PbAlarmDefine
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u32} [id] 告警定义id
* @apiSuccess {String} rule 告警规则
* @apiSuccess {PbAlarmDefine_AlarmLevel} [level] 告警等级
* @apiSuccess {String} name 名称
* @apiSuccess {String} desc 描述
* @apiSuccess {String} owner owner
*/
/**
* @api {告警结果集} /PbEigAlarms PbEigAlarms
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {PbEigAlarm[]} alarms 告警列表
*/
/**
* @api {告警结果} /PbEigAlarm PbEigAlarm
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} [timestamp] 时间戳
* @apiSuccess {u64} [id] 告警id
* @apiSuccess {PbEigAlarm_AlarmType} [alarm_type] 告警类型
* @apiSuccess {PbEigAlarm_AlarmStatus} [status] 告警状态
* @apiSuccess {u32} [define_id] 告警定义id
* @apiSuccess {String} content 告警内容
*/
/**
* @api {设点结果集} /PbSetPointResults PbSetPointResults
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {PbSetPointResult[]} results 设点结果列表
*/
/**
* @api {设点结果} /PbSetPointResult PbSetPointResult
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} [sender_id] sender_id
* @apiSuccess {u64} [point_id] 测点id
* @apiSuccess {u64} [create_time] 创建时间
* @apiSuccess {u64} [finish_time] 完成时间
* @apiSuccess {u64} [command] command
* @apiSuccess {PbSetPointResult_SetPointStatus} [status] 状态
*/
/**
* @api {AOE执行结果集} /PbAoeResults PbAoeResults
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {PbAoeResult[]} results AOE执行结果列表
*/
/**
* @api {AOE执行结果} /PbAoeResult PbAoeResult
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} [aoe_id] AOE_id
* @apiSuccess {u64} [start_time] 开始时间
* @apiSuccess {u64} [end_time] 结束时间
* @apiSuccess {PbEventResult[]} event_results 事件结果列表
* @apiSuccess {PbActionResult[]} action_results 动作结果列表
*/
/**
* @api {事件结果} /PbEventResult PbEventResult
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} [id] id
* @apiSuccess {u64} [start_time] 开始时间
* @apiSuccess {u64} [end_time] 结束时间
* @apiSuccess {PbEventResult_EventEvalResult} [final_result] 事件结果
*/
/**
* @api {动作结果} /PbActionResult PbActionResult
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} [source_id] 源节点id
* @apiSuccess {u64} [target_id] 目标节点id
* @apiSuccess {u64} [start_time] 开始时间
* @apiSuccess {u64} [end_time] 结束时间
* @apiSuccess {PbActionResult_ActionExeResult} [final_result] 动作结果
* @apiSuccess {u32} [fail_code] 失败code
* @apiSuccess {u64[]} yk_points yk_points
* @apiSuccess {i64[]} yk_values yk_values
* @apiSuccess {u64[]} yt_points yt_points
* @apiSuccess {f64[]} yt_values yt_values
* @apiSuccess {String[]} variables variables
* @apiSuccess {f64[]} var_values var_values
*/
// ============= 对eig包中的对象进行apidoc注释-结束
/// Name of the model database.
pub const DB_NAME: &str = "eig_model";
// The history store is kept at about 1 GB.
/// Timestamp display/parse format.
pub const DATE_FORMAT: &str = "%Y-%m-%d %H:%M:%S";
/// Date-based naming format for history database files.
pub const DB_NAME_FORMAT: &str = "%Y/%m-%d";
/// Buffer size of the operation-receiving channel.
pub const OPERATION_RECEIVE_BUFF_NUM: usize = 100;
// max items for one query, only for history data
pub const ONE_QUERY_LIMIT: usize = 10000;
/// Export a whole `PbAlarmDefines` collection as CSV text.
///
/// Thin wrapper around [`export_alarm_def_csv`]: borrows each definition in
/// `defines` and forwards the resulting slice together with the localization map.
pub fn export_alarm_def_csv2(defines: &PbAlarmDefines, text_map: &HashMap<String, String>) -> String {
    let borrowed: Vec<&PbAlarmDefine> = defines.defines.iter().collect();
    export_alarm_def_csv(&borrowed, text_map)
}
/// Render alarm definitions as CSV text.
///
/// Header labels are looked up in `text_map` (a localization map); when a key
/// is missing, the English default is used. Each definition becomes one data
/// row, numbered from 1.
pub fn export_alarm_def_csv(defines: &[&PbAlarmDefine], text_map: &HashMap<String, String>) -> String {
    // Header row.
    let mut result = format!(
        "{},{},{},{},{},{},{}\n",
        text_map.get("index").unwrap_or(&"Index".to_string()),
        text_map.get("alarm_define_id").unwrap_or(&"ID".to_string()),
        text_map.get("name").unwrap_or(&"Name".to_string()),
        text_map.get("rule").unwrap_or(&"Rule".to_string()),
        text_map.get("level").unwrap_or(&"Level".to_string()),
        text_map.get("desc").unwrap_or(&"Description".to_string()),
        text_map.get("owner").unwrap_or(&"Owner".to_string()),
    );
    // Data rows. Iterate with `enumerate` instead of indexing `0..len()`
    // (idiomatic; avoids per-access bounds checks).
    for (i, d) in defines.iter().enumerate() {
        let level = match d.level() {
            PbAlarmDefine_AlarmLevel::Emergency => "Emergency",
            PbAlarmDefine_AlarmLevel::Important => "Important",
            PbAlarmDefine_AlarmLevel::Common => "Common",
        };
        result += &format!(
            "{},{},{},\"{}\",{},{},{}\n",
            i + 1,
            d.id(),
            get_csv_str(d.name()),
            get_csv_str(d.rule()),
            level,
            get_csv_str(d.desc()),
            get_csv_str(d.owners())
        );
    }
    result
}
#[cfg(feature = "tools")]
/// Validate a set of measurement points before loading them.
///
/// Returns `true` only when every check passes:
/// * point ids are unique and not below `MINIMUM_POINT_ID`;
/// * non-empty alias ids are unique across all points;
/// * `lower_limit <= upper_limit` for every point;
/// * every computing point has a non-empty expression;
/// * there is no dependency loop among computing points.
///
/// Each failure is logged with `log::warn!` and aborts the check immediately.
fn check_points(points: &[Measurement]) -> bool {
    let mut map: HashMap<u64, Measurement> = HashMap::with_capacity(points.len());
    // Check ids; duplicates are rejected so the user can fix the model.
    for p in points {
        if let std::collections::hash_map::Entry::Vacant(e) = map.entry(p.point_id) {
            e.insert(p.clone());
        } else {
            log::warn!("!!Check not pass: Same point id found: {}", p.point_id);
            return false;
        }
        if p.point_id < MINIMUM_POINT_ID {
            log::warn!("!!Check not pass: Point id is too small: {}", p.point_id);
            return false;
        }
    }
    // Check alias names: a non-empty alias must map to exactly one point.
    let mut alias_to_id: HashMap<String, u64> = HashMap::with_capacity(map.len());
    for m in map.values() {
        if !m.alias_id.is_empty() {
            let key = m.alias_id.clone();
            if let Some(item) = alias_to_id.get(&key) {
                log::warn!("!!Check not pass: Point {} and Point {} have same alias {}",
                    item, m.point_id, key);
                return false;
            }
            alias_to_id.insert(key, m.point_id);
        }
        // Check lower limit against upper limit.
        if m.lower_limit > m.upper_limit {
            log::warn!("!!Check not pass: The lower limit cannot exceed the upper limit");
            return false;
        }
        // A computing point must have a non-blank expression.
        if m.is_computing_point && m.expression.is_empty() {
            // Fixed typo in the log message: "Computing pint" -> "Computing point".
            log::warn!("!!Check not pass: Computing point expression is null, id {}", m.point_id);
            return false;
        }
    }
    // Check whether there is a loop in computing points.
    if let Some(point_id) = check_loop_in_computing_points(&map, &alias_to_id) {
        log::warn!("!!There is loop in computing points, id {}", point_id);
        return false;
    }
    true
}
#[cfg(feature = "tools")]
/// Validate transports against the known point set and the list of serial ports.
///
/// Per-transport validation is delegated to `eig_domain::utils::check_transport`;
/// the occupied paths and remote points seen so far are accumulated across
/// calls so that conflicts *between* transports are also caught. Logs the
/// first failure and returns `false`; returns `true` when all pass.
fn check_transports(tps: &[Transport], all_points: &HashMap<u64, Measurement>,
    all_serials: &Vec<String>) -> bool {
    use eig_domain::utils::check_transport;
    let mut occupied_paths = HashSet::with_capacity(tps.len());
    let mut remote_points = HashSet::with_capacity(tps.len());
    for tp in tps {
        if let Err(alarm) = check_transport(tp, all_points, &mut occupied_paths,
            &mut remote_points, all_serials) {
            log::warn!("!!Check not pass: {}", alarm.content());
            return false;
        }
    }
    true
}
#[cfg(feature = "tools")]
/// Validate a set of AOE models before loading them.
///
/// Checks, in order: AOE ids are unique and do not collide with transport ids
/// (`tp_ids`); non-zero ids are at least `MINIMUM_AOE_ID`; the trigger type is
/// valid; no AOE defines the same variable twice. Logs and returns `false` on
/// the first failure.
fn check_aoes(aoes: &[AoeModel], tp_ids: &HashSet<u64>) -> bool {
    use eig_aoe::aoe::model::check_trigger_type;
    let mut ids: HashSet<u64> = HashSet::with_capacity(aoes.len());
    for aoe in aoes {
        // The id must be new among AOEs and must not reuse a transport id.
        if !ids.contains(&aoe.id) && !tp_ids.contains(&aoe.id) {
            ids.insert(aoe.id);
        } else {
            log::warn!("!!Check not pass: ID {} already exists", aoe.id);
            return false;
        }
        if aoe.id != 0 && aoe.id < MINIMUM_AOE_ID {
            log::warn!("!!Check not pass: AOE ID should be greater than {}", MINIMUM_AOE_ID);
            return false;
        }
        // Check trigger type: a non-empty return value is an error description.
        // NOTE(review): no warning is logged on this path — presumably the
        // callee reports details; confirm.
        if !check_trigger_type(&aoe.trigger_type).is_empty() {
            return false;
        }
        // Reject duplicate variable definitions (O(n^2) scan; variable lists
        // are small in practice).
        for i in 0..aoe.variables.len() {
            let var_now = aoe.variables[i].0.clone();
            for j in (i + 1)..aoe.variables.len() {
                if var_now.eq(&aoe.variables[j].0) {
                    // "!!" prefix added for consistency with every other
                    // failure log in this family of checks.
                    log::warn!("!!Check not pass: AOE variable {} has duplicate definition", var_now);
                    return false;
                }
            }
        }
    }
    true
}
\ No newline at end of file
use std::collections::{BTreeMap, HashMap, HashSet};
use async_channel::{Receiver, Sender};
use serde::{Deserialize, Serialize};
use eig_aoe::aoe::AoeModel;
use eig_domain::*;
use eig_domain::web::EigConfig;
use eig_domain::proto::aoe::{PbAoeResult, PbAoeResults};
// request and response topic
/// Topic name used for registration requests/responses.
pub const TOPIC_REGISTER: &str = "register";
......@@ -125,17 +120,6 @@ pub struct PointsQuery {
pub name: Option<String>,
}
/**
* @apiDefine TransportQuery
* @apiQuery {String} [id] 通道id,","间隔
* @apiQuery {TransportType} [transport_type] 通道类型,可选字符串为:ModbusTcpClient、ModbusTcpServer、ModbusRtuClient、ModbusRtuServer、DLT645Client、Mqtt、Iec104Client、Iec104Server、HYMqtt、Unknown
*/
/// Query parameters for filtering transports.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct TransportQuery {
    // Comma-separated transport ids; `None` means no id filter.
    pub id: Option<String>,
    // Optional transport-type filter.
    pub transport_type: Option<TransportType>,
}
// todo: api doc needed
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct LogQuery {
......@@ -155,29 +139,6 @@ impl LogQuery {
}
/**
* @api {枚举_实时消息} /EigRtMessage EigRtMessage
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {Object} P2pMeasure {"P2pMeasure": tuple(String, String, PbPointValues)}
* @apiSuccess {Object} PlccMeasure {"PlccMeasure": tuple(String, PbPointValues)}
* @apiSuccess {Object} PlccAlarm {"PlccAlarm": tuple(String, PbEigAlarms)}
* @apiSuccess {Object} PlccAoe {"PlccAoe": tuple(String, PbAoeResult)}
* @apiSuccess {Object} PlccCommand {"PlccCommand": tuple(String, PbSetPointResults)}
* @apiSuccess {Object} PlccLog {"PlccLog": tuple(String, u8[])}
* @apiSuccess {String} Test Test
*/
/// Real-time message pushed to registered subscribers.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum EigRtMessage {
    // Peer-to-peer measurements: (String, String, values) — assumes the two
    // strings are sender/receiver ids; TODO confirm against the dispatcher.
    P2pMeasure(String, String, PbPointValues),
    // Measurements from a plcc, keyed by its id.
    PlccMeasure(String, PbPointValues),
    // Alarms from a plcc.
    PlccAlarm(String, PbEigAlarms),
    // One AOE execution result from a plcc.
    PlccAoe(String, PbAoeResult),
    // Set-point (command) results from a plcc.
    PlccCommand(String, PbSetPointResults),
    // Raw log bytes from a plcc.
    PlccLog(String, Vec<u8>),
    // Test message.
    Test,
}
/**
* @api {EigRtRegister} /EigRtRegister EigRtRegister
* @apiPrivate
* @apiGroup A_Object
......@@ -207,24 +168,6 @@ pub struct EigRtRegister {
pub log: Vec<bool>,
}
/// Bookkeeping of real-time message subscriptions per subscriber.
#[derive(Clone, Debug)]
pub struct RtMsgRegisterData {
    // key is bee id (lcc id) in following four maps
    pub measure_registers: HashMap<String, Vec<(u32, HashSet<u64>)>>,
    pub alarm_registers: HashSet<String>,
    pub aoe_registers: HashSet<String>,
    pub cmd_registers: HashSet<String>,
    pub log_registers: HashSet<String>,
}
/// Control messages for the real-time registration handler.
pub enum RtMsgRegisterMsg {
    // Set initial values — assumes (register id, point id -> value); TODO confirm.
    SetInits(u32, HashMap<u64, MeasureValue>),
    // Attach a sender used to push messages to the subscriber with this key.
    SetRender(String, Sender<EigRtMessage>),
    // Store a subscriber's registration.
    SetRegister(EigRtRegister),
    // Cancel the registration with the given key.
    RemoveRegister(String),
}
/**
* @api {UserPub} /UserPub UserPub
......@@ -432,28 +375,6 @@ pub struct PointControl {
}
/**
* @api {PointControl2} /PointControl2 PointControl2
* @apiGroup A_Object
* @apiSuccess {SetIntValue2[]} discretes discretes
* @apiSuccess {SetFloatValue2[]} analogs analogs
*/
/// Batch point-control payload (variant 2): discrete and analog set commands.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PointControl2 {
    pub discretes: Vec<SetIntValue2>,
    pub analogs: Vec<SetFloatValue2>,
}
/**
* @api {PointControl3} /PointControl3 PointControl3
* @apiGroup A_Object
* @apiSuccess {SetPointValue[]} commands commands
*/
/// Batch point-control payload (variant 3): generic set-point commands.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PointControl3 {
    pub commands: Vec<SetPointValue>,
}
/**
* @api {AoeAction} /AoeAction AoeAction
* @apiGroup A_Enum
* @apiSuccess {Object} StartAoe 开始AOE,{"StartAoe": u64}
......@@ -481,26 +402,6 @@ pub struct AoeControl {
}
/// Requests for querying, saving and deleting model objects.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum ModelRequest {
    QueryConfig,
    // Model query operations.
    QueryPoints(PointsQuery),
    QueryTransports(TransportQuery),
    QueryAoes(AoeQuery),
    // Model save operations.
    SavePoints(Vec<Measurement>),
    SavePoint(Measurement),
    SaveTransports(Vec<Transport>),
    SaveAoes(Vec<AoeModel>),
    SaveAoe(AoeModel),
    SaveConfig(EigConfig),
    // Delete operations (by id).
    DeletePoints(Vec<u64>),
    DeleteTransports(Vec<u64>),
    DeleteAoes(Vec<u64>),
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum HisRequest {
// 查询历史数据操作
QueryMeasures(HisQuery),
......@@ -527,31 +428,6 @@ pub enum CommonRequest {
}
/// Alarm-related requests.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum AlarmRequest {
    // Query the total number of alarms.
    QueryAlarmCount,
    // Query all alarm rules.
    QueryAlarmDefines,
    // Query the alarm rule with the given id.
    QueryAlarmDefine(u32),
    // Create and save alarm rule(s).
    SaveAlarmDefine(PbAlarmDefine),
    SaveAlarmDefines(PbAlarmDefines),
    // Delete the alarm rules with the given ids.
    DeleteAlarmDefines(Vec<u32>),
    // Query the alarm notification settings.
    QueryAlarmConfig,
    // Configure alarm notification.
    SetAlarmConfig(AlarmConfig),
    // Confirm alarms (user id, ids of the alarms being confirmed).
    ConfirmAlarms(u16, Vec<u64>),
    // Query confirmed alarms.
    QueryConfirmStatus,
    // Query the number of unconfirmed alarms.
    QueryUnconfirmedNumber,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum ControlRequest {
Point(PointControl),
Aoe(AoeControl),
......@@ -567,59 +443,6 @@ pub enum StatusRequest {
}
/// Top-level request sent to a plcc; each variant wraps one request family.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum PlccRequest {
    Model(ModelRequest),
    History(HisRequest),
    Auth(AuthRequest),
    Alarm(AlarmRequest),
    Control(ControlRequest),
    Status(StatusRequest),
    Common(CommonRequest),
    // other
    Log(LogQuery),
    ImportAllModels(PbFiles),
    ExportAllModels(String),
    Test,
}
/// Top-level response returned by a plcc; variants mirror the request kinds.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum PlccResponse {
    // model
    EigConfig(EigConfig),
    Points(Vec<Measurement>),
    Transports(Vec<Transport>),
    AoeModels(Vec<AoeModel>),
    // history data
    Measures(PbPointValues),
    Alarms(PbEigAlarms),
    UnConfirmedAlarms(PbEigAlarms),
    AoeResults(PbAoeResults),
    SetPointResults(PbSetPointResults),
    // auth
    User(Option<UserPub>),
    Users(Vec<UserPub>),
    // alarm
    AlarmCount(u64),
    AlarmDefine(Option<PbAlarmDefine>),
    AlarmDefines(PbAlarmDefines),
    AlarmConfig(Option<AlarmConfig>),
    // assumes alarm id -> (timestamp, confirming user id; None = unconfirmed) — TODO confirm
    AlarmConfirmStatus(BTreeMap<u64, (u64, Option<u16>)>),
    AlarmUnConfirmedCount(u64),
    // status
    RunningAoes(Vec<u64>),
    UnrunAoes(Vec<u64>),
    // other
    KvResponse(Vec<u8>),
    Tags(Vec<Vec<u64>>),
    TagDefs(Vec<(String, u16)>),
    TagDefIds(Vec<u16>),
    LogFiles(PbFile),
    // NOTE(review): variant name has a casing typo ("ALl"); renaming would
    // break serialized compatibility, so it is kept as-is.
    ALlModels(PbFiles),
    Error(String),
    Done,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct EmsBroadcast {
pub ip: String,
pub rt_listen_port: u16,
......@@ -665,101 +488,6 @@ pub struct LccDevice {
pub is_ems: bool,
}
/// Operations handled by the LCC manager; most carry a `Sender` on which the
/// answer is returned.
#[derive(Debug, Clone)]
pub enum LccOperation {
    // common key-value / tag store operations
    QueryKv(String, Vec<u8>, Sender<Vec<u8>>),
    UpdateKv(String, Vec<u8>, Vec<u8>),
    UpdateKv2(String, Vec<u8>, Vec<u8>, Vec<u8>),
    DeleteKv(String, Vec<u8>),
    QueryIdsWithTag(String, u8, Vec<u16>, Sender<Vec<Vec<u64>>>),
    QueryTagDefs(String, u8, Sender<Vec<(String, u16)>>),
    UpdateTags(String, u8, Vec<(String, Vec<u64>)>, Sender<Vec<u16>>),
    DeleteTags(String, u8, Vec<(u16, u64)>),
    // Device add/remove/query.
    QueryLccList(Sender<Vec<LccDevice>>),
    QueryLcc(String, Sender<Option<LccDevice>>),
    // models
    ExportAllModels(String, String, Sender<PbFiles>),
    ImportAllModels(String, PbFiles),
    QueryConfig(String, Sender<Option<EigConfig>>),
    PutConfig(String, EigConfig),
    QueryAoes(String, AoeQuery, Sender<Vec<AoeModel>>),
    PostAoes(String, Vec<AoeModel>),
    DeleteAoes(String, Vec<u64>),
    QueryPoints(String, PointsQuery, Sender<Vec<Measurement>>),
    PostPoints(String, Vec<Measurement>),
    DeletePoints(String, Vec<u64>),
    QueryTransports(String, TransportQuery, Sender<Vec<Transport>>),
    PostTransports(String, Vec<Transport>),
    DeleteTransports(String, Vec<u64>),
    // status
    QueryRunningAoes(String, Sender<Vec<u64>>),
    QueryUnRunAoes(String, Sender<Vec<u64>>),
    // control
    ControlQuitForce(String),
    ControlPoint(String, PointControl),
    ControlAoe(String, AoeControl),
    ControlReset(String),
    ControlRecover(String),
    // history data query
    QueryMeasures(String, HisQuery, Sender<PbPointValues>),
    QueryAoeResults(String, HisQuery, Sender<PbAoeResults>),
    QueryAlarms(String, HisQuery, Sender<PbEigAlarms>),
    QuerySoes(String, HisQuery, Sender<PbPointValues>),
    QuerySetPointResults(String, HisSetPointQuery, Sender<PbSetPointResults>),
    QueryLogFiles(String, LogQuery, Sender<PbFile>),
    // alarm related
    QueryAlarmCount(String, Sender<u64>),
    // Query all alarm rules.
    QueryAlarmDefines(String, Sender<PbAlarmDefines>),
    // Query the alarm rule with the given id.
    QueryAlarmDefine(String, u32, Sender<Option<PbAlarmDefine>>),
    // Create and save alarm rule(s).
    SaveAlarmDefine(String, PbAlarmDefine),
    SaveAlarmDefines(String, PbAlarmDefines),
    // Delete the alarm rules with the given ids.
    DeleteAlarmDefines(String, String),
    // Query the alarm notification settings.
    QueryAlarmConfig(String, Sender<Option<AlarmConfig>>),
    // Configure alarm notification.
    SetAlarmConfig(String, AlarmConfig),
    // Confirm alarms (lcc id, user id, ids of the alarms being confirmed).
    ConfirmAlarms(String, u16, Vec<u64>),
    // Query confirmed alarms (alarm id, user id; an empty user id means unconfirmed).
    QueryConfirmStatus(String, Sender<BTreeMap<u64, (u64, Option<u16>)>>),
    // Query the number of unconfirmed alarms.
    QueryUnconfirmedCount(String, Sender<u64>),
    QueryUnconfirmedAlarms(String, Sender<PbEigAlarms>),
    // Query users.
    QueryUsers(String, Sender<Vec<UserPub>>),
    // Load the LCC's points into the base service.
    ApplyPoints(String, Vec<Measurement>),
    // message from socket
    Register(PbEigPingRes, Sender<PlccRequest>, Receiver<PlccResponse>),
    Closed(String),
    RegisterRt(PbEigPingRes, Sender<EigRtRegister>),
    SetRtMsgRegister(EigRtRegister),
    RemoveRtMsgRegister(String),
    SetRtMsgSender(String, Sender<EigRtMessage>),
    NewRtMsg(EigRtMessage),
    ClosedRt(String),
    // Northbound interface API.
    // Device add/remove/query through the northbound interface.
    QueryNorthList(Sender<Vec<LccDevice>>),
    QueryNorthDev(String, Sender<Option<LccDevice>>),
    // Request forwarding.
    NorthRequest(String, PbRequest, Sender<PbResponse>),
    NorthResponse(PbResponse),
    // Register a northbound request/response channel pair.
    NorthRegister(PbEigPingRes, Sender<PbRequest>, Receiver<PbResponse>),
    // from udp ping
    Ping(PbEigPingRes),
    // Quit the manager loop.
    Quit,
}
/**
* @api {CommitNote} /CommitNote CommitNote
......@@ -786,23 +514,3 @@ pub struct VersionData<T> {
// 对应的数据
pub data: T,
}
/// Data set recorded for a historical version of measurement points.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct PointVersionData {
    // All point models in this version.
    pub point_models: Vec<Measurement>,
    // Assumes this maps a bee (lcc) id to the point ids it owns — TODO confirm.
    pub beeid_to_points: Vec<(String, Vec<u64>)>,
}
/// Data set recorded for a historical version of reports/graphs.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct GraphVersionData {
    pub graph_models: Vec<PbFile>,
}
// Version query parameter.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct VersionQuery {
    // Version number; optional — `None` defaults to version 0.
    pub version: Option<u32>,
}
use std::collections::{BTreeMap, HashMap};
use std::convert::TryInto;
use std::net::SocketAddr;
#[cfg(target_family = "unix")]
use std::path::PathBuf;
use csv::StringRecord;
use serde::{Deserialize, Serialize};
use crate::excel::{excel_bytes_to_csv_bytes, transfer_to_utf8};
use crate::{create_parity, csv_str, csv_string, csv_u32, csv_u64, csv_u8, csv_usize, get_csv_str, SerialPara, SerialParity, UNKNOWN_POINT_ID};
/// Default polling period (ms) when the CSV leaves the field blank.
const DEFAULT_POLLING_PERIOD_IN_MILLI: u64 = 5000;
/// Default request timeout (ms) when the CSV leaves the field blank.
const DEFAULT_TIMEOUT_IN_MILLI: u64 = 3000;
// Default inter-request delay is 20 ms.
pub const DEFAULT_DELAY_BETWEEN_REQUESTS: u64 = 20;
/**
* @api {枚举_Dlt645参数} /Dlt645Para Dlt645Para
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {Object} Serial {"Serial": SerialPara}
* @apiSuccess {Object} Socket {"Socket": tuple(String, u16)}
*/
/// Physical-link parameters of a DL/T 645 client: serial port or TCP socket.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub enum Dlt645Para {
    // Serial-port parameters.
    Serial(SerialPara),
    // TCP endpoint: (ip, port).
    Socket(String, u16),
}
/**
* @api {Dlt645ClientTp} /Dlt645ClientTp Dlt645ClientTp
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 通道id
* @apiSuccess {String} name 通道名称
* @apiSuccess {Dlt645Para} para 参数
* @apiSuccess {Dlt645Connection[]} connections connections
*/
/// A DL/T 645 client transport: one physical link plus its meter connections.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct Dlt645ClientTp {
    // Transport id.
    pub id: u64,
    // Transport name.
    pub name: String,
    // Link parameters (serial or socket).
    pub para: Dlt645Para,
    // Meter connections multiplexed on this link.
    pub connections: Vec<Dlt645Connection>,
}
/**
* @api {Dlt645连接信息} /Dlt645Connection Dlt645Connection
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u8} slave_id slave_id
* @apiSuccess {String} name 连接名称
* @apiSuccess {u64} timeout_in_milli 超时时间_毫秒
* @apiSuccess {u64} point_id 通道状态对应的测点号
* @apiSuccess {u64} default_polling_period_in_milli 默认的轮询周期
* @apiSuccess {Dlt645RegisterData[]} data_configure register settings
* @apiSuccess {Map} point_id_to_rd HashMap<point_id:u64, position_of_register_data:u16>
* @apiSuccess {Map} data_id_to_rd HashMap<寄存器地址:u32, setting中Dlt645RegisterData[]的位置:u16>
* @apiSuccess {Map} polling_period_to_data 轮询周期不同的数据,有序Map<轮询周期_毫秒数:u64, position:u16[]>
*/
/// One DL/T 645 meter connection (a slave on the shared link) together with
/// its register configuration and derived lookup maps.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Dlt645Connection {
    pub slave_id: u64,
    // Connection name.
    pub name: String,
    // Timeout setting (ms).
    pub timeout_in_milli: u64,
    // Point that carries this connection's channel status.
    pub point_id: u64,
    // Default polling period (ms).
    pub default_polling_period_in_milli: u64,
    // register settings
    pub data_configure: Vec<RegisterData>,
    // key is point id, value is position of register data
    pub point_id_to_rd: HashMap<u64, u16>,
    // key: register data id, value: position inside `data_configure`
    pub data_id_to_rd: HashMap<u32, u16>,
    // Data grouped by polling period: key is period in milli, value is positions.
    pub polling_period_to_data: BTreeMap<u64, Vec<u16>>,
}
impl Default for Dlt645Connection {
    /// A fresh, empty connection: slave 1, built-in timing defaults,
    /// no registers configured.
    fn default() -> Self {
        Self {
            name: "new".to_string(),
            slave_id: 1,
            point_id: 0,
            timeout_in_milli: DEFAULT_TIMEOUT_IN_MILLI,
            default_polling_period_in_milli: DEFAULT_POLLING_PERIOD_IN_MILLI,
            data_configure: Vec::new(),
            point_id_to_rd: HashMap::new(),
            data_id_to_rd: HashMap::new(),
            polling_period_to_data: BTreeMap::new(),
        }
    }
}
/**
* @api {Dlt645注册信息} /Dlt645RegisterData Dlt645RegisterData
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u32} data_id 数据标识
* @apiSuccess {u64} polling_period_in_milli 轮询周期,毫秒
* @apiSuccess {u64[]} point_ids 对应的测点Id
*/
/// Configuration of one DL/T 645 data item (register).
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct RegisterData {
    // Data identifier of the register.
    pub data_id: u32,
    // Polling period (ms).
    pub polling_period_in_milli: u64,
    // Ids of the points fed by this register.
    pub point_ids: Vec<u64>,
}
impl Dlt645Connection {
    /// Parse one connection from the raw CSV `content`.
    ///
    /// Connection blocks sit side by side in the sheet; `offset` is the
    /// 0-based column offset of this block. On failure the `(row, column)`
    /// of the offending cell is returned for error display.
    fn from_csv_records(
        content: &[u8],
        offset: usize,
    ) -> Result<Self, (usize, usize)> {
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        // 1st line: connection name.
        let rc = (0usize, 1 + offset);
        let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        // 2nd line: number of register entries.
        let rc = (1usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let point_num = csv_usize(&record, rc.1).ok_or(rc)?;
        // Register positions are stored as u16, so the count must fit in u16.
        // BUGFIX: the previous `point_num as u16 > u16::MAX` comparison was
        // always false because the cast truncates before comparing.
        if point_num > u16::MAX as usize {
            return Err(rc);
        }
        // 3rd line: slave id.
        let rc = (2usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let slave_id = csv_u64(&record, rc.1).ok_or(rc)?;
        // 4th line: default polling period (blank -> built-in default).
        let rc = (3usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let default_polling_period_in_milli: u64 = if s.is_empty() {
            DEFAULT_POLLING_PERIOD_IN_MILLI
        } else {
            s.parse().map_err(|_| rc)?
        };
        // 5th line: timeout in ms (blank -> built-in default).
        let rc = (4usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let timeout_in_milli: u64 = if s.is_empty() {
            DEFAULT_TIMEOUT_IN_MILLI
        } else {
            s.parse().map_err(|_| rc)?
        };
        // 6th line: channel-status point id (blank -> 0, i.e. none).
        let rc = (5usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let point_id: u64 = if s.is_empty() {
            0
        } else {
            s.parse().map_err(|_| rc)?
        };
        // Register data columns: re-read from the top with a fresh reader
        // (the first reader has already been advanced). Error rows are
        // reported starting at row 9, matching the sheet layout.
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        let rc = (0, 3 + offset);
        records.next().ok_or(rc)?.map_err(|_| rc)?;
        let mut data_configure: Vec<RegisterData> = Vec::with_capacity(point_num);
        for row in 1..=point_num {
            let rc = (row + 8, 3 + offset);
            let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
            data_configure.push(RegisterData::parse_register_data(&record, rc.0, rc.1)?);
        }
        let mut conn = Dlt645Connection {
            slave_id,
            name,
            timeout_in_milli,
            point_id,
            default_polling_period_in_milli,
            data_configure,
            ..Default::default()
        };
        // Build the lookup maps; re-anchor error columns to this block's offset.
        conn.create_data_config().map_err(|(r, c, _)| (r, c + offset))?;
        Ok(conn)
    }

    /// Collect the data ids of every register polled at `period` milliseconds.
    /// Returns an empty vector when no register uses that period.
    pub fn create_request(&self, period: u64) -> Vec<u32> {
        if let Some(positions) = self.polling_period_to_data.get(&period) {
            let mut result = Vec::with_capacity(positions.len());
            for pos in positions {
                result.push(self.data_configure[*pos as usize].data_id);
            }
            result
        } else {
            // No register list for this period.
            vec![]
        }
    }

    /// Rebuild the derived lookup maps (`point_id_to_rd`, `data_id_to_rd`,
    /// `polling_period_to_data`) from `data_configure`.
    ///
    /// Fails with `(row, column, message)` when a point id or a register
    /// data id appears in more than one entry.
    pub fn create_data_config(&mut self) -> Result<(), (usize, usize, String)> {
        let size = self.data_configure.len();
        let mut point_id_to_rd: HashMap<u64, u16> = HashMap::with_capacity(size);
        // key: register data id, value: position inside data_configure
        let mut data_id_to_rd: HashMap<u32, u16> = HashMap::with_capacity(size);
        // Data grouped by polling period: key is period in milli, value is positions.
        let mut polling_period_to_data: BTreeMap<u64, Vec<u16>> = BTreeMap::new();
        polling_period_to_data.insert(self.default_polling_period_in_milli, Vec::with_capacity(size));
        // Count how many registers use each polling period (entry API: one lookup).
        let mut tmp: HashMap<u64, u32> = HashMap::with_capacity(10);
        for rd in &self.data_configure {
            *tmp.entry(rd.polling_period_in_milli).or_insert(0) += 1;
        }
        for (i, num) in tmp {
            let mut a_v: Vec<u16> = Vec::with_capacity(num as usize);
            for (index, rd) in self.data_configure.iter().enumerate() {
                if rd.polling_period_in_milli == i {
                    // Safe: entry count was validated to fit in u16 by the caller.
                    a_v.push(index.try_into().unwrap());
                }
            }
            polling_period_to_data.insert(i, a_v);
        }
        for (index, rd) in self.data_configure.iter().enumerate() {
            // Duplicate point id across register entries.
            // NOTE(review): a point id repeated inside a single entry's
            // `point_ids` is not detected here — confirm whether that is legal.
            for point_id in &rd.point_ids {
                if point_id_to_rd.contains_key(point_id) {
                    let tip = format!("Invalid register point (id :{}):\nThe point ID is already existed", point_id);
                    return Err((index + 1, 8, tip)); // column of the point id
                }
            }
            for point_id in &rd.point_ids {
                point_id_to_rd.insert(*point_id, index.try_into().unwrap());
            }
            // Duplicate register data id.
            if data_id_to_rd.contains_key(&rd.data_id) {
                let tip = format!("Invalid register point (id :{}):\nThe register address is already existed", rd.data_id);
                return Err((index + 1, 4, tip)); // column of the address
            }
            data_id_to_rd.insert(rd.data_id, index.try_into().unwrap());
        }
        self.data_id_to_rd = data_id_to_rd;
        self.point_id_to_rd = point_id_to_rd;
        self.polling_period_to_data = polling_period_to_data;
        Ok(())
    }
}
impl Dlt645ClientTp {
pub fn from_file(path: &str) -> Result<Self, (usize, usize)> {
let content = std::fs::read(path).map_err(|_| (0, 0))?;
// let content = if env::IS_ENCRYPT {
// decrypt_vec(content.as_slice())
// } else {
// content
// };
let csv_bytes = if path.ends_with(".xlsx") || path.ends_with(".xls") {
let r = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
if r.is_empty() {
return Err((0, 0));
}
r[0].clone()
} else {
content
};
Self::from_csv_bytes(csv_bytes.as_slice())
}
pub fn from_csv(path: &str) -> Result<Dlt645ClientTp, (usize, usize)> {
let content = std::fs::read(path).map_err(|_| (0, 0))?;
// let content = if env::IS_ENCRYPT {
// let plain_t = decrypt_vec(content.as_slice());
// plain_t
// } else {
// content
// };
Dlt645ClientTp::from_csv_bytes(content.as_slice())
}
    /// Parse a complete DL/T 645 client transport from CSV bytes.
    ///
    /// Expected layout in the first column: line 1 name, line 2 connection
    /// count, line 3 baud rate, line 4 serial device path or `ip:port`;
    /// lines 5-8 are optional serial parameters (data bits, stop bits,
    /// parity, inter-request delay). Connection blocks follow side by side,
    /// 7 columns apart. On failure the `(row, column)` of the offending cell
    /// is returned.
    pub fn from_csv_bytes(content: &[u8]) -> Result<Dlt645ClientTp, (usize, usize)> {
        let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
        let content = content_new.as_slice();
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        let rc = (0usize, 1);
        let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (1usize, 1);
        let conn_num: usize =
            csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (2usize, 1);
        let baud_rate = csv_u32(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (3usize, 1);
        let file_path = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        // If line 4 parses as a socket address, this is a TCP transport.
        let para = if let Ok(addres) = file_path.parse::<SocketAddr>() {
            Dlt645Para::Socket(addres.ip().to_string(), addres.port())
        } else {
            // On unix, a relative path is taken to be a device under /dev.
            #[cfg(target_family = "unix")]
            let file_path = if PathBuf::from(file_path.clone()).is_relative() {
                "/dev/".to_string() + file_path.as_str()
            } else {
                file_path
            };
            // The next three lines are optional.
            // Line 5: data bits (default 8).
            let record = records.next();
            let data_bits = if let Some(Ok(tmp)) = record {
                if let Some(v) = csv_u8(&tmp, 1) {
                    v
                } else {
                    8 // default is 8
                }
            } else {
                8
            };
            // Line 6: stop bits (default 1).
            let record = records.next();
            let stop_bits = if let Some(Ok(tmp)) = record {
                if let Some(v) = csv_u8(&tmp, 1) {
                    v
                } else {
                    1 // default is 1
                }
            } else {
                1
            };
            // Line 7: parity (default none).
            let record = records.next();
            let parity = if let Some(Ok(tmp)) = record {
                if let Some(v) = csv_str(&tmp, 1) {
                    create_parity(v)
                } else {
                    SerialParity::None
                }
            } else {
                SerialParity::None
            };
            // Line 8: inter-request delay.
            // 3.5 characters of silence; each character is 1 start + 8 data
            // + 1 parity (or none) + 1 stop bit => 38.5 bit times, hence
            // 38500 / baud ms.
            let mut delay_between_requests = (38500. / (baud_rate as f64)).ceil() as u64;
            if delay_between_requests == 0 {
                delay_between_requests = DEFAULT_DELAY_BETWEEN_REQUESTS;
            }
            let record = records.next();
            if let Some(Ok(tmp)) = record {
                if let Some(v) = csv_u64(&tmp, 1) {
                    if v > 0 {
                        delay_between_requests = v;
                    }
                }
            }
            Dlt645Para::Serial(SerialPara {
                file_path,
                baud_rate,
                data_bits,
                stop_bits,
                parity,
                delay_between_requests,
            })
        };
        // Parse the connection blocks; block i starts at column offset i*7+3.
        let mut connections: Vec<Dlt645Connection> = Vec::with_capacity(conn_num);
        for i in 0..conn_num {
            let connection = Dlt645Connection::from_csv_records(content, i * 7 + 3)?;
            connections.push(connection);
        }
        // id 0 is a placeholder — presumably assigned later by the caller; TODO confirm.
        Ok(Dlt645ClientTp {
            id: 0,
            name,
            para,
            connections,
        })
    }
/// Exports this DL/T 645 transport as CSV text.
///
/// Layout mirrors the import format parsed by `from_csv_bytes`: row 1
/// holds the transport name plus one title group per connection, rows 2-8
/// hold the transport parameters alongside the first seven register rows
/// of every connection, and any remaining register rows follow.
/// `text_map` supplies localized column titles; missing keys fall back to
/// built-in English labels.
pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
    let len_conn = self.connections.len();
    // Row 1: transport name followed by a title group per connection.
    let mut result = format!("{},{},,",
        text_map.get("tp_name").unwrap_or(&"Transport Name".to_string()),
        get_csv_str(&self.name));
    for i in 0..len_conn {
        result += &format!(
            "{},{},{},{},{},{}",
            text_map.get("conn_name").unwrap_or(&"Connection Name".to_string()),
            get_csv_str(&self.connections[i].name),
            text_map.get("index").unwrap_or(&"Index".to_string()),
            text_map.get("dlt645_data_id").unwrap_or(&"Data ID".to_string()),
            text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()),
            text_map.get("status_point").unwrap_or(&"Status Point".to_string()),
        );
        if i != len_conn - 1 {
            result += ",,";
        }
    }
    result += "\n";
    // Rows 2-8: one connection-attribute title per row (last two rows
    // carry no connection attribute).
    let title_conn = [
        text_map.get("point_number").unwrap_or(&"Point Count".to_string()).clone(),
        text_map.get("slave_id").unwrap_or(&"Slave ID".to_string()).clone(),
        text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()).clone(),
        text_map.get("timeout_ms").unwrap_or(&"Timeout(ms)".to_string()).clone(),
        text_map.get("tp_point_id").unwrap_or(&"Point ID".to_string()).clone(),
        text_map.get("status_point").unwrap_or(&"Status Point".to_string()).clone(),
        "".to_string(),
        "".to_string(),
    ];
    // Rows 2-8: transport-level parameters; shape depends on the variant.
    let title_tp = match &self.para {
        Dlt645Para::Serial(para) => {
            let parity = match &para.parity {
                SerialParity::None => "NONE",
                SerialParity::Odd => "ODD",
                SerialParity::Even => "EVEN",
                SerialParity::Mark => "MARK",
                SerialParity::Space => "SPACE",
            };
            vec![
                format!("{},{},",
                    text_map.get("conn_num").unwrap_or(&"Connection Count".to_string()),
                    self.connections.len()),
                format!("{},{},",
                    text_map.get("baud_rate").unwrap_or(&"Baud Rate".to_string()),
                    para.baud_rate),
                format!("{},{},",
                    text_map.get("file_path").unwrap_or(&"File Path".to_string()),
                    para.file_path),
                format!("{},{},",
                    text_map.get("data_bits").unwrap_or(&"Data Bits".to_string()),
                    para.data_bits),
                format!("{},{},",
                    text_map.get("stop_bits").unwrap_or(&"Stop Bits".to_string()),
                    para.stop_bits),
                format!("{},{},",
                    text_map.get("parity").unwrap_or(&"Parity".to_string()),
                    parity),
                format!("{},{},",
                    text_map.get("serial_para_delay_ms_tip").unwrap_or(&"Delay Between Requests (ms)".to_string()),
                    para.delay_between_requests),
            ]
        }
        Dlt645Para::Socket(ip, port) => {
            vec![
                format!("{},{},",
                    text_map.get("conn_num").unwrap_or(&"Connection Count".to_string()),
                    self.connections.len()),
                // A fixed 19200 keeps the row layout; baud rate is not
                // meaningful for a socket transport.
                format!("{},19200,", text_map.get("baud_rate").unwrap_or(&"Baud Rate".to_string())),
                format!("{},{}:{},", text_map.get("file_path").unwrap_or(&"File Path".to_string()), ip, port),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
            ]
        }
    };
    // Rows 2-8: transport parameter + register rows 0-6 of each connection.
    for cnt in 0..7 {
        result += &title_tp[cnt];
        for i in 0..len_conn {
            if self.connections[i].data_configure.len() > cnt {
                let r = &self.connections[i].data_configure[cnt];
                let content_conn = Self::get_dlt_conn_csv(&self.connections[i], cnt);
                result += &format!(
                    ",{},{},{},{:#010X},{},",
                    title_conn[cnt],
                    content_conn,
                    cnt + 1,
                    r.data_id,
                    r.polling_period_in_milli
                );
                // Point ids share one CSV field, ';'-separated.
                let ids: Vec<String> = r.point_ids.iter().map(|id| id.to_string()).collect();
                result += &ids.join(";");
                if i != len_conn - 1 {
                    result += ",";
                }
            } else {
                let content_conn = Self::get_dlt_conn_csv(&self.connections[i], cnt);
                result += &format!(",{},{},,,,", title_conn[cnt], content_conn);
            }
        }
        result += "\n";
    }
    // Remaining register rows beyond the first seven.
    // NOTE: this previously read `self.connections[0]` unconditionally and
    // panicked when the transport had no connections; max()/unwrap_or(0)
    // handles the empty case (the Ec exporter already guards it).
    let max_data_len = self
        .connections
        .iter()
        .map(|c| c.data_configure.len())
        .max()
        .unwrap_or(0);
    for row in 7..max_data_len {
        result += ",,";
        for i in 0..len_conn {
            if self.connections[i].data_configure.len() > row {
                let r = &self.connections[i].data_configure[row];
                result += &format!(
                    ",,,{},{:#010X},{},",
                    row + 1,
                    r.data_id,
                    r.polling_period_in_milli
                );
                let ids: Vec<String> = r.point_ids.iter().map(|id| id.to_string()).collect();
                result += &ids.join(";");
                if i != len_conn - 1 {
                    result += ",";
                }
            } else {
                result += ",,,,,,,";
            }
        }
        result += "\n";
    }
    result
}
/// Returns the connection attribute printed on header row `index`
/// (0: register count, 1: slave id, 2: default polling period,
/// 3: timeout, 4: status point id); empty for any other row.
fn get_dlt_conn_csv(conn: &Dlt645Connection, index: usize) -> String {
    if index == 0 {
        conn.data_configure.len().to_string()
    } else if index == 1 {
        conn.slave_id.to_string()
    } else if index == 2 {
        conn.default_polling_period_in_milli.to_string()
    } else if index == 3 {
        conn.timeout_in_milli.to_string()
    } else if index == 4 {
        conn.point_id.to_string()
    } else {
        String::new()
    }
}
/// Collects every measurement point id referenced by this transport:
/// all register point ids plus each connection's status point (when set).
pub fn get_point_ids(&self) -> Vec<u64> {
    // Upper bound: every register's ids plus one status point per connection.
    let capacity: usize = self
        .connections
        .iter()
        .map(|c| c.data_configure.len())
        .sum::<usize>()
        + self.connections.len();
    let mut ids = Vec::with_capacity(capacity);
    for conn in &self.connections {
        for rd in &conn.data_configure {
            ids.extend(rd.point_ids.iter().copied());
        }
        // UNKNOWN_POINT_ID marks "no status point configured".
        if conn.point_id != UNKNOWN_POINT_ID {
            ids.push(conn.point_id);
        }
    }
    ids
}
}
impl RegisterData {
    /// Parses one register entry from a CSV record starting at `first_col`:
    /// data id, polling period (ms), and a ';'-separated list of point ids.
    /// On failure returns the (row, column) of the offending cell.
    fn parse_register_data(
        record: &StringRecord,
        row: usize,
        first_col: usize,
    ) -> Result<Self, (usize, usize)> {
        let rc = (row, first_col);
        let data_id = csv_u32(record, rc.1).ok_or(rc)?;
        // Polling period in milliseconds.
        let rc = (row, first_col + 1);
        let polling_period_in_milli = csv_u64(record, rc.1).ok_or(rc)?;
        // Associated point ids, ';'-separated inside a single cell; any
        // unparsable id maps the whole cell to an error.
        let rc = (row, first_col + 2);
        let points = csv_str(record, rc.1).ok_or(rc)?;
        let point_ids = points
            .split(';')
            .map(|id| id.parse::<u64>().map_err(|_| rc))
            .collect::<Result<Vec<_>, _>>()?;
        Ok(RegisterData {
            data_id,
            polling_period_in_milli,
            point_ids,
        })
    }
}
\ No newline at end of file
use std::collections::HashMap;
use std::path::Path;
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use csv::StringRecord;
use crate::{csv_str, csv_string, csv_u16, csv_u64, csv_usize, get_csv_str};
use crate::excel::{excel_bytes_to_csv_bytes, transfer_to_utf8};
use crate::UNKNOWN_POINT_ID;
use crate::prop::DataType;
/// One EtherCAT slave connection: its PDI entries plus cycle/watchdog
/// settings parsed from the connection's CSV column group.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct EcConnection {
    pub name: String,
    pub module_name: String,
    // NOTE(review): presumably the slave's position/index on the bus
    // (CSV title key "index_num") — confirm semantics.
    pub index: usize,
    // Status point id; UNKNOWN_POINT_ID means "not configured".
    pub point_id: u64,
    // PDI entries of this connection.
    pub data: Vec<PdiData>,
    // point_id -> position in `data`; rebuilt by create_data_config.
    pub point_to_pos: HashMap<u64, usize>,
    pub cycle_time_in_micro: u64,
    pub watchdog_pdi: Option<u16>, // 1/25M*(multi_watchdog+2)*pdi_watchdog
    pub watchdog_sm: Option<u16>, // 1/25M*(multi_watchdog+2)*sm_watchdog, default to 1000
    pub watchdog_multi: Option<u16>, // default to 2498
}
/// An EtherCAT master transport: a network interface plus its slave
/// connections.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct EcMasterTp {
    /// Transport id (from_csv_bytes initializes it to 0).
    pub id: u64,
    /// Transport name.
    pub name: String,
    /// Network interface (NIC) name the master binds to.
    pub eth: String,
    /// Slave connections.
    pub connections: Vec<EcConnection>
}
/// One entry of a connection's process data image.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct PdiData {
    /// Whether this entry is writable (output) rather than read-only (input).
    pub is_writable: bool,
    /// Starting offset of the entry; create_data_config treats
    /// `from + data_type.get_byte_count()` as the end of the entry.
    pub from: u16,
    // 数据类型 -> Data type of the entry.
    pub data_type: DataType,
    // 对应的测点Id -> Associated measurement point id.
    pub point_id: u64,
}
impl Default for EcConnection {
fn default() -> Self {
EcConnection {
name: "new".to_string(),
module_name: "new_module".to_string(),
index: 0,
point_id: 0,
data: Vec::new(),
point_to_pos: HashMap::new(),
cycle_time_in_micro: 0,
watchdog_pdi: None,
watchdog_sm: None,
watchdog_multi: None,
}
}
}
impl EcConnection {
    /// Returns the read-only (input) PDI entries keyed by their `from`
    /// offset. Entries sharing an offset silently overwrite each other.
    pub fn read_config(&self) -> HashMap<usize, PdiData> {
        let mut r = HashMap::with_capacity(self.data.len());
        for pdi in &self.data {
            if !pdi.is_writable {
                r.insert(pdi.from as usize, pdi.clone());
            }
        }
        r.shrink_to_fit();
        r
    }
    /// Returns the writable (output) PDI entries keyed by their `from`
    /// offset. Entries sharing an offset silently overwrite each other.
    pub fn write_config(&self) -> HashMap<usize, PdiData> {
        // Consistent with read_config: pre-size for the full entry count,
        // then shrink to the kept subset.
        let mut r = HashMap::with_capacity(self.data.len());
        for pdi in &self.data {
            if pdi.is_writable {
                r.insert(pdi.from as usize, pdi.clone());
            }
        }
        r.shrink_to_fit();
        r
    }
}
impl EcMasterTp {
/// Loads an EtherCAT master transport from a CSV or Excel file.
///
/// Bug fix: the previous code used `Path::ends_with(".xlsx")`, which
/// matches whole path *components* (a final component literally named
/// ".xlsx"), never a file-name suffix — so Excel files were always
/// parsed as raw CSV. `Path::extension` performs the intended check
/// (case-insensitively). Errors are reported as the (0, 0) cell.
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, (usize, usize)> {
    let content = std::fs::read(&path).map_err(|_| (0, 0))?;
    let ext = path
        .as_ref()
        .extension()
        .and_then(|e| e.to_str())
        .map(|e| e.to_ascii_lowercase());
    let csv_bytes = if matches!(ext.as_deref(), Some("xlsx") | Some("xls")) {
        // Only the first sheet of an Excel workbook is used.
        let r = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
        if r.is_empty() {
            return Err((0, 0));
        }
        r[0].clone()
    } else {
        content
    };
    Self::from_csv_bytes(csv_bytes.as_slice())
}
/// Loads an EtherCAT master transport from a CSV file on disk
/// (no Excel detection; see `from_file` for that).
pub fn from_csv(path: &str) -> Result<Self, (usize, usize)> {
    let bytes = std::fs::read(path).map_err(|_| (0, 0))?;
    Self::from_csv_bytes(bytes.as_slice())
}
/// Parses an EtherCAT master transport from raw CSV bytes.
///
/// Rows 0-2, column 1 hold the transport name, connection count and NIC
/// name; each connection's data then sits in a column group starting at
/// column `i * 8 + 3`. Errors carry the (row, column) of the bad cell.
pub fn from_csv_bytes(content: &[u8]) -> Result<Self, (usize, usize)> {
    // Normalize the encoding first (UTF-16/GBK inputs become UTF-8).
    let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
    let content = content_new.as_slice();
    let mut rdr = csv::ReaderBuilder::new()
        .has_headers(false)
        .from_reader(content);
    let mut records = rdr.records();
    // Row 0, column 1: transport name.
    let rc = (0usize, 1);
    let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
    // Row 1, column 1: number of connections.
    let rc = (1usize, 1);
    let conn_num: usize =
        csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
    // Row 2, column 1: network interface name.
    let rc = (2usize, 1);
    let eth =
        csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
    let mut connections = Vec::with_capacity(conn_num);
    for i in 0..conn_num {
        // Each connection occupies 8 columns starting at column i*8+3.
        let connection = EcConnection::from_csv_records(content, i * 8 + 3)?;
        connections.push(connection);
    }
    // NOTE(review): id 0 appears to be a placeholder assigned elsewhere —
    // confirm at the call sites.
    Ok(EcMasterTp {
        id: 0,
        name,
        eth,
        connections,
    })
}
/// Collects every point id used by this transport: one id per PDI entry
/// plus each connection's status point (when configured).
pub fn get_point_ids(&self) -> Vec<u64> {
    // Upper bound: every PDI entry plus one status point per connection.
    let capacity: usize = self.connections.iter().map(|c| c.data.len() + 1).sum();
    let mut ids = Vec::with_capacity(capacity);
    for conn in &self.connections {
        ids.extend(conn.data.iter().map(|pdi| pdi.point_id));
        // UNKNOWN_POINT_ID marks "no status point configured".
        if conn.point_id != UNKNOWN_POINT_ID {
            ids.push(conn.point_id);
        }
    }
    ids
}
// Export the transport as CSV file content.
/// Serializes this EtherCAT transport into CSV text (inverse of the
/// layout read by `from_csv_bytes`).
///
/// Row 1: transport name plus one 7-column title group per connection.
/// Rows 2-3: connection count / NIC name alongside PDI rows 0-1.
/// Rows 4-8: cycle time, index and watchdog settings alongside PDI rows
/// 2-6; any remaining PDI rows follow. `text_map` supplies localized
/// titles, falling back to English defaults.
pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
    let len_conn = self.connections.len();
    // Row 1.
    let mut result = format!("{},{},,",
        text_map.get("tp_name").unwrap_or(&"Transport Name".to_string()),
        get_csv_str(&self.name));
    let mut i = 0;
    for conn in &self.connections {
        i += 1;
        result += &format!("{},{},{},{},{},{},{}",
            text_map.get("conn_name").unwrap_or(&"Connection Name".to_string()),
            get_csv_str(&conn.name),
            text_map.get("index").unwrap_or(&"Index".to_string()),
            text_map.get("is_writable").unwrap_or(&"Writable".to_string()),
            text_map.get("start_addr").unwrap_or(&"Start Address".to_string()),
            text_map.get("data_type").unwrap_or(&"Data Type".to_string()),
            text_map.get("point_id").unwrap_or(&"Point ID".to_string()),
        );
        if i != len_conn {
            result += ",,";
        } else {
            break;
        }
    }
    result += "\n";
    // Per-connection attribute titles for rows 2-8 (index = row - 2 + 0).
    let title_conn = vec![
        text_map.get("point_number").unwrap_or(&"Point Count".to_string()).clone(),
        text_map.get("module_name").unwrap_or(&"Module Name".to_string()).clone(),
        text_map.get("period").unwrap_or(&"Period(ms)".to_string()).clone(),
        text_map.get("index_num").unwrap_or(&"Index Num".to_string()).clone(),
        text_map.get("watchdog_multi").unwrap_or(&"WD Multi".to_string()).clone(),
        text_map.get("watchdog_sm").unwrap_or(&"WD SM".to_string()).clone(),
        text_map.get("watchdog_pdi").unwrap_or(&"WD Pdi".to_string()).clone(),
    ];
    // Transport-level values for rows 2-3.
    let title_tp = vec![
        format!("{},{}",
            text_map.get("conn_num").unwrap_or(&"Connection Count".to_string()),
            len_conn
        ),
        format!("{},{}",
            text_map.get("eth_name").unwrap_or(&"NIC Name".to_string()),
            get_csv_str(&self.eth)
        ),
    ];
    // Rows 2-3: transport values + PDI rows 0-1 of each connection.
    for row in 0..2 {
        result += &title_tp[row];
        let mut i = 0;
        for conn in &self.connections {
            i += 1;
            let content_conn = match row {
                0 => conn.data.len().to_string(),
                1 => conn.module_name.clone(),
                _ => "".to_string(),
            };
            if conn.data.len() > row {
                let p = &conn.data[row];
                result += &format!(
                    ",,{},{},{},{},{},{},{}",
                    title_conn[row],
                    content_conn,
                    row + 1,
                    p.is_writable.to_string().to_uppercase(),
                    p.from.to_string(),
                    p.data_type.to_string(),
                    p.point_id.to_string(),
                );
            } else {
                result += &format!(",,{},{},,,,,", title_conn[row], content_conn);
            }
            if i == len_conn {
                break;
            }
        }
        result += "\n";
    }
    // Widest PDI list across all connections (guards the empty case).
    let mut max_data_len = if self.connections.is_empty() {
        0
    } else {
        self.connections[0].data.len()
    };
    for conn in &self.connections {
        if conn.data.len() > max_data_len {
            max_data_len = conn.data.len();
        }
    }
    if max_data_len < 7 {
        // PDI data runs out before the 7 header rows: emit the rows that
        // still carry PDI data, then the header-only remainder.
        // NOTE(review): when max_data_len < 2 the second loop re-emits
        // rows already printed above (rows max_data_len..2) — confirm
        // whether that duplication is intended.
        for row in 2..max_data_len {
            result += ",";
            let mut i = 0;
            for conn in &self.connections {
                i += 1;
                let content_conn = match row {
                    2 => conn.cycle_time_in_micro.to_string(),
                    3 => conn.index.to_string(),
                    4 => if let Some(s) = conn.watchdog_multi { s.to_string() } else { "".to_string() }
                    5 => if let Some(s) = conn.watchdog_sm { s.to_string() } else { "".to_string() }
                    6 => if let Some(s) = conn.watchdog_pdi { s.to_string() } else { "".to_string() }
                    _ => "".to_string(),
                };
                if conn.data.len() > row {
                    let p = &conn.data[row];
                    result += &format!(
                        ",,{},{},{},{},{},{},{}",
                        title_conn[row],
                        content_conn,
                        row + 1,
                        p.is_writable.to_string().to_uppercase(),
                        p.from.to_string(),
                        p.data_type.to_string(),
                        p.point_id.to_string(),
                    );
                } else {
                    result += &format!(",,{},{},,,,,", title_conn[row], content_conn);
                }
                if i == len_conn {
                    break;
                }
            }
            result += "\n";
        }
        for row in max_data_len..7 {
            result += ",";
            let mut i = 0;
            for conn in &self.connections {
                i += 1;
                let content_conn = match row {
                    2 => conn.cycle_time_in_micro.to_string(),
                    3 => conn.index.to_string(),
                    4 => if let Some(s) = conn.watchdog_multi { s.to_string() } else { "".to_string() }
                    5 => if let Some(s) = conn.watchdog_sm { s.to_string() } else { "".to_string() }
                    6 => if let Some(s) = conn.watchdog_pdi { s.to_string() } else { "".to_string() }
                    _ => "".to_string(),
                };
                result += &format!(",,{},{},,,,,", title_conn[row], content_conn);
                if i == len_conn {
                    break;
                }
            }
            result += "\n";
        }
    } else {
        // Seven or more PDI rows: rows 2-6 carry both header values and
        // PDI data, rows 7.. carry PDI data only.
        for row in 2..7 {
            result += ",";
            let mut i = 0;
            for conn in &self.connections {
                i += 1;
                let content_conn = match row {
                    2 => conn.cycle_time_in_micro.to_string(),
                    3 => conn.index.to_string(),
                    4 => if let Some(s) = conn.watchdog_multi { s.to_string() } else { "".to_string() }
                    5 => if let Some(s) = conn.watchdog_sm { s.to_string() } else { "".to_string() }
                    6 => if let Some(s) = conn.watchdog_pdi { s.to_string() } else { "".to_string() }
                    _ => "".to_string(),
                };
                if conn.data.len() > row {
                    let p = &conn.data[row];
                    result += &format!(
                        ",,{},{},{},{},{},{},{}",
                        title_conn[row],
                        content_conn,
                        row + 1,
                        p.is_writable.to_string().to_uppercase(),
                        p.from.to_string(),
                        p.data_type.to_string(),
                        p.point_id.to_string(),
                    );
                } else {
                    result += &format!(",,{},{},,,,,", title_conn[row], content_conn);
                }
                if i == len_conn {
                    break;
                }
            }
            result += "\n";
        }
        for row in 7..max_data_len {
            result += ",";
            let mut i = 0;
            for conn in &self.connections {
                i += 1;
                if conn.data.len() > row {
                    let p = &conn.data[row];
                    result += &format!(
                        ",,,{},{},{},{},{}",
                        row + 1,
                        p.is_writable.to_string().to_uppercase(),
                        p.from.to_string(),
                        p.data_type.to_string(),
                        p.point_id.to_string(),
                    );
                } else {
                    result += ",,,,,,,,,";
                }
                if i == len_conn {
                    break;
                }
            }
            result += "\n";
        }
    }
    result
}
}
impl EcConnection {
/// Parses one connection from the CSV bytes. Connections sit side by
/// side in column groups: eight header values are read down rows 0-7 at
/// column `1 + offset`, then `point_num` PDI rows are read from row 1 at
/// column `3 + offset`. Errors carry the (row, column) of the bad cell.
fn from_csv_records(
    content: &[u8],
    offset: usize,
) -> Result<EcConnection, (usize, usize)> {
    let mut rdr = csv::ReaderBuilder::new()
        .has_headers(false)
        .from_reader(content);
    let mut records = rdr.records();
    // Row 0: connection name.
    let rc = (0usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let name = csv_string(&record, rc.1).ok_or(rc)?;
    // Row 1: number of PDI entries.
    let rc = (1usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let point_num = csv_usize(&record, rc.1).ok_or(rc)?;
    // Row 2: module name.
    let rc = (2usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let module_name = csv_string(&record, rc.1).ok_or(rc)?;
    // Row 3: cycle time in microseconds.
    let rc = (3usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let cycle_time_in_micro = csv_u64(&record, rc.1).ok_or(rc)?;
    // Row 4: index.
    let rc = (4usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let index = csv_usize(&record, rc.1).ok_or(rc)?;
    // Rows 5-7: optional watchdog settings (csv_u16 yields None when the
    // cell cannot be read as a u16).
    let rc = (5usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let watchdog_multi = csv_u16(&record, rc.1);
    let rc = (6usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let watchdog_sm = csv_u16(&record, rc.1);
    let rc = (7usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let watchdog_pdi = csv_u16(&record, rc.1);
    // Second pass with a fresh reader: skip the title row, then read the
    // PDI rows at column `3 + offset`.
    let mut rdr = csv::ReaderBuilder::new()
        .has_headers(false)
        .from_reader(content);
    let mut records = rdr.records();
    let mut pdi_data = Vec::with_capacity(point_num);
    let rc = (0, 3 + offset);
    records.next().ok_or(rc)?.map_err(|_| rc)?;
    for row in 1..=point_num {
        let rc = (row, 3 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let data = PdiData::parse_pdi_data(&record, rc.0, rc.1)?;
        pdi_data.push(data);
    }
    let mut conn = EcConnection {
        name,
        module_name,
        // No status point in the CSV; left unconfigured here.
        point_id: UNKNOWN_POINT_ID,
        data: pdi_data,
        index,
        cycle_time_in_micro,
        watchdog_multi,
        watchdog_pdi,
        watchdog_sm,
        ..Default::default()
    };
    // Validate addresses and build the point-id index; shift the error
    // column by this connection's column offset.
    conn.create_data_config().map_err(|(r, c, _)| (r, c + offset))?;
    Ok(conn)
}
/// Rebuilds `point_to_pos` (point id -> index in `data`) and verifies
/// that no two PDI entries overlap in the process image.
///
/// On overlap returns `(row, column, message)` identifying the first
/// offending entry in address order (column 4 is the address column).
pub fn create_data_config(&mut self) -> Result<(), (usize, usize, String)> {
    let mut point_to_pos = HashMap::with_capacity(self.data.len());
    for (pos, pdi) in self.data.iter().enumerate() {
        point_to_pos.insert(pdi.point_id, pos);
    }
    // Sort a copy by start address so overlaps become adjacent.
    let mut by_addr = self.data.clone();
    by_addr.sort_by_key(|p| p.from);
    let mut next_free = u16::MIN;
    for (idx, pdi) in by_addr.iter().enumerate() {
        // An entry starting before the end of the previous one overlaps it.
        if pdi.from < next_free {
            let tip = format!("Invalid register point (id :{}):\nThe register address is already existed", pdi.point_id);
            return Err((idx + 1, 4, tip));
        }
        next_free = pdi.from + pdi.data_type.get_byte_count();
    }
    self.point_to_pos = point_to_pos;
    Ok(())
}
}
impl PdiData {
    /// Parses one PDI entry from a CSV record starting at `first_col`:
    /// writable flag, start offset, data type and point id.
    /// On failure returns the (row, column) of the offending cell.
    fn parse_pdi_data(
        record: &StringRecord,
        row: usize,
        first_col: usize,
    ) -> Result<Self, (usize, usize)> {
        // Writable flag: only the exact string "TRUE" enables it; any
        // other value (including lowercase) reads as not writable.
        let rc = (row, first_col);
        let flag = csv_str(record, rc.1).ok_or(rc)?;
        let is_writable = flag == "TRUE";
        // Start offset of the entry.
        let rc = (row, first_col + 1);
        let from = csv_u16(record, rc.1).ok_or(rc)?;
        // Data type, parsed from its textual name.
        let rc = (row, first_col + 2);
        let type_name = csv_str(record, rc.1).ok_or(rc)?;
        let data_type = DataType::from_str(type_name).map_err(|_| rc)?;
        // Associated measurement point id.
        let rc = (row, first_col + 3);
        let point_id = csv_u64(record, rc.1).ok_or(rc)?;
        Ok(PdiData {
            is_writable,
            from,
            data_type,
            point_id,
        })
    }
}
\ No newline at end of file
use std::collections::HashMap;
use std::io::{Cursor, Write};
use std::path::Path;
use calamine::{open_workbook_auto_from_rs, Data, Reader, Sheets, Xlsx, open_workbook_from_rs};
/// Reads a workbook file from disk and converts its sheets to CSV byte
/// buffers; `None` when the file cannot be read or converted.
pub fn excel_to_csv_bytes<P: AsRef<Path>>(path: P) -> Option<Vec<Vec<u8>>> {
    let bytes = std::fs::read(path).ok()?;
    excel_bytes_to_csv_bytes(&bytes)
}
/// Reads the merged-cell regions of the first worksheet of an xlsx
/// workbook.
///
/// Returns `(row_count, column_count, start -> end)` where
/// `column_count` is at least wide enough to cover the widest merged
/// region; `None` when the bytes are not a readable xlsx or the
/// workbook has no sheets.
pub fn get_first_sheet_merged_cells(bytes: Vec<u8>) -> Option<(u32, u32, HashMap<(u32,u32), (u32, u32)>)> {
    let cursor = Cursor::new(bytes);
    let mut workbook: Xlsx<_> = open_workbook_from_rs(cursor).ok()?;
    workbook.load_merged_regions().ok()?;
    let sheet_names = workbook.sheet_names();
    let first = sheet_names.first()?;
    let regions = workbook.merged_regions_by_sheet(first);
    let mut merged_cells = HashMap::with_capacity(regions.len());
    let mut max_col = 0;
    for (_, _, dims) in regions {
        merged_cells.insert(dims.start, dims.end);
        max_col = max_col.max(dims.end.1);
    }
    let range = workbook.worksheet_range_ref(first).ok()?;
    let (rows, cols) = range.get_size();
    // The used range may be narrower than the widest merged region.
    let width = (cols as u32).max(max_col + 1);
    Some((rows as u32, width, merged_cells))
}
/// Converts workbook bytes (xlsx/xls/ods/…) into one CSV buffer per
/// sheet, skipping sheets whose name starts with '_'. When the bytes are
/// not a workbook, they are returned as-is if they parse as CSV;
/// otherwise `None`.
pub fn excel_bytes_to_csv_bytes(bytes: &[u8]) -> Option<Vec<Vec<u8>>> {
    let cursor = Cursor::new(bytes.to_vec());
    match open_workbook_auto_from_rs(cursor) {
        Ok(mut workbook) => {
            let names: Vec<String> = workbook
                .sheet_names()
                .into_iter()
                .filter(|name| !name.starts_with('_'))
                .collect();
            sheets_to_csv(&mut workbook, names)
        }
        Err(_) => {
            // Not a workbook: accept the bytes verbatim when the first
            // CSV record parses cleanly.
            let first_record_ok = csv::ReaderBuilder::new()
                .has_headers(true)
                .from_reader(bytes)
                .records()
                .next()
                .is_some_and(|r| r.is_ok());
            first_record_ok.then(|| vec![bytes.to_vec()])
        }
    }
}
/// Converts only the named sheets of a workbook into CSV byte buffers;
/// `None` when the workbook cannot be opened or a sheet is missing.
pub fn excel_bytes_to_csv_bytes_by_sheet_names(
    bytes: &[u8],
    names: Vec<String>,
) -> Option<Vec<Vec<u8>>> {
    let mut workbook = open_workbook_auto_from_rs(Cursor::new(bytes.to_vec())).ok()?;
    sheets_to_csv(&mut workbook, names)
}
/// Converts the named worksheets of an opened workbook into CSV byte
/// buffers (one per non-empty sheet), using "\r\n" row terminators.
///
/// Strings are quoted only when they contain a comma, quote or newline;
/// embedded quotes are doubled. Returns `None` when a sheet is missing
/// or a write fails.
fn sheets_to_csv<T>(xl: &mut Sheets<T>, names: Vec<String>) -> Option<Vec<Vec<u8>>>
where
    T: std::io::Read + std::io::Seek,
{
    let mut result = Vec::with_capacity(names.len());
    for name in names {
        let range = xl.worksheet_range(name.as_str()).ok()?;
        // Index of the last column; saturating_sub avoids the usize
        // underflow panic the old `get_size().1 - 1` hit on a
        // zero-width sheet.
        let n = range.get_size().1.saturating_sub(1);
        let mut dest = Vec::new();
        for r in range.rows() {
            for (i, c) in r.iter().enumerate() {
                match *c {
                    // Empty cell -> empty field.
                    Data::Empty => Ok(()),
                    Data::String(ref s) => {
                        if s.contains(',')
                            || s.contains('\r')
                            || s.contains('\n')
                            || s.contains('"')
                        {
                            let new_s = s.replace('\"', "\"\"");
                            write!(dest, "\"{new_s}\"")
                        } else {
                            write!(dest, "{s}")
                        }
                    }
                    Data::Float(ref f) => write!(dest, "{f}"),
                    Data::DateTime(ref data) => write!(dest, "{data}"),
                    Data::DurationIso(ref s) | Data::DateTimeIso(ref s) => write!(dest, "{s}"),
                    Data::Int(ref i) => write!(dest, "{i}"),
                    Data::Error(ref e) => write!(dest, "{:?}", e),
                    Data::Bool(ref b) => write!(dest, "{b}"),
                }
                .ok()?;
                if i != n {
                    write!(dest, ",").ok()?;
                }
            }
            write!(dest, "\r\n").ok()?;
        }
        // Sheets with no rows produce no buffer at all.
        if !dest.is_empty() {
            result.push(dest);
        }
    }
    Some(result)
}
/// Text encodings recognized by `get_encoding` and decoded by
/// `transfer_to_utf8`.
#[derive(Debug, PartialEq)]
enum FileEncode {
    UTF8,
    UTF16LE,
    UTF16BE,
    GBK,
}
/// Decodes raw file bytes into UTF-8, auto-detecting UTF-8 / UTF-16LE /
/// UTF-16BE / GBK via `get_encoding`.
///
/// Returns `Err(())` on malformed input. The worst-case buffer-size
/// computation now also reports `Err(())` instead of panicking when the
/// required capacity overflows `usize` (previously an `unwrap`).
pub fn transfer_to_utf8(data: Vec<u8>) -> Result<Vec<u8>, ()> {
    let mut decoder = match get_encoding(data.as_slice()) {
        FileEncode::UTF8 => encoding_rs::UTF_8.new_decoder(),
        FileEncode::UTF16LE => encoding_rs::UTF_16LE.new_decoder(),
        FileEncode::UTF16BE => encoding_rs::UTF_16BE.new_decoder(),
        FileEncode::GBK => encoding_rs::GBK.new_decoder(),
    };
    // Worst-case UTF-8 output size for a single-shot decode; None means
    // the size calculation overflowed.
    let capacity = decoder.max_utf8_buffer_length(data.len()).ok_or(())?;
    let mut result = vec![0u8; capacity];
    let (_, _, written, has_errors) = decoder.decode_to_utf8(data.as_slice(), &mut result, true);
    if has_errors {
        Err(())
    } else {
        result.truncate(written);
        Ok(result)
    }
}
/// Sniffs the text encoding of `data`: BOM detection for UTF-16LE/BE and
/// UTF-8, otherwise a heuristic scan distinguishing UTF-8 from GBK.
///
/// Fixes over the previous version:
/// - BOM checks use `>=`, so an input that is exactly a BOM (2 or 3
///   bytes) is classified correctly instead of falling into the scan.
/// - A multi-byte UTF-8 sequence truncated at the end of the buffer no
///   longer indexes out of bounds (panic); it is classified as GBK.
fn get_encoding(data: &[u8]) -> FileEncode {
    let len = data.len();
    if len >= 2 && data[0] == 0xFF && data[1] == 0xFE {
        return FileEncode::UTF16LE;
    } else if len >= 2 && data[0] == 0xFE && data[1] == 0xFF {
        return FileEncode::UTF16BE;
    } else if len >= 3 && data[0] == 0xEF && data[1] == 0xBB && data[2] == 0xBF {
        // UTF-8 with BOM.
        return FileEncode::UTF8;
    }
    // No BOM: scan the bytes. UTF-8 encoding rules:
    // 1) a single-byte char has its top bit clear (ASCII);
    // 2) an n-byte char (n > 1) starts with n leading 1-bits then a 0,
    //    and every continuation byte starts with `10`.
    let mut utf8_number = 0; // consecutive multi-byte UTF-8 sequences seen
    let mut index = 0;
    while index < len {
        // Count the leading 1-bits of this sequence's first byte.
        let mut byte_number = 0;
        for i in 0..8 {
            if data[index] & (0b1000_0000 >> i) != 0 {
                byte_number += 1;
            } else {
                break;
            }
        }
        if byte_number == 0 {
            // ASCII byte: reset the multi-byte run counter.
            utf8_number = 0;
            index += 1;
        } else if byte_number == 1 || byte_number > 4 {
            // A bare continuation byte or an over-long lead byte cannot
            // start a valid UTF-8 sequence.
            return FileEncode::GBK;
        } else {
            // Verify the continuation bytes; a sequence running past the
            // end of the buffer cannot be valid UTF-8 either.
            for i in 1..byte_number {
                if index + i >= len || data[index + i] & 0b1100_0000 != 0b1000_0000 {
                    return FileEncode::GBK;
                }
            }
            // Valid UTF-8 bytes can still be valid GBK; three consecutive
            // multi-byte sequences is taken as strong evidence of UTF-8.
            utf8_number += 1;
            index += byte_number;
            if utf8_number >= 3 {
                return FileEncode::UTF8;
            }
        }
    }
    FileEncode::UTF8
}
\ No newline at end of file
use std::collections::{HashMap, HashSet};
use serde::{Deserialize, Serialize};
use crate::excel::{excel_bytes_to_csv_bytes, transfer_to_utf8};
use crate::{csv_str, csv_string, csv_u16, csv_u32, csv_u64, csv_usize};
/// Prefix marking a type string as skippable.
/// NOTE(review): the consumer of this marker is outside this file chunk —
/// confirm its exact semantics at the call sites.
pub const SKIP_TYPE_PREFIX: &str = "SKIP_";
/**
* @api {华云Mqtt通道信息} /HYMqttTransport HYMqttTransport
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u8} id 通道id
* @apiSuccess {String} name 通道名称
* @apiSuccess {tuple} mqtt_broker 服务端的ip和port,tuple格式为(ip:String, port:u16)
* @apiSuccess {u64} point_id 通道状态对应的测点号
* @apiSuccess {String} read_topic 读测点的主题
* @apiSuccess {String} write_topic 写测点的主题
* @apiSuccess {String} [user_name] 用户名,可选
* @apiSuccess {String} [user_password] 密码,可选
* @apiSuccess {u64} poll_time 轮询周期,单位毫秒
* @apiSuccess {bool} is_poll is_poll
* @apiSuccess {bool} is_new 版本,false是配电物联2020版本,true是2021版本,该参数会导致topic不同
* @apiSuccess {String} app_name APP的名称,用于生成topic
* @apiSuccess {Map} point_id_to_pos HashMap<point_id:u64, data_configure的索引:usize>
* @apiSuccess {HYPoint[]} data_configure 测点列表
* @apiSuccess {Map} model_to_pos HashMap<model:String, 测点索引:usize[]>
* @apiSuccess {Map} device_configure HashMap<device_id:u64, 设备的信息:HYDevice>
*/
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct HYMqttTransport {
    pub id: u64,
    /// Transport name.
    pub name: String,
    /// Broker ip and port.
    pub mqtt_broker: (String, u16),
    /// Measurement point reflecting the transport status.
    pub point_id: u64,
    /// Topic for reading points.
    pub read_topic: String,
    /// Topic for writing points.
    pub write_topic: String,
    /// User name, optional.
    pub user_name: Option<String>,
    /// Password, optional.
    pub user_password: Option<String>,
    /// Polling period in milliseconds.
    pub poll_time: u64,
    pub is_poll: bool,
    /// Protocol version: false = 2020 distribution-IoT edition, true =
    /// 2021 edition; this changes the generated topics.
    pub is_new: bool,
    /// APP name, used to build topics.
    pub app_name: String,
    /// key is point id, value is information object address (index into data_configure).
    pub point_id_to_pos: HashMap<u64, usize>,
    /// Point list.
    pub data_configure: Vec<HYPoint>,
    /// Model list: key is model name, value is point indexes.
    pub model_to_pos: HashMap<String, Vec<usize>>,
    /// Devices: key is device id, value is the device information.
    pub device_configure: HashMap<u32, HYDevice>,
}
/**
* @api {华云台区智能融合终端模型} /HYDevice HYDevice
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u32} device_id device_id
* @apiSuccess {bool} need_register need_register
* @apiSuccess {String} [dev] dev
* @apiSuccess {usize[]} points_pos points_pos
* @apiSuccess {u64} poll_period poll_period
* @apiSuccess {HYDeviceInfo} device_info device_info
*/
/// A HuaYun smart-fusion terminal device and the points it owns.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[allow(non_snake_case)]
pub struct HYDevice {
    pub device_id: u32,
    // Registration flag parsed from the CSV ("TRUE"/"FALSE").
    pub need_register: bool,
    // NOTE(review): None when parsed from CSV — confirm where the uuid
    // is assigned.
    pub dev_uuid: Option<String>,
    // Indexes into the transport's data_configure for this device's points.
    pub points_pos: Vec<usize>,
    pub poll_period: u64,
    pub device_info: HYDeviceInfo,
}
/**
* @api {华云台区智能融合终端模型信息} /HYDeviceInfo HYDeviceInfo
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {String} model 模型名称
* @apiSuccess {String} port RS485-1、RS485-2、RS485-3、RS485-4、PLC、UMW
* @apiSuccess {String} addr 地址
* @apiSuccess {String} desc 描述
* @apiSuccess {String} manuID 厂商ID 1234 名
* @apiSuccess {String} isReport 上报标志 0不需要上报,1需要上报
* @apiSuccess {String} manuName 厂商名称
* @apiSuccess {String} ProType 协议类型
* @apiSuccess {String} deviceType 设备型号
* @apiSuccess {String} nodeID 节点ID
* @apiSuccess {String} productID 产品ID
*/
// HuaYun - station-area smart-fusion terminal device model.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
#[allow(non_snake_case)]
pub struct HYDeviceInfo {
    /// Model name.
    pub model: String,
    /// One of RS485-1, RS485-2, RS485-3, RS485-4, PLC, UMW.
    pub port: String,
    /// Address.
    pub addr: String,
    /// Description.
    pub desc: String,
    pub manuID: String, // manufacturer id, e.g. 1234
    pub isReport: String, // report flag: 0 = do not report, 1 = report
    // The fields below were added in the newer protocol version.
    pub manuName: String, // manufacturer name
    pub ProType: String, // protocol type
    pub deviceType: String, // device model number
    pub nodeID: String, // node id
    pub productID: String, // product id
}
impl HYDeviceInfo {
pub fn new_undefine() -> Self {
HYDeviceInfo {
model: "".to_string(),
port: "".to_string(),
addr: "".to_string(),
desc: "".to_string(),
manuID: "".to_string(),
isReport: "".to_string(),
manuName: "".to_string(),
ProType: "".to_string(),
deviceType: "".to_string(),
nodeID: "".to_string(),
productID: "".to_string(),
}
}
}
/**
* @api {华云台区智能融合终端测点} /HYPoint HYPoint
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {bool} is_writable 是否可写(暂时无用)
* @apiSuccess {u64} device_id 测点归属的设备序号
* @apiSuccess {u64} point_id 对应的测点Id
* @apiSuccess {HYPointInfo} point_info 测点信息
*/
// HuaYun - station-area smart-fusion terminal point.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct HYPoint {
    /// Currently unused.
    pub not_realtime: bool,
    /// Id of the device this point belongs to.
    pub device_id: u32,
    /// Associated measurement point id.
    pub point_id: u64,
    pub point_info: HYPointInfo,
}
/**
* @api {华云台区智能融合终端测点信息} /HYPointInfo HYPointInfo
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {String} name name
* @apiSuccess {String} type type
* @apiSuccess {String} unit unit
* @apiSuccess {String} deadzone deadzone
* @apiSuccess {String} ratio ratio
* @apiSuccess {String} isReport isReport
* @apiSuccess {String} userdefine userdefine
*/
/// Point metadata serialized to/from the HuaYun terminal; field names
/// are part of the wire format.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[allow(non_snake_case)]
pub struct HYPointInfo {
    pub name: String,
    pub r#type: String,
    pub unit: String,
    pub deadzone: String,
    pub ratio: String,
    pub isReport: String,
    // The field name must not be changed (serialized verbatim)!
    pub userdefine: String,
}
impl HYMqttTransport {
/// Loads a HuaYun MQTT transport from a CSV or Excel file on disk.
/// Excel files (by ".xlsx"/".xls" suffix) are converted to CSV first and
/// only the first sheet is used; errors report the (0, 0) cell.
pub fn from_file(path: &str) -> Result<Self, (usize, usize)> {
    let raw = std::fs::read(path).map_err(|_| (0, 0))?;
    let is_excel = path.ends_with(".xlsx") || path.ends_with(".xls");
    let csv_bytes = if is_excel {
        let sheets = excel_bytes_to_csv_bytes(raw.as_slice()).unwrap_or_default();
        match sheets.first() {
            Some(first) => first.clone(),
            None => return Err((0, 0)),
        }
    } else {
        raw
    };
    Self::from_csv_bytes(csv_bytes.as_slice())
}
/// Loads a HuaYun MQTT transport from a CSV file on disk
/// (no Excel detection; see `from_file` for that).
pub fn from_csv(path: &str) -> Result<HYMqttTransport, (usize, usize)> {
    let bytes = std::fs::read(path).map_err(|_| (0, 0))?;
    HYMqttTransport::from_csv_bytes(bytes.as_slice())
}
pub fn from_csv_bytes(content: &[u8]) -> Result<HYMqttTransport, (usize, usize)> {
let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
let content = content_new.as_slice();
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(content);
let mut records = rdr.records();
let rc = (0usize, 1);
let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (1usize, 1);
let broker_ip = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (2usize, 1);
let broker_port = csv_u16(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let mqtt_broker = (broker_ip, broker_port);
let rc = (3usize, 1);
let point_num = csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (4usize, 1);
let point_id = csv_u64(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (5usize, 1);
let read_topic = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (6usize, 1);
let write_topic =
csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
// 下面用户名和密码是可选的
let mut user_name = None;
let mut user_password = None;
let rc = (7usize, 1);
if let Some(Ok(line)) = records.next() {
user_name = csv_string(&line, rc.1);
}
let rc = (8usize, 1);
if let Some(Ok(line)) = records.next() {
user_password = csv_string(&line, rc.1);
}
// 轮询时间可选,需大于100ms
let rc = (9usize, 1);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let s = csv_str(&record, rc.1).ok_or(rc)?;
let (poll_time, is_poll) = if s.is_empty() {
(10000, false)
} else {
let time = s.parse::<u64>().map_err(|_| rc)?;
if time < 100 {
return Err(rc);
}
(time, true)
};
let rc = (10usize, 1);
let app_name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (11usize, 1);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let s = csv_str(&record, rc.1).ok_or(rc)?.to_uppercase();
let is_new = match s.as_str() {
"FALSE" => false,
"TRUE" => true,
_ => false,
};
// 开启读取测点信息
let mut point_id_to_pos: HashMap<u64, usize> = HashMap::with_capacity(point_num);
let mut data_configure: Vec<HYPoint> = Vec::with_capacity(point_num);
let mut model_to_pos: HashMap<String, Vec<usize>> = HashMap::new();
let mut device_configure: HashMap<u32, HYDevice> = HashMap::new();
// 从新加载
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_reader(content);
let mut records = rdr.records();
let mut tmp = HashSet::with_capacity(point_num);
for i in 0..point_num {
let rc = (i, 3);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let id = csv_u64(&record, rc.1).ok_or(rc)?;
let rc = (i, 4);
let s = csv_str(&record, rc.1).ok_or(rc)?.to_uppercase();
let is_writable = match s.as_str() {
"FALSE" => false,
"TRUE" => true,
_ => false,
};
let rc = (i, 5);
let name = csv_string(&record, rc.1).ok_or(rc)?;
let rc = (i, 6);
let point_type = csv_string(&record, rc.1).ok_or(rc)?;
let rc = (i, 7);
let point_information = csv_string(&record, rc.1).ok_or(rc)?;
let pi: Vec<&str> = point_information.split(';').collect();
if pi.len() < 5 {
return Err(rc);
}
let rc = (i, 8);
let model_name = csv_string(&record, rc.1).ok_or(rc)?;
let rc = (i, 9);
let device_id = csv_u32(&record, rc.1).ok_or(rc)?;
let rc = (i, 10);
let device_information = csv_string(&record, rc.1).ok_or(rc)?;
let de: Vec<&str> = device_information.split(';').collect();
match device_configure.get_mut(&device_id) {
Some(d) => {
// 已经有这个设备
if d.device_info.model != model_name {
// 同一个设备的模型不一致
return Err((i, 9));
}
d.points_pos.push(i);
}
None => {
// 没有这个设备就添加新设备
if de.len() < 5 {
return Err(rc);
}
let rc = (i, 11);
let poll_period = csv_u64(&record, rc.1).ok_or(rc)?;
let rc = (i, 12);
let s = csv_str(&record, rc.1).ok_or(rc)?.to_uppercase();
let need_register = match s.as_str() {
"FALSE" => false,
"TRUE" => true,
_ => false,
};
device_configure.insert(
device_id,
HYDevice {
device_id,
need_register,
dev_uuid: None,
points_pos: vec![i],
poll_period,
device_info: HYDeviceInfo {
model: model_name.clone(),
port: de[0].to_string(),
addr: de[1].to_string(),
desc: de[2].to_string(),
manuID: de[3].to_string(),
isReport: de[4].to_string(),
// 以下如果没有就取默认值
manuName: de.get(5).unwrap_or(&"xxx").to_string(),
ProType: de.get(6).unwrap_or(&"xxx").to_string(),
deviceType: de.get(7).unwrap_or(&"1234").to_string(),
nodeID: de.get(8).unwrap_or(&"XXXX").to_string(),
productID: de.get(9).unwrap_or(&"1111XXXX").to_string(),
},
},
);
}
}
data_configure.push(HYPoint {
not_realtime: is_writable,
device_id,
point_id: id,
point_info: HYPointInfo {
name: name.clone(),
r#type: point_type.clone(),
unit: pi[0].to_string(),
deadzone: pi[1].to_string(),
ratio: pi[2].to_string(),
isReport: pi[3].to_string(),
userdefine: pi[4].to_string(),
},
});
match model_to_pos.get_mut(&model_name) {
Some(v) => {
// 记录名字不重复的量测点
let mut is_exist = false;
for pos in &mut *v {
if data_configure[*pos].point_info.name == name {
is_exist = true;
break;
}
}
if !is_exist {
v.push(i);
}
}
None => {
model_to_pos.insert(model_name, vec![i]);
}
}
// 测点不能重复
if tmp.contains(&id) {
return Err(rc);
}
point_id_to_pos.insert(id, i);
let point_type_upper = point_type.to_uppercase();
let types = point_type_upper.split(";").collect::<Vec<_>>();
if types.len() > 1 {
let mut current_id = id;
for t in types {
if !t.starts_with(SKIP_TYPE_PREFIX) {
tmp.insert(current_id);
current_id += 1;
}
}
} else {
tmp.insert(id);
}
}
Ok(HYMqttTransport {
id: 0,
name,
mqtt_broker,
point_id,
read_topic,
write_topic,
user_name,
user_password,
poll_time,
is_poll,
point_id_to_pos,
data_configure,
model_to_pos,
device_configure,
app_name,
is_new
})
}
/// Collects the point ids exposed by this transport.
///
/// A point whose type field contains several `;`-separated entries expands
/// into consecutive ids, one per entry that does not start with
/// `SKIP_TYPE_PREFIX`; a single-type point contributes its own id unchanged.
pub fn get_point_ids(&self) -> Vec<u64> {
    let mut ids = Vec::with_capacity(self.point_id_to_pos.len());
    for (&base_id, &pos) in &self.point_id_to_pos {
        let upper = self.data_configure[pos].point_info.r#type.to_uppercase();
        let segments: Vec<&str> = upper.split(';').collect();
        if segments.len() > 1 {
            // Multi-type point: hand out consecutive ids, skipping
            // segments marked with the skip prefix.
            let mut next_id = base_id;
            for segment in segments {
                if segment.starts_with(SKIP_TYPE_PREFIX) {
                    continue;
                }
                ids.push(next_id);
                next_id += 1;
            }
        } else {
            ids.push(base_id);
        }
    }
    ids
}
}
\ No newline at end of file
use std::{collections::HashMap, convert::TryInto};
use std::collections::HashSet;
use csv::StringRecord;
use serde::{Deserialize, Serialize};
use crate::excel::{excel_bytes_to_csv_bytes, transfer_to_utf8};
use super::*;
/**
* @api {Iec104ClientTp} /Iec104ClientTp Iec104ClientTp
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 通道id
* @apiSuccess {String} name 通道名称
* @apiSuccess {tuple} tcp_server 服务端的ip和port,tuple格式为(ip:String, port:u16)
* @apiSuccess {Iec104Connection} connection connection
*/
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct Iec104ClientTp {
/// Transport (channel) id
pub id: u64,
/// Transport name
pub name: String,
/// Ip and port of the server this client connects to
pub tcp_server: (String, u16),
/// Data type used for telesignaling (YX) points
#[serde(default)]
pub yx_data_type: u8,
/// Data type used for telemetering (YC) points
#[serde(default)]
pub yc_data_type: u8,
/// Connection settings
pub connection: Iec104Connection,
}
/**
* @api {Iec104ServerTp} /Iec104ServerTp Iec104ServerTp
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 通道id
* @apiSuccess {String} name 通道名称
* @apiSuccess {u16} tcp_server_port 服务的port
* @apiSuccess {u8} yx_data_type 遥信点号的数据类型
* @apiSuccess {u8} yc_data_type 遥测点号的数据类型
* @apiSuccess {tuple[]} connections 连接信息,数组,tuple格式为(String, Iec104Connection)
*/
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct Iec104ServerTp {
/// Transport (channel) id
pub id: u64,
/// Transport name
pub name: String,
/// Listening port of the server
pub tcp_server_port: u16,
/// Data type used for telesignaling (YX) points
pub yx_data_type: u8,
/// Data type used for telemetering (YC) points
pub yc_data_type: u8,
/// Connections, keyed by an "ip/port/name" string (see `from_csv_bytes`)
pub connections: Vec<(String, Iec104Connection)>,
}
/**
* @api {Iec104测点信息} /Iec104Point Iec104Point
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u32} ioa 协议地址
* @apiSuccess {u64} point_id 对应的测点Id
* @apiSuccess {bool} is_yx 是否是遥信量
* @apiSuccess {u32} [control_ioa] 控制点地址,若进行配置控制点地址,则说明该点可写
*/
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Iec104Point {
/// Information object address (monitoring direction)
pub ioa: u32,
/// Id of the mapped measurement point
pub point_id: u64,
/// Whether this is a telesignaling (YX) point
pub is_yx: bool,
/// Control-direction address; when configured, the point is writable
pub control_ioa: Option<u32>,
}
/**
* @api {Iec104通道连接信息} /Iec104Connection Iec104Connection
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {String} name 连接名称
* @apiSuccess {u64} point_id 通道状态对应的测点号
* @apiSuccess {Iec104Point[]} data_configure register settings
* @apiSuccess {Map} point_id_to_ioa HashMap<point_id:u64, information_object_addressa:u32>
* @apiSuccess {Map} ioa_to_pos HashMap<Point地址:u32, data_configure中的位置:u16>
* @apiSuccess {bool} is_control_with_time 控制方向是否带时标
* @apiSuccess {u16} common_address 公共地址
* @apiSuccess {u8} cot_field_length 传输原因字节个数
* @apiSuccess {u8} common_address_field_length 公共地址字节个数
* @apiSuccess {u8} ioa_field_length 信息体地址字节个数
* @apiSuccess {u64} max_time_no_ack_received t1
* @apiSuccess {u64} max_time_no_ack_sent t2
* @apiSuccess {u64} max_idle_time t3
* @apiSuccess {u8} max_unconfirmed_apdus_sent k,发送方发送k条连续的未被确认的I格式报文,停止发送
* @apiSuccess {u8} max_unconfirmed_apdus_received w,接收方收到w个I格式报文后发送确认
* @apiSuccess {u64} [call_time] 总召时间间隔
* @apiSuccess {u64} [call_counter_time] 点度量总召时间间隔
* @apiSuccess {bool} is_client 是否为客户端
*/
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Iec104Connection {
/// Connection name
pub name: String,
/// Point id that mirrors the connection (link) status
pub point_id: u64,
/// Register settings
pub data_configure: Vec<Iec104Point>,
/// key: point id, value: information object address
pub point_id_to_ioa: HashMap<u64, u32>,
/// key: information object address, value: index into `data_configure`
pub ioa_to_pos: HashMap<u32, u16>,
/// Whether control-direction commands carry a timestamp
pub is_control_with_time: bool,
/// Whether remote control (YK) executes directly (no select-before-operate); defaults to false
#[serde(default)]
pub direct_yk: bool,
/// Whether remote regulation (YT) executes directly; defaults to false
#[serde(default)]
pub direct_yt: bool,
/// Originator address
pub originator_address: u8,
/// Common address of the ASDU
pub common_address: u16,
/// Number of bytes in the cause-of-transmission field
pub cot_field_length: u8,
/// Number of bytes in the common address field
pub common_address_field_length: u8,
/// Number of bytes in the information object address field
pub ioa_field_length: u8,
/// t1 — maximum time (ms) with no acknowledgement received
pub max_time_no_ack_received: u64,
/// t2 — maximum time (ms) before sending an acknowledgement
pub max_time_no_ack_sent: u64,
/// t3 — maximum idle time (ms)
pub max_idle_time: u64,
/// k — sender stops after this many consecutive unacknowledged I-frames
pub max_unconfirmed_apdus_sent: u8,
/// w — receiver acknowledges after this many received I-frames
pub max_unconfirmed_apdus_received: u8,
/// General interrogation interval (ms), if enabled
pub call_time: Option<u64>,
/// Counter interrogation interval (ms), if enabled
pub call_counter_time: Option<u64>,
/// Whether this endpoint acts as the client
pub is_client: bool,
}
impl Default for Iec104Connection {
fn default() -> Self {
Iec104Connection {
name: "new".to_string(),
point_id: 0,
data_configure: vec![],
point_id_to_ioa: Default::default(),
ioa_to_pos: Default::default(),
is_control_with_time: false,
direct_yk: false,
direct_yt: false,
originator_address: 0,
common_address: 1,
cot_field_length: 2,
common_address_field_length: 2,
ioa_field_length: 3,
max_time_no_ack_received: 15000,
max_time_no_ack_sent: 10000,
max_idle_time: 20000,
max_unconfirmed_apdus_sent: 12,
max_unconfirmed_apdus_received: 8,
call_time: None,
call_counter_time: None,
is_client: false,
}
}
}
impl Iec104ClientTp {
/// Loads a client transport from a `.csv`, `.xlsx` or `.xls` file.
///
/// Excel workbooks are converted to CSV first; only the first sheet is
/// used. Any I/O or conversion failure is reported as the opaque error
/// position `(0, 0)`.
pub fn from_file(path: &str) -> Result<Self, (usize, usize)> {
    let raw = std::fs::read(path).map_err(|_| (0, 0))?;
    // NOTE(review): decryption of encrypted files used to happen here and
    // is currently disabled.
    let is_excel = path.ends_with(".xlsx") || path.ends_with(".xls");
    let csv_bytes = if is_excel {
        let sheets = excel_bytes_to_csv_bytes(raw.as_slice()).unwrap_or_default();
        match sheets.first() {
            Some(first_sheet) => first_sheet.clone(),
            None => return Err((0, 0)),
        }
    } else {
        raw
    };
    Self::from_csv_bytes(csv_bytes.as_slice())
}
/// Loads a client transport from a plain CSV file (no Excel handling).
pub fn from_csv(path: &str) -> Result<Iec104ClientTp, (usize, usize)> {
    // NOTE(review): decryption of encrypted files used to happen here and
    // is currently disabled.
    let bytes = std::fs::read(path).map_err(|_| (0, 0))?;
    Iec104ClientTp::from_csv_bytes(bytes.as_slice())
}
/// Parses a client transport from raw CSV bytes and validates that the
/// configured server address is a well-formed IPv4 address.
pub fn from_csv_bytes(content: &[u8]) -> Result<Iec104ClientTp, (usize, usize)> {
    let utf8 = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
    let tp = Iec104ClientTp::from_csv_records(utf8.as_slice(), 0)?;
    // Row 3 / column 1 holds the server ip; reject anything that does not
    // parse as IPv4.
    let err_pos = (2usize, 1);
    if tp.tcp_server.0.parse::<std::net::Ipv4Addr>().is_err() {
        return Err(err_pos);
    }
    Ok(tp)
}
/// Parses one client-transport configuration from CSV bytes.
///
/// Rows 1..=21 of column `1 + offset` carry the scalar connection settings;
/// a second pass reads `point_num` register rows starting at column
/// `3 + offset`. The `offset` lets the same routine parse a connection
/// column-block embedded in a multi-connection server sheet.
///
/// # Errors
/// Returns the zero-based `(row, column)` of the first invalid cell.
fn from_csv_records(
    content: &[u8],
    offset: usize,
) -> Result<Iec104ClientTp, (usize, usize)> {
    let mut rdr = csv::ReaderBuilder::new()
        .has_headers(false)
        .from_reader(content);
    let mut records = rdr.records();
    // 1st line: transport name
    let rc = (0usize, 1 + offset);
    let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
    // 2nd line: number of register points
    let rc = (1usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let point_num = csv_usize(&record, rc.1).ok_or(rc)?;
    // Register positions are stored as u16 (`Iec104Connection::ioa_to_pos`),
    // so the count must fit in u16. BUGFIX: the old test
    // (`point_num as u16 > u16::MAX`) truncated before comparing and was
    // therefore always false.
    if point_num > u16::MAX as usize {
        return Err(rc);
    }
    // 3rd line: server ip
    let rc = (2usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let tcp_server_ip = csv_string(&record, rc.1).ok_or(rc)?;
    // 4th line: server port
    let rc = (3usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let tcp_server_port = csv_u16(&record, rc.1).ok_or(rc)?;
    // 5th line: optional connection-status point id (0 when absent)
    let rc = (4usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let s = csv_str(&record, rc.1).ok_or(rc)?;
    let point_id: u64 = if s.is_empty() {
        0
    } else {
        s.parse().map_err(|_| rc)?
    };
    // 6th line: originator address
    let rc = (5usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let originator_address = csv_u8(&record, rc.1).ok_or(rc)?;
    // 7th line: common address
    let rc = (6usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let common_address = csv_u16(&record, rc.1).ok_or(rc)?;
    // 8th line: common address field length
    let rc = (7usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let common_address_field_length = csv_u8(&record, rc.1).ok_or(rc)?;
    // 9th line: cause-of-transmission field length
    let rc = (8usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let cot_field_length = csv_u8(&record, rc.1).ok_or(rc)?;
    // 10th line: information object address field length
    let rc = (9usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let ioa_field_length = csv_u8(&record, rc.1).ok_or(rc)?;
    // 11th line: t1 (ms)
    let rc = (10usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let max_time_no_ack_received = csv_u64(&record, rc.1).ok_or(rc)?;
    // 12th line: t2 (ms)
    let rc = (11usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let max_time_no_ack_sent = csv_u64(&record, rc.1).ok_or(rc)?;
    // 13th line: t3 (ms)
    let rc = (12usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let max_idle_time = csv_u64(&record, rc.1).ok_or(rc)?;
    // 14th line: k
    let rc = (13usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let max_unconfirmed_apdus_sent = csv_u8(&record, rc.1).ok_or(rc)?;
    // 15th line: w
    let rc = (14usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let max_unconfirmed_apdus_received = csv_u8(&record, rc.1).ok_or(rc)?;
    // 16th line: "with-time;direct-yk;direct-yt" boolean triple
    let rc = (15usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let s = csv_string(&record, rc.1).ok_or(rc)?.trim().to_uppercase();
    let cs: Vec<&str> = s.split(';').collect();
    // Missing segments default to false.
    let flag = |i: usize| cs.get(i).map_or(false, |c| *c == "TRUE");
    let is_control_with_time = flag(0);
    let direct_yk = flag(1);
    let direct_yt = flag(2);
    // 17th line: optional general-interrogation interval (ms)
    let rc = (16usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let s = csv_str(&record, rc.1).ok_or(rc)?;
    let call_time = if s.is_empty() {
        None
    } else {
        Some(s.parse::<u64>().map_err(|_| rc)?)
    };
    // 18th line: client flag
    let rc = (17usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let s = csv_string(&record, rc.1).ok_or(rc)?.trim().to_uppercase();
    let is_client = s.as_str() == "TRUE";
    // 19th line: optional counter-interrogation interval (ms)
    let rc = (18usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let s = csv_str(&record, rc.1).ok_or(rc)?;
    let call_counter_time = if s.is_empty() {
        None
    } else {
        Some(s.parse::<u64>().map_err(|_| rc)?)
    };
    // 20th line: default telesignaling data type (0 when absent)
    let rc = (19usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let s = csv_str(&record, rc.1).ok_or(rc)?;
    let yx_data_type = if s.is_empty() {
        0
    } else {
        s.parse::<u8>().map_err(|_| rc)?
    };
    // 21st line: default telemetering data type (0 when absent)
    let rc = (20usize, 1 + offset);
    let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
    let s = csv_str(&record, rc.1).ok_or(rc)?;
    let yc_data_type = if s.is_empty() {
        0
    } else {
        s.parse::<u8>().map_err(|_| rc)?
    };
    // Second pass: re-open the reader, skip the header row, then read the
    // register table starting at column `3 + offset`.
    let mut rdr = csv::ReaderBuilder::new()
        .has_headers(false)
        .from_reader(content);
    let mut records = rdr.records();
    let rc = (0, 3 + offset);
    records.next().ok_or(rc)?.map_err(|_| rc)?;
    let mut data_configure: Vec<Iec104Point> = Vec::with_capacity(point_num);
    for row in 1..=point_num {
        let rc = (row, 3 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        data_configure.push(Iec104Point::parse_register_data(&record, rc.0, rc.1)?);
    }
    let mut conn = Iec104Connection {
        name: name.clone(),
        point_id,
        data_configure,
        is_control_with_time,
        direct_yk,
        direct_yt,
        originator_address,
        common_address,
        cot_field_length,
        common_address_field_length,
        ioa_field_length,
        max_time_no_ack_received,
        max_time_no_ack_sent,
        max_idle_time,
        max_unconfirmed_apdus_sent,
        max_unconfirmed_apdus_received,
        call_time,
        call_counter_time,
        is_client,
        ..Default::default()
    };
    // Build the lookup tables; shift reported error columns by `offset`.
    conn.create_data_config().map_err(|(r, c, _)| (r, c + offset))?;
    Ok(Iec104ClientTp {
        id: 0,
        name,
        tcp_server: (tcp_server_ip, tcp_server_port),
        yx_data_type,
        yc_data_type,
        connection: conn,
    })
}
/// Serializes this client transport back to the CSV layout read by
/// `from_csv_records`: labels are looked up in `text_map` with English
/// fallbacks, settings occupy the first two columns, and the register
/// table starts at column 3.
pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
// Row labels for the 20 setting rows; indices must stay aligned with
// `content` below.
let title = vec![
text_map.get("point_number").unwrap_or(&"Point Count".to_string()).clone(),
text_map.get("server_ip").unwrap_or(&"Server IP".to_string()).clone(),
text_map.get("server_port").unwrap_or(&"Server Port".to_string()).clone(),
text_map.get("tp_point_id").unwrap_or(&"Point ID".to_string()).clone(),
text_map.get("originator_addr").unwrap_or(&"Originator Address".to_string()).clone(),
text_map.get("common_addr").unwrap_or(&"Common Address".to_string()).clone(),
text_map.get("common_addr_filed_len").unwrap_or(&"Common Address Field Length".to_string()).clone(),
text_map.get("cot_field_len").unwrap_or(&"Cot Field Length".to_string()).clone(),
text_map.get("ioa_field_len").unwrap_or(&"Ioa Field Length".to_string()).clone(),
text_map.get("t1_ms").unwrap_or(&"T1 (ms)".to_string()).clone(),
text_map.get("t2_ms").unwrap_or(&"T2 (ms)".to_string()).clone(),
text_map.get("t3_ms").unwrap_or(&"T3 (ms)".to_string()).clone(),
text_map.get("iec104_k").unwrap_or(&"k".to_string()).clone(),
text_map.get("iec104_w").unwrap_or(&"w".to_string()).clone(),
text_map.get("is_control_with_time").unwrap_or(&"Is Control With Time;Direct yk;Direct yt".to_string()).clone(),
text_map.get("call_time_ms").unwrap_or(&"Call Time (ms)".to_string()).clone(),
text_map.get("is_client_true").unwrap_or(&"is Client(TRUE)".to_string()).clone(),
text_map.get("call_counter_time").unwrap_or(&"Call Counter Time (ms)".to_string()).clone(),
text_map.get("telesignaling_type_default").unwrap_or(&"Default Telesignaling Type".to_string()).clone(),
text_map.get("telemetering_type_default").unwrap_or(&"Default Telemetering Type".to_string()).clone(),
];
let c = self.connection.clone();
let p = self.connection.data_configure.clone();
// Setting values; slots 15/17/18/19 start empty and are patched below
// when the corresponding optional field is set.
let mut content = vec![
format!("{}", c.data_configure.len()),
format!("{}", self.tcp_server.0),
format!("{}", self.tcp_server.1),
format!("{}", c.point_id),
format!("{}", c.originator_address),
format!("{}", c.common_address),
format!("{}", c.common_address_field_length),
format!("{}", c.cot_field_length),
format!("{}", c.ioa_field_length),
format!("{}", c.max_time_no_ack_received),
format!("{}", c.max_time_no_ack_sent),
format!("{}", c.max_idle_time),
format!("{}", c.max_unconfirmed_apdus_sent),
format!("{}", c.max_unconfirmed_apdus_received),
format!("{};{};{}", c.is_control_with_time, c.direct_yk, c.direct_yt).to_uppercase(),
"".to_string(),
//format!("{}", c.is_client).to_uppercase(),
// NOTE(review): the client flag is exported as a hard-coded "TRUE"
// instead of `c.is_client` — confirm this is intended.
"TRUE".to_string(),
"".to_string(),
"".to_string(),
"".to_string(),
];
if let Some(call_time) = c.call_time {
content[15] = call_time.to_string()
};
if let Some(call_counter_time) = c.call_counter_time {
content[17] = call_counter_time.to_string()
};
if self.yx_data_type != 0 {
content[18] = self.yx_data_type.to_string();
}
if self.yc_data_type != 0 {
content[19] = self.yc_data_type.to_string();
}
// Header row: transport name plus the register-table column headings.
let mut result = format!(
"{},{},{},{},{},{},{}\n",
text_map.get("tp_name").unwrap_or(&"Transport Name".to_string()),
get_csv_str(&self.name),
text_map.get("index").unwrap_or(&"Index".to_string()),
text_map.get("addr_monitor_direction").unwrap_or(&"Address of Monitoring Direction Information".to_string()),
text_map.get("point_id_short").unwrap_or(&"Status Point".to_string()),
text_map.get("is_telesignaling").unwrap_or(&"Is Telesignaling".to_string()),
text_map.get("addr_control_direction").unwrap_or(&"Address of Control Direction Information".to_string()),
);
// First 20 rows carry both a setting (columns 1-2) and, when available,
// a register entry (columns 3+).
for i in 0_usize..20_usize {
if p.len() > i {
let yx_status = format!("{}", p[i].is_yx).to_uppercase();
result += &format!(
"{},{},{},{},{},{}",
title[i],
content[i],
i + 1,
p[i].ioa,
p[i].point_id,
yx_status
);
if let Some(addr) = p[i].control_ioa {
result += &format!(",{}\n", addr);
} else {
result += ", \n";
}
} else {
result += &format!("{},{},,,,,\n", title[i], content[i]);
}
}
// Any register entries past row 20 are emitted with empty setting columns.
if p.len() > 20 {
let mut index = 20_usize;
while index < p.len() {
let yx_status = format!("{}", p[index].is_yx).to_uppercase();
result += &format!(
",,{},{},{},{}",
index + 1,
p[index].ioa,
p[index].point_id,
yx_status
);
if let Some(addr) = p[index].control_ioa {
result += &format!(",{}\n", addr);
} else {
result += ", \n";
}
index += 1;
}
}
result
}
/// Returns every point id referenced by this transport: one per configured
/// register, plus the connection-status point when it is set.
pub fn get_point_ids(&self) -> Vec<u64> {
    let conn = &self.connection;
    let mut ids: Vec<u64> = conn.data_configure.iter().map(|rd| rd.point_id).collect();
    if conn.point_id != UNKNOWN_POINT_ID {
        ids.push(conn.point_id);
    }
    ids
}
}
impl Iec104ServerTp {
/// Loads a server transport from a `.csv`, `.xlsx` or `.xls` file.
///
/// Excel workbooks are converted to CSV first; only the first sheet is
/// used. Any I/O or conversion failure is reported as `(0, 0)`.
pub fn from_file(path: &str) -> Result<Self, (usize, usize)> {
    let raw = std::fs::read(path).map_err(|_| (0, 0))?;
    // NOTE(review): decryption of encrypted files used to happen here and
    // is currently disabled.
    let is_excel = path.ends_with(".xlsx") || path.ends_with(".xls");
    let csv_bytes = if is_excel {
        let sheets = excel_bytes_to_csv_bytes(raw.as_slice()).unwrap_or_default();
        match sheets.first() {
            Some(first_sheet) => first_sheet.clone(),
            None => return Err((0, 0)),
        }
    } else {
        raw
    };
    Self::from_csv_bytes(csv_bytes.as_slice())
}
/// Loads a server transport from a plain CSV file (no Excel handling).
pub fn from_csv(path: &str) -> Result<Iec104ServerTp, (usize, usize)> {
    // NOTE(review): decryption of encrypted files used to happen here and
    // is currently disabled.
    let bytes = std::fs::read(path).map_err(|_| (0, 0))?;
    Iec104ServerTp::from_csv_bytes(bytes.as_slice())
}
/// Parses a server transport from raw CSV bytes.
///
/// The first six rows hold server-level settings; after them the sheet
/// contains `conn_num` connection blocks, each 8 columns wide, parsed by
/// re-using `Iec104ClientTp::from_csv_records` with a column offset.
/// Connections are keyed by "ip/port/name"; a `point_id` in
/// `2..=DEFAULT_TCP_CLIENT_LIMIT` fans one configuration out to that many
/// keyed connections.
pub fn from_csv_bytes(content: &[u8]) -> Result<Iec104ServerTp, (usize, usize)> {
let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
let content = content_new.as_slice();
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(content);
// 1. Parse the server-level settings
let mut records = rdr.records();
let rc = (0usize, 1);
let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (1usize, 1);
let mut conn_num =
csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (2usize, 1);
let tcp_server_port =
csv_u16(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (3usize, 1);
let yx_data_type = csv_u8(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (4usize, 1);
let yc_data_type = csv_u8(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (5usize, 1);
let s = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let is_client = s.as_str() == "TRUE";
if is_client {
conn_num = 1; // a client-mode slave keeps only a single connection
}
// 2. Parse each connection block (8 columns per block)
let mut connections: Vec<(String, Iec104Connection)> = Vec::with_capacity(conn_num);
let offset = 8;
for i in 0..conn_num {
let mut tp = Iec104ClientTp::from_csv_records(content, i * offset + 3)?;
tp.connection.is_client = is_client; // keep every connection's client/server flag consistent with the transport-level setting
let (client_ip, client_port) = tp.tcp_server;
if client_ip != "+" {
let rc = (2usize, i * offset + 4);
client_ip.parse::<std::net::Ipv4Addr>().map_err(|_| rc)?;
}
// Clients sharing the same ip must share the same configuration
for (key, conn) in &connections {
// The connection name equals the transport name here
if *key == format!("{}/{}/{}", client_ip, client_port, conn.name) {
// The address tables must match
if *conn.data_configure != tp.connection.data_configure {
return Err((0, i * offset + 3));
}
}
}
let port_str = client_port.to_string();
let port = if client_port as u32 == UNKNOWN_TCP_PORT {
"+"
} else {
port_str.as_str()
};
// `point_id` fans one configuration out to several clients; ids 1-100
// are reserved and must not be used as real status points here.
if tp.connection.point_id > 1
&& tp.connection.point_id <= DEFAULT_TCP_CLIENT_LIMIT as u64 {
let count = tp.connection.point_id;
for i in 1..count {
let key = format!("{}/{}/{}@{}", client_ip, port, tp.name, i);
let mut connection = tp.connection.clone();
connection.point_id = UNKNOWN_POINT_ID; // fanned-out connections share one config; no status point needed
connections.push((key, connection));
}
let key = format!("{}/{}/{}@{}", client_ip, port, tp.name, count);
tp.connection.point_id = UNKNOWN_POINT_ID;
connections.push((key, tp.connection))
} else {
let key = format!("{}/{}/{}", client_ip, port, tp.name);
connections.push((key, tp.connection))
}
}
Ok(Iec104ServerTp {
id: 0,
name,
tcp_server_port,
yx_data_type,
yc_data_type,
connections,
})
}
/// Serializes this server transport back to the multi-connection CSV
/// layout read by `from_csv_bytes`: transport-level settings lead each
/// row, followed by one column block per distinct connection. Connections
/// whose key starts with "+/" (wildcard ip) are collapsed into a single
/// block per port, with the fan-out count written in the Point ID row.
pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
// Count the distinct column blocks: one per ordinary connection, plus one
// per port among the "+/" wildcard connections.
let mut len_conn = 0;
let mut unkown_ip_map: HashMap<u32, usize> = HashMap::new();
for (s, _) in &self.connections {
if s.starts_with("+/") {
let info: Vec<&str> = s.split('/').collect();
if info.len() != 3 {
continue;
}
let port = if info[1] == "+" {
UNKNOWN_TCP_PORT
} else {
info[1].parse::<u32>().unwrap_or(UNKNOWN_TCP_PORT)
};
if let Some(multi_count) = unkown_ip_map.get_mut(&port) {
*multi_count += 1;
continue;
} else {
unkown_ip_map.insert(port, 1);
len_conn += 1;
}
} else {
len_conn += 1;
}
}
// Header row
let mut result = format!("{},{},,",
text_map.get("tp_name").unwrap_or(&"Transport Name".to_string()),
get_csv_str(&self.name));
let mut i = 0;
let mut multi_found = false;
// NOTE(review): each connection's header is appended before the "+/"
// duplicate check below, so every wildcard connection emits a header even
// though `i` only advances once per group — confirm this is intended.
for (s, conn) in &self.connections {
result += &format!(
"{},{},{},{},{},{},{}",
text_map.get("conn_name").unwrap_or(&"Connection Name".to_string()),
get_csv_str(&conn.name),
text_map.get("index").unwrap_or(&"Index".to_string()),
text_map.get("addr_monitor_direction").unwrap_or(&"Address of Monitoring Direction Information".to_string()),
text_map.get("status_point").unwrap_or(&"Status Point".to_string()),
text_map.get("is_telesignaling").unwrap_or(&"Is Telesignaling".to_string()),
text_map.get("addr_control_direction").unwrap_or(&"Address of Control Direction Information".to_string()),
);
if i != len_conn - 1 {
result += ",,";
} else {
break;
}
if s.starts_with("+/") {
if multi_found {
continue;
} else {
multi_found = true;
i += 1;
}
} else {
i += 1;
}
}
result += "\n";
// Rows 2 through 19: per-connection setting labels (must stay aligned
// with `get_iec104d_conn_csv`'s index mapping).
let title_conn = vec![
text_map.get("point_number").unwrap_or(&"Point Count".to_string()).clone(),
text_map.get("client_ip").unwrap_or(&"Client IP".to_string()).clone(),
text_map.get("client_port").unwrap_or(&"Client Port".to_string()).clone(),
text_map.get("tp_point_id").unwrap_or(&"Point ID".to_string()).clone(),
text_map.get("originator_addr").unwrap_or(&"Originator Address".to_string()).clone(),
text_map.get("common_addr").unwrap_or(&"Common Address".to_string()).clone(),
text_map.get("common_addr_filed_len").unwrap_or(&"Common Address Field Length".to_string()).clone(),
text_map.get("cot_field_len").unwrap_or(&"Cot Field Length".to_string()).clone(),
text_map.get("ioa_field_len").unwrap_or(&"Ioa Field Length".to_string()).clone(),
text_map.get("t1_ms").unwrap_or(&"T1 (ms)".to_string()).clone(),
text_map.get("t2_ms").unwrap_or(&"T2 (ms)".to_string()).clone(),
text_map.get("t3_ms").unwrap_or(&"T3 (ms)".to_string()).clone(),
text_map.get("iec104_k").unwrap_or(&"k".to_string()).clone(),
text_map.get("iec104_w").unwrap_or(&"w".to_string()).clone(),
text_map.get("is_control_with_time").unwrap_or(&"Is Control With Time;Direct yk;Direct yt".to_string()).clone(),
text_map.get("call_time_ms").unwrap_or(&"Call Time (ms)".to_string()).clone(),
text_map.get("is_client").unwrap_or(&"is Client".to_string()).clone(),
text_map.get("call_counter_time").unwrap_or(&"Call Counter Time (ms)".to_string()).clone(),
];
// Transport-level leading columns for those same rows, padded with the
// IEC104D_INFO data-type legend.
let title_tp = vec![
format!(
"{},{}",
text_map.get("conn_num").unwrap_or(&"Connection Count".to_string()),
len_conn
),
format!(
"{},{}",
text_map.get("server_port").unwrap_or(&"Server Port".to_string()),
self.tcp_server_port
),
format!(
"{},{}",
text_map.get("telesignaling_type").unwrap_or(&"Telesignaling Type".to_string()),
self.yx_data_type
),
format!(
"{},{}",
text_map.get("telemetering_type").unwrap_or(&"Telemetering Type".to_string()),
self.yc_data_type
),
format!("{},FALSE", text_map.get("is_client").unwrap_or(&"Is Client".to_string())),
text_map.get("type_id_name").unwrap_or(&"Type ID,Type Name".to_string()).clone(),
IEC104D_INFO[0].to_string(),
IEC104D_INFO[1].to_string(),
IEC104D_INFO[2].to_string(),
IEC104D_INFO[3].to_string(),
IEC104D_INFO[4].to_string(),
IEC104D_INFO[5].to_string(),
IEC104D_INFO[6].to_string(),
IEC104D_INFO[7].to_string(),
IEC104D_INFO[8].to_string(),
IEC104D_INFO[9].to_string(),
IEC104D_INFO[10].to_string(),
IEC104D_INFO[11].to_string(),
];
for cnt in 0..18 {
result += &title_tp[cnt];
let mut i = 0;
let mut multi_found = HashSet::with_capacity(unkown_ip_map.len());
for conn in &self.connections {
if conn.0.starts_with("+/") {
let info: Vec<&str> = conn.0.split('/').collect();
if info.len() != 3 {
continue;
}
let port = if info[1] == "+" {
UNKNOWN_TCP_PORT
} else {
info[1].parse::<u32>().unwrap_or(UNKNOWN_TCP_PORT)
};
if multi_found.contains(&port) {
continue;
} else {
multi_found.insert(port);
i += 1;
}
} else {
i += 1;
}
if conn.1.data_configure.len() > cnt {
let p = &conn.1.data_configure[cnt];
// For wildcard connections, the Point ID row (cnt == 3) carries
// the fan-out count instead of the status point id.
let content_conn = if cnt == 3 && conn.0.starts_with("+/") {
let info: Vec<&str> = conn.0.split('/').collect();
if info.len() != 3 {
continue;
}
let port = if info[1] == "+" {
UNKNOWN_TCP_PORT
} else {
info[1].parse::<u32>().unwrap_or(UNKNOWN_TCP_PORT)
};
unkown_ip_map.get(&port).unwrap_or(&0).to_string()
} else {
Self::get_iec104d_conn_csv(conn, cnt)
};
result += &format!(
",,{},{},{},{},{},{}",
title_conn[cnt],
content_conn,
cnt + 1,
p.ioa,
p.point_id,
p.is_yx.to_string().to_uppercase()
);
if let Some(addr) = p.control_ioa {
result += &format!(",{}", addr);
} else {
result += ",";
}
} else {
let content_conn = Self::get_iec104d_conn_csv(conn, cnt);
result += &format!(",,{},{},,,,,", title_conn[cnt], content_conn);
}
if i == len_conn {
break;
}
}
result += "\n";
}
// Remaining register rows (past the 18 setting rows)
let mut max_data_len = if self.connections.is_empty() {
0
} else {
self.connections[0].1.data_configure.len()
};
for c in &self.connections {
if c.1.data_configure.len() > max_data_len {
max_data_len = c.1.data_configure.len();
}
}
for row in 18..max_data_len {
if row < 27 {
result += &format!("{},", IEC104D_INFO[row - 6]);
} else {
// Data-type legend exhausted but register rows remain
result += ",,";
}
let mut i = 0;
let mut multi_found = HashSet::with_capacity(unkown_ip_map.len());
for (s, conn) in &self.connections {
if s.starts_with("+/") {
let info: Vec<&str> = s.split('/').collect();
if info.len() != 3 {
continue;
}
let port = if info[1] == "+" {
UNKNOWN_TCP_PORT
} else {
info[1].parse::<u32>().unwrap_or(UNKNOWN_TCP_PORT)
};
if multi_found.contains(&port) {
continue;
} else {
multi_found.insert(port);
i += 1;
}
} else {
i += 1;
}
if conn.data_configure.len() > row {
let p = &conn.data_configure[row];
result += &format!(
",,,{},{},{},{}",
row + 1,
p.ioa,
p.point_id,
p.is_yx.to_string().to_uppercase()
);
if let Some(addr) = p.control_ioa {
result += &format!(",{}", addr);
} else {
result += ",";
}
} else {
result += ",,,,,";
}
if i != len_conn {
result += ",";
} else {
break;
}
}
result += "\n";
}
let row = usize::max(max_data_len, 19);
if (8..28).contains(&row) {
// Register rows exhausted but the data-type legend remains
// (eight commas repeated once per connection block)
let comma = ",,,,,,,,".repeat(len_conn);
for row in row..28 {
result += &format!("{}{}\n", IEC104D_INFO[row - 8], comma);
}
}
result
}
/// Serializes field `index` (0-based row in the connection settings table)
/// of `conn` for CSV export. Indices 1 and 2 decode the client ip/port
/// from the "ip/port/name" connection key; unrecognized indices yield
/// "unknown".
fn get_iec104d_conn_csv(conn: &(String, Iec104Connection), index: usize) -> String {
    let (key, c) = conn;
    // "ip/port/name"; both parts fall back to "" when the key is malformed.
    let parts: Vec<&str> = key.split('/').collect();
    let (ip, port) = if parts.len() == 3 {
        (parts[0], parts[1])
    } else {
        ("", "")
    };
    match index {
        0 => c.data_configure.len().to_string(),
        1 => ip.to_string(),
        2 => {
            if port == "+" {
                UNKNOWN_TCP_PORT.to_string()
            } else {
                port.to_string()
            }
        }
        3 => c.point_id.to_string(),
        4 => c.originator_address.to_string(),
        5 => c.common_address.to_string(),
        6 => c.common_address_field_length.to_string(),
        7 => c.cot_field_length.to_string(),
        8 => c.ioa_field_length.to_string(),
        9 => c.max_time_no_ack_received.to_string(),
        10 => c.max_time_no_ack_sent.to_string(),
        11 => c.max_idle_time.to_string(),
        12 => c.max_unconfirmed_apdus_sent.to_string(),
        13 => c.max_unconfirmed_apdus_received.to_string(),
        14 => format!("{};{};{}", c.is_control_with_time, c.direct_yk, c.direct_yt).to_uppercase(),
        15 => c.call_time.map(|t| t.to_string()).unwrap_or_default(),
        16 => c.is_client.to_string().to_uppercase(),
        17 => c.call_counter_time.map(|t| t.to_string()).unwrap_or_default(),
        _ => "unknown".to_string(),
    }
}
/// Returns the distinct point ids used across all connections: every
/// register point plus each configured connection-status point.
pub fn get_point_ids(&self) -> Vec<u64> {
    // Upper bound on the number of ids, used to size the set once.
    let capacity = self
        .connections
        .iter()
        .map(|(_, conn)| conn.data_configure.len())
        .sum::<usize>()
        + self.connections.len();
    let mut unique = HashSet::with_capacity(capacity);
    for (_, conn) in &self.connections {
        if conn.point_id != UNKNOWN_POINT_ID {
            unique.insert(conn.point_id);
        }
        unique.extend(conn.data_configure.iter().map(|rd| rd.point_id));
    }
    unique.into_iter().collect()
}
}
impl Iec104Connection {
/// Rebuilds the `point_id_to_ioa` and `ioa_to_pos` lookup tables from
/// `data_configure`, rejecting duplicate point ids, duplicate monitoring
/// addresses, and control addresses that clash with each other or with a
/// monitoring address.
///
/// On failure returns `(row, column, message)` locating the offending
/// cell in the source sheet (rows are 1-based, columns match the CSV
/// layout: 3 = monitoring address, 4 = point id, 6 = control address).
pub fn create_data_config(&mut self) -> Result<(),(usize, usize, String)> {
let size = self.data_configure.len();
let mut point_id_to_ioa: HashMap<u64, u32> = HashMap::with_capacity(size);
// key: information object address, value: position in data_configure
let mut ioa_to_pos: HashMap<u32, u16> = HashMap::with_capacity(size);
let mut control_ioa_to_measure_ioa: HashMap<u32, u32> = HashMap::with_capacity(size);
for (index, rd) in self.data_configure.iter().enumerate() {
// Duplicate point id
if point_id_to_ioa.contains_key(&rd.point_id) {
let tip = format!("Invalid register point (id :{}):\nThe point ID is already existed", rd.point_id);
return Err((index + 1, 4, tip)); // column of the point id
}
point_id_to_ioa.insert(rd.point_id, rd.ioa);
// Duplicate monitoring address
if ioa_to_pos.contains_key(&rd.ioa) {
let tip = format!("Invalid register point (ioa :{}):\nThe ioa is already existed", rd.ioa);
return Err((index + 1, 3, tip)); // column of the monitoring address
}
// NOTE(review): the unwrap assumes data_configure has at most
// u16::MAX entries — callers are expected to enforce this bound.
ioa_to_pos.insert(rd.ioa, index.try_into().unwrap());
// When a control address exists: reject duplicates among control
// addresses, and clashes with monitoring addresses. The current
// row's monitoring address was inserted just above, so a row whose
// control address equals its own monitoring address is rejected too.
if let Some(control_ioa) = &rd.control_ioa {
if control_ioa_to_measure_ioa.contains_key(control_ioa) {
let tip = format!("Invalid register point (control ioa :{}):\nThe control ioa is already existed", control_ioa);
return Err((index + 1, 6, tip)); // column of the control address
}
if ioa_to_pos.contains_key(control_ioa) {
let tip = format!("Invalid register point (control ioa :{}):\nThe control ioa and another ioa are repeated", control_ioa);
return Err((index + 1, 6, tip)); // column of the control address
}
control_ioa_to_measure_ioa.insert(*control_ioa, rd.ioa);
}
}
self.point_id_to_ioa = point_id_to_ioa;
self.ioa_to_pos = ioa_to_pos;
Ok(())
}
}
impl Iec104Point {
    /// Reads a single IEC 104 point definition from CSV `record`, starting at
    /// column `first_col`. Column layout: ioa, point id, yx flag
    /// ("TRUE"/"FALSE", case-insensitive), optional control ioa.
    /// On failure returns the `(row, column)` coordinate of the offending cell.
    fn parse_register_data(
        record: &StringRecord,
        row: usize,
        first_col: usize,
    ) -> Result<Self, (usize, usize)> {
        // Error coordinate for the cell at the given column offset.
        let cell = |offset: usize| (row, first_col + offset);
        // Monitoring information-object address.
        let ioa = csv_u32(record, first_col).ok_or(cell(0))?;
        // Measurement point id this address maps to.
        let point_id = csv_u64(record, first_col + 1).ok_or(cell(1))?;
        // "TRUE" (any case, surrounding whitespace ignored) marks a telesignal point.
        let is_yx = csv_string(record, first_col + 2)
            .ok_or(cell(2))?
            .trim()
            .to_uppercase()
            == "TRUE";
        // Optional control address: an empty cell means the point is not controllable.
        let raw_control = csv_str(record, first_col + 3).ok_or(cell(3))?;
        let control_ioa = if raw_control.is_empty() {
            None
        } else {
            Some(raw_control.parse::<u32>().map_err(|_| cell(3))?)
        };
        Ok(Iec104Point {
            ioa,
            point_id,
            is_yx,
            control_ioa,
        })
    }
}
\ No newline at end of file
extern crate core;
use std::{fmt, io};
use std::collections::{BTreeMap, HashMap};
use std::fmt::Display;
use std::marker::PhantomData;
use std::str::FromStr;
use byteorder::{BigEndian, ByteOrder};
// use bytes::BytesMut;
use csv::{Reader, StringRecord};
// use protobuf::CodedInputStream;
// use protobuf::error::WireError;
use protobuf::{EnumFull, EnumOrUnknown};
use serde::{Deserialize, Serialize};
use eig_expr::Expr;
use eig_expr::Token;
pub use crate::prop::*;
use crate::excel::{excel_bytes_to_csv_bytes, transfer_to_utf8};
use crate::dlt645::Dlt645ClientTp;
use crate::ethercat::EcMasterTp;
pub use crate::hymqtt::{HYMqttTransport, HYPointInfo};
pub use crate::iec104::{Iec104ClientTp, Iec104ServerTp, Iec104Connection, Iec104Point};
use crate::memory::{MemoryPosixTp, MemorySystemVTp};
pub use crate::modbus::{ModbusRtuClientTp, ModbusTcpClientTp, ModbusRtuServerTp, ModbusTcpServerTp, MbConnection, RegisterType};
pub use crate::mqtt::MqttTransport;
pub use crate::proto::eig::*;
pub use crate::proto::eig::pb_alarm_define::AlarmLevel as PbAlarmDefine_AlarmLevel;
pub use crate::proto::eig::pb_eig_alarm::AlarmType as PbEigAlarm_AlarmType;
pub use crate::proto::eig::pb_eig_alarm::AlarmStatus as PbEigAlarm_AlarmStatus;
pub use crate::proto::eig::pb_set_point_result::SetPointStatus as PbSetPointResult_SetPointStatus;
pub use crate::proto::eig::pb_file::FileOperation as PbFile_FileOperation;
pub use crate::proto::eig::pb_request::RequestType as PbRequest_RequestType;
use crate::prop::DataUnit;
pub mod excel;
pub mod hymqtt;
pub mod iec104;
pub mod dlt645;
pub mod modbus;
pub mod mqtt;
pub mod ethercat;
pub mod memory;
pub mod prop;
pub mod proto;
pub mod topics;
pub mod web;
/// Sentinel TCP port meaning "not configured".
pub const UNKNOWN_TCP_PORT: u32 = 9999;
/// Sentinel point id meaning "no measurement point bound".
pub const UNKNOWN_POINT_ID: u64 = 0;
/// Smallest point id accepted when id checking is enabled (see `from_csv_to_map`).
pub const MINIMUM_POINT_ID: u64 = 100001;
// A lower bound is imposed on AOE ids to avoid clashes when registering listeners for
// measurement changes: transport ids loaded from files are auto-generated starting at 1,
// and both AOEs and transports register listeners keyed by their own id. On a collision
// the earlier registrant would no longer receive measurement-change notifications.
// user id type is u16, so aoe id will not conflict with user id
pub const MINIMUM_AOE_ID: u64 = u16::MAX as u64 + 1;
/// Default cap on concurrent TCP clients.
pub const DEFAULT_TCP_CLIENT_LIMIT: u8 = 100;
/// Upper bound for a polling period (unit not visible here — presumably ms; TODO confirm).
pub const MAX_POLLING_PERIOD: u64 = 1_000_000_000;
/// Register data types supported by the Modbus transports, as "TYPE,NOTE" CSV rows.
/// The first entry is the header row; entries with an empty note have no extra hint.
pub const DATA_INFO: [&str; 38] = [
    "DATA TYPE,NOTE",
    "Binary,bool",
    "OneByteIntSigned,byte",
    "OneByteIntSignedLower,byte",
    "OneByteIntSignedUpper,byte",
    "OneByteIntUnsigned,byte",
    "OneByteIntUnsignedLower,byte",
    "OneByteIntUnsignedUpper,byte",
    "TwoByteIntUnsigned,u16",
    "TwoByteIntSigned,i16",
    "TwoByteIntSignedSwapped,",
    "TwoByteBcd,",
    "TwoByteIntUnsignedSwapped,",
    "FourByteIntUnsigned,",
    "FourByteIntSigned,u32",
    "FourByteIntUnsignedSwapped,",
    "FourByteIntSignedSwapped,",
    "FourByteIntUnsignedSwappedSwapped,",
    "FourByteIntSignedSwappedSwapped,",
    "FourByteFloat,",
    "FourByteFloatSwapped,",
    "FourByteBcd,",
    "FourByteBcdSwapped,",
    "FourByteMod10k,",
    "FourByteMod10kSwapped,",
    "SixByteMod10k,",
    "SixByteMod10kSwapped,",
    "EightByteIntUnsigned,",
    "EightByteIntSigned,",
    "EightByteIntUnsignedSwapped,",
    "EightByteIntSignedSwapped,",
    "EightByteIntUnsignedSwappedSwapped,",
    "EightByteIntSignedSwappedSwapped,",
    "EightByteFloat,",
    "EightByteFloatSwapped,",
    "EightByteFloatSwappedSwapped,",
    "EightByteMod10kSwapped,",
    "EightByteMod10k,",
];
/// IEC 60870-5-104 ASDU type identifiers handled by the server transport, as
/// "type id,description" pairs.
/// NOTE(review): the previous English labels all said "Command", but every id in
/// this list is a monitoring-direction type (telesignal / telemetry information,
/// matching the original Chinese labels), not a control command — relabeled
/// accordingly per IEC 60870-5-101/104 type-id tables.
pub const IEC104D_INFO: [&str; 21] = [
    "1,Single-point Telesignal",
    "3,Double-point Telesignal",
    "5,Step Position Telesignal",
    "30,Single-point Telesignal (with time)",
    "31,Double-point Telesignal (with time)",
    "32,Step Position Telesignal (with time)",
    "7,32-Bitstring Telemetry",
    "9,Normalized Telemetry Value",
    "11,Scaled Telemetry Value",
    "13,Short Float Telemetry Value",
    "15,Cumulant Telemetry Value",
    "33,32-Bitstring Telemetry (with time)",
    "34,Normalized Telemetry Value (with time)",
    "35,Scaled Telemetry Value (with time)",
    "36,Short Float Telemetry Value (with time)",
    "37,Cumulant Telemetry Value (with time)",
    "20,Group Single-point Telesignal",
    "21,Normalized Telemetry Value",
    "38,Relay Protection Device Event",
    "39,Group Startup of Relay Protection Device Event",
    "40,Group Export Information of Relay Protection Device",
];
/// public api
/**
 * @api {枚举_通道类型} /TransportType TransportType
 * @apiGroup A_Enum
 * @apiSuccess {String} ModbusTcpClient ModbusTcp client
 * @apiSuccess {String} ModbusTcpServer ModbusTcp server
 * @apiSuccess {String} ModbusRtuClient ModbusRtu client
 * @apiSuccess {String} ModbusRtuServer ModbusRtu server
 * @apiSuccess {String} DLT645Client DLT645 client
 * @apiSuccess {String} Mqtt Mqtt
 * @apiSuccess {String} Iec104Client Iec104 client
 * @apiSuccess {String} Iec104Server Iec104 server
 * @apiSuccess {String} HYMqtt HYMqtt
 * @apiSuccess {String} EtherCAT EtherCAT master
 * @apiSuccess {String} MemoryPosix POSIX shared-memory channel
 * @apiSuccess {String} MemorySystemV System V shared-memory channel
 * @apiSuccess {String} Unknown unknown
 */
/// Protocol tag for a communication channel. The explicit discriminants
/// (1.., 11.., 100) leave gaps — presumably reserved for removed or future
/// protocols; keep them stable, they may be serialized numerically (TODO confirm).
#[derive(Serialize, Deserialize, Copy, Debug, Clone, PartialEq)]
pub enum TransportType {
    ModbusTcpClient = 1,
    ModbusTcpServer,
    ModbusRtuClient,
    ModbusRtuServer,
    // DLT645 power-meter protocol
    DLT645Client,
    // plain MQTT
    Mqtt,
    // IEC 60870-5-104
    Iec104Client = 11,
    Iec104Server,
    // MQTT dialect used by the HY fusion terminal (TTU)
    HYMqtt,
    // EtherCAT fieldbus
    EtherCAT,
    // shared-memory channels
    MemoryPosix,
    MemorySystemV,
    Unknown = 100,
}
impl Display for TransportType {
    /// Renders the variant via its `Debug` name, e.g. `Mqtt` or `Iec104Client`
    /// (the same spelling `From<&str>` parses).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
impl TransportType {
    /// Short ASCII tag used as the file-name prefix for this transport type.
    /// Kept consistent with `Transport::get_file_name` and the substrings
    /// matched by `Transport::from_bytes`.
    pub fn to_header(&self) -> String {
        match self {
            TransportType::ModbusTcpClient => String::from("tcp-mbc"),
            TransportType::ModbusTcpServer => String::from("tcp-mbd"),
            TransportType::ModbusRtuClient => String::from("rtu-mbc"),
            TransportType::ModbusRtuServer => String::from("rtu-mbd"),
            TransportType::DLT645Client => String::from("dlt645"),
            TransportType::Mqtt => String::from("mqtt"),
            TransportType::Iec104Client => String::from("iec104c"),
            TransportType::Iec104Server => String::from("iec104d"),
            // Previously fell through to "unknown"; "ttu" is the prefix
            // Transport::get_file_name and Transport::from_bytes use for HYMqtt.
            TransportType::HYMqtt => String::from("ttu"),
            TransportType::EtherCAT => String::from("ethercat"),
            TransportType::MemoryPosix => String::from("posix-memory"),
            // Fixed typo: was "symtemv-memory", which neither get_file_name nor
            // from_bytes ("systemv-memory") would ever match.
            TransportType::MemorySystemV => String::from("systemv-memory"),
            _ => String::from("unknown"),
        }
    }
}
impl From<&str> for TransportType {
    /// Parses the `Debug`/`Display` spelling of a variant; anything
    /// unrecognized maps to `Unknown`.
    fn from(value: &str) -> Self {
        match value {
            "ModbusTcpClient" => TransportType::ModbusTcpClient,
            "ModbusTcpServer" => TransportType::ModbusTcpServer,
            "ModbusRtuClient" => TransportType::ModbusRtuClient,
            "ModbusRtuServer" => TransportType::ModbusRtuServer,
            "DLT645Client" => TransportType::DLT645Client,
            "Mqtt" => TransportType::Mqtt,
            "Iec104Client" => TransportType::Iec104Client,
            "Iec104Server" => TransportType::Iec104Server,
            // Was missing: Display prints "HYMqtt", but parsing it returned
            // Unknown, breaking the Display -> From round-trip.
            "HYMqtt" => TransportType::HYMqtt,
            "EtherCAT" => TransportType::EtherCAT,
            "MemoryPosix" => TransportType::MemoryPosix,
            "MemorySystemV" => TransportType::MemorySystemV,
            _ => TransportType::Unknown,
        }
    }
}
impl From<String> for TransportType {
fn from(value: String) -> Self {
TransportType::from(value.as_str())
}
}
/**
 * @api {枚举_通道对象} /Transport Transport
 * @apiPrivate
 * @apiGroup A_Enum
 * @apiSuccess {Object} MbcTcp {"MbcTcp": ModbusTcpClientTp}
 * @apiSuccess {Object} MbdTcp {"MbdTcp": ModbusTcpServerTp}
 * @apiSuccess {Object} MbcRtu {"MbcRtu": ModbusRtuClientTp}
 * @apiSuccess {Object} MbdRtu {"MbdRtu": ModbusRtuServerTp}
 * @apiSuccess {Object} DLT645c {"DLT645c": Dlt645ClientTp}
 * @apiSuccess {Object} Mqtt {"Mqtt": MqttTransport}
 * @apiSuccess {Object} Iec104c {"Iec104c": Iec104ClientTp}
 * @apiSuccess {Object} Iec104d {"Iec104d": Iec104ServerTp}
 * @apiSuccess {Object} HYMqtt {"HYMqtt": HYMqttTransport}
 * @apiSuccess {Object} EtherCAT {"EtherCAT": EcMasterTp}
 * @apiSuccess {Object} MemoryPosix {"MemoryPosix": MemoryPosixTp}
 * @apiSuccess {Object} MemorySystemV {"MemorySystemV": MemorySystemVTp}
 */
/// A concrete communication channel, tagged by protocol; each variant wraps
/// that protocol's transport configuration. One variant per `TransportType`
/// (excluding `Unknown`).
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub enum Transport {
    MbcTcp(ModbusTcpClientTp),
    MbdTcp(ModbusTcpServerTp),
    MbcRtu(ModbusRtuClientTp),
    MbdRtu(ModbusRtuServerTp),
    DLT645c(Dlt645ClientTp),
    Mqtt(MqttTransport),
    Iec104c(Iec104ClientTp),
    Iec104d(Iec104ServerTp),
    HYMqtt(HYMqttTransport),
    EtherCAT(EcMasterTp),
    MemoryPosix(MemoryPosixTp),
    MemorySystemV(MemorySystemVTp),
}
impl Transport {
    /// Unique id of the wrapped transport.
    pub fn id(&self) -> u64 {
        match self {
            Transport::MbcTcp(t) => t.id,
            Transport::MbdTcp(t) => t.id,
            Transport::MbcRtu(t) => t.id,
            Transport::MbdRtu(t) => t.id,
            Transport::DLT645c(t) => t.id,
            Transport::Mqtt(t) => t.id,
            Transport::Iec104c(t) => t.id,
            Transport::Iec104d(t) => t.id,
            Transport::HYMqtt(t) => t.id,
            Transport::EtherCAT(t) => t.id,
            Transport::MemoryPosix(t) => t.id,
            Transport::MemorySystemV(t) => t.id,
        }
    }
    /// Display name of the wrapped transport (cloned; callers own the result).
    pub fn name(&self) -> String {
        match self {
            Transport::MbcTcp(t) => t.name.clone(),
            Transport::MbdTcp(t) => t.name.clone(),
            Transport::MbcRtu(t) => t.name.clone(),
            Transport::MbdRtu(t) => t.name.clone(),
            Transport::DLT645c(t) => t.name.clone(),
            Transport::Mqtt(t) => t.name.clone(),
            Transport::Iec104c(t) => t.name.clone(),
            Transport::Iec104d(t) => t.name.clone(),
            Transport::HYMqtt(t) => t.name.clone(),
            Transport::EtherCAT(t) => t.name.clone(),
            Transport::MemoryPosix(t) => t.name.clone(),
            Transport::MemorySystemV(t) => t.name.clone(),
        }
    }
    /// Protocol tag corresponding to this variant.
    pub fn get_type(&self) -> TransportType {
        match self {
            Transport::MbcTcp(_) => TransportType::ModbusTcpClient,
            Transport::MbdTcp(_) => TransportType::ModbusTcpServer,
            Transport::MbcRtu(_) => TransportType::ModbusRtuClient,
            Transport::MbdRtu(_) => TransportType::ModbusRtuServer,
            Transport::DLT645c(_) => TransportType::DLT645Client,
            Transport::Mqtt(_) => TransportType::Mqtt,
            Transport::Iec104c(_) => TransportType::Iec104Client,
            Transport::Iec104d(_) => TransportType::Iec104Server,
            Transport::HYMqtt(_) => TransportType::HYMqtt,
            Transport::EtherCAT(_) => TransportType::EtherCAT,
            Transport::MemoryPosix(_) => TransportType::MemoryPosix,
            Transport::MemorySystemV(_) => TransportType::MemorySystemV,
        }
    }
    /// Overwrites the wrapped transport's id.
    pub fn set_id(&mut self, id: u64) {
        match self {
            Transport::MbcTcp(t) => t.id = id,
            Transport::MbdTcp(t) => t.id = id,
            Transport::MbcRtu(t) => t.id = id,
            Transport::MbdRtu(t) => t.id = id,
            Transport::DLT645c(t) => t.id = id,
            Transport::Mqtt(t) => t.id = id,
            Transport::Iec104c(t) => t.id = id,
            Transport::Iec104d(t) => t.id = id,
            Transport::HYMqtt(t) => t.id = id,
            Transport::EtherCAT(t) => t.id = id,
            Transport::MemoryPosix(t) => t.id = id,
            Transport::MemorySystemV(t) => t.id = id,
        }
    }
    /// Overwrites the wrapped transport's display name.
    pub fn set_name(&mut self, name: String) {
        match self {
            Transport::MbcTcp(t) => t.name = name,
            Transport::MbdTcp(t) => t.name = name,
            Transport::MbcRtu(t) => t.name = name,
            Transport::MbdRtu(t) => t.name = name,
            Transport::DLT645c(t) => t.name = name,
            Transport::Mqtt(t) => t.name = name,
            Transport::Iec104c(t) => t.name = name,
            Transport::Iec104d(t) => t.name = name,
            Transport::HYMqtt(t) => t.name = name,
            Transport::EtherCAT(t) => t.name = name,
            Transport::MemoryPosix(t) => t.name = name,
            Transport::MemorySystemV(t) => t.name = name,
        }
    }
    /// Ids of all measurement points referenced by the wrapped transport.
    pub fn get_point_ids(&self) -> Vec<u64> {
        match self {
            Transport::MbcTcp(t) => t.get_point_ids(),
            Transport::MbdTcp(t) => t.get_point_ids(),
            Transport::MbcRtu(t) => t.get_point_ids(),
            Transport::MbdRtu(t) => t.get_point_ids(),
            Transport::DLT645c(t) => t.get_point_ids(),
            Transport::Mqtt(t) => t.get_point_ids(),
            Transport::Iec104c(t) => t.get_point_ids(),
            Transport::Iec104d(t) => t.get_point_ids(),
            Transport::HYMqtt(t) => t.get_point_ids(),
            Transport::EtherCAT(t) => t.get_point_ids(),
            Transport::MemoryPosix(t) => t.get_point_ids(),
            Transport::MemorySystemV(t) => t.get_point_ids(),
        }
    }
    /// Number of configured connections. Variants not listed here (MbdRtu,
    /// Mqtt, Iec104c, HYMqtt, MemorySystemV) report 1 — presumably they model
    /// a single session; confirm against each transport type.
    pub fn get_connection_count(&self) -> usize {
        match self {
            Transport::MbcTcp(t) => t.connections.len(),
            Transport::MbdTcp(t) => t.connections.len(),
            Transport::MbcRtu(t) => t.connections.len(),
            Transport::DLT645c(t) => t.connections.len(),
            Transport::Iec104d(t) => t.connections.len(),
            Transport::EtherCAT(t) => t.connections.len(),
            Transport::MemoryPosix(t) => t.connections.len(),
            _ => 1,
        }
    }
    /// True when the channel talks to a remote peer (clients), false for
    /// locally-hosted servers; Mqtt/memory channels depend on their
    /// `is_transfer` flag.
    pub fn is_remote(&self) -> bool {
        match self {
            Transport::MbcTcp(_) => true,
            Transport::MbdTcp(_) => false,
            Transport::MbcRtu(_) => true,
            Transport::MbdRtu(_) => false,
            Transport::DLT645c(_) => true,
            Transport::Mqtt(t) => !t.is_transfer,
            Transport::Iec104c(_) => true,
            Transport::Iec104d(_) => false,
            Transport::HYMqtt(_) => true,
            Transport::EtherCAT(_) => true,
            Transport::MemoryPosix(t) => !t.is_transfer,
            Transport::MemorySystemV(t) => !t.is_transfer,
        }
    }
    /// Canonical export file name for this transport. The prefixes must stay
    /// in sync with the substrings `from_bytes` matches on.
    pub fn get_file_name(&self) -> String {
        let id = self.id();
        match self {
            Transport::MbcTcp(_) => format!("tcp-mbc-{}.csv", id),
            Transport::MbdTcp(_) => format!("tcp-mbd-{}.csv", id),
            Transport::MbcRtu(_) => format!("rtu-mbc-{}.csv", id),
            // Fixed typo: was "rut-mbd-{}.csv", which from_bytes (matching
            // "rtu-mbd") could never parse back, breaking the export/import
            // round trip for ModbusRtu servers.
            Transport::MbdRtu(_) => format!("rtu-mbd-{}.csv", id),
            Transport::DLT645c(_) => format!("dlt645-{}.csv", id),
            Transport::Mqtt(_) => format!("mqtt-{}.csv", id),
            Transport::Iec104c(_) => format!("iec104c-{}.csv", id),
            Transport::Iec104d(_) => format!("iec104d-{}.csv", id),
            Transport::HYMqtt(_) => format!("ttu-{}.csv", id),
            Transport::EtherCAT(_) => format!("ethercat-{}.csv", id),
            Transport::MemoryPosix(_) => format!("posix-memory-{}.csv", id),
            Transport::MemorySystemV(_) => format!("systemv-memory-{}.csv", id),
        }
    }
    /// Parses one transport per sheet/CSV from raw file content. The protocol
    /// is chosen from substrings of the (lower-cased) file name; an
    /// unrecognized name or extension yields `Err((0, 0))`. Parser errors are
    /// reported as 1-based `(row, column)` coordinates.
    pub fn from_bytes(file_name: &str, file_content: &[u8]) -> Vec<Result<Self, (usize, usize)>> {
        // Wraps a protocol-specific parse result into a Transport, shifting
        // error coordinates from 0-based to 1-based.
        fn wrap<T>(
            res: Result<T, (usize, usize)>,
            ctor: fn(T) -> Transport,
        ) -> Result<Transport, (usize, usize)> {
            res.map(ctor).map_err(|(r, c)| (r + 1, c + 1))
        }
        let mut result_vec = Vec::new();
        let file_name = file_name.to_lowercase();
        // Excel workbooks may contain several sheets, each one transport; an
        // empty name is treated as Excel content.
        let content_vec = if file_name.ends_with(".xlsx") || file_name.ends_with(".xls") || file_name.is_empty() {
            excel_bytes_to_csv_bytes(file_content).unwrap_or_default()
        } else if file_name.ends_with(".csv") {
            vec![file_content.to_owned()]
        } else {
            return vec![Err((0, 0))];
        };
        for content in content_vec {
            let bytes = content.as_slice();
            // NOTE: match order matters — e.g. "tcp-mbc" must be tested after
            // the more specific "xa-mbc"/"encap-mbc" patterns.
            let tp_result = if file_name.contains("xa-mbc") || file_name.contains("encap-mbc") {
                wrap(ModbusTcpClientTp::from_csv_bytes(bytes), Transport::MbcTcp)
            } else if file_name.contains("tcp-mbc") {
                wrap(ModbusTcpClientTp::from_csv_bytes2(bytes), Transport::MbcTcp)
            } else if file_name.contains("tcp-mbd") {
                wrap(ModbusTcpServerTp::from_csv_bytes(bytes), Transport::MbdTcp)
            } else if file_name.contains("rtu-mbc") {
                wrap(ModbusRtuClientTp::from_csv_bytes(bytes), Transport::MbcRtu)
            } else if file_name.contains("rtu-mbd") {
                wrap(ModbusRtuServerTp::from_csv_bytes(bytes), Transport::MbdRtu)
            } else if file_name.contains("dlt645") {
                wrap(Dlt645ClientTp::from_csv_bytes(bytes), Transport::DLT645c)
            } else if file_name.contains("mqtt") {
                wrap(MqttTransport::from_csv_bytes(bytes), Transport::Mqtt)
            } else if file_name.contains("iec104c") {
                wrap(Iec104ClientTp::from_csv_bytes(bytes), Transport::Iec104c)
            } else if file_name.contains("iec104d") {
                wrap(Iec104ServerTp::from_csv_bytes(bytes), Transport::Iec104d)
            } else if file_name.contains("ttu") {
                wrap(HYMqttTransport::from_csv_bytes(bytes), Transport::HYMqtt)
            } else if file_name.contains("ethercat") {
                wrap(EcMasterTp::from_csv_bytes(bytes), Transport::EtherCAT)
            } else if file_name.contains("posix-memory") {
                wrap(MemoryPosixTp::from_csv_bytes(bytes), Transport::MemoryPosix)
            } else if file_name.contains("systemv-memory") {
                wrap(MemorySystemVTp::from_csv_bytes(bytes), Transport::MemorySystemV)
            } else {
                Err((0, 0))
            };
            result_vec.push(tp_result);
        }
        result_vec
    }
    /// Reads a file from disk and parses it via `from_bytes`; an unreadable
    /// file yields a single `Err((0, 0))`.
    pub fn from_file(path: &str) -> Vec<Result<Self, (usize, usize)>> {
        match std::fs::read(path) {
            Ok(content) => Self::from_bytes(path, content.as_slice()),
            Err(_) => vec![Err((0, 0))],
        }
    }
    /// Renders the transport as CSV using `text_map` for localization.
    /// Variants without a CSV exporter (MbdRtu is covered; HYMqtt and
    /// MemorySystemV are not) return an empty string.
    pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
        match self {
            Transport::MbcTcp(t) => t.export_csv(text_map),
            Transport::MbdTcp(t) => t.export_csv(text_map),
            Transport::MbcRtu(t) => t.export_csv(text_map),
            Transport::MbdRtu(t) => t.export_csv(text_map),
            Transport::DLT645c(t) => t.export_csv(text_map),
            Transport::Mqtt(t) => t.export_csv(text_map),
            Transport::Iec104c(t) => t.export_csv(text_map),
            Transport::Iec104d(t) => t.export_csv(text_map),
            Transport::EtherCAT(t) => t.export_csv(text_map),
            Transport::MemoryPosix(t) => t.export_csv(text_map),
            _ => "".to_string(),
        }
    }
}
/**
* @api {Measurement} /Measurement Measurement
* @apiPrivate
......@@ -633,309 +106,6 @@ pub struct MeasureValue {
pub transformed_discrete: i64,
}
/**
 * @api {SerialParity} /SerialParity SerialParity
 * @apiPrivate
 * @apiGroup A_Enum
 * @apiSuccess {String} None None
 * @apiSuccess {String} Odd Odd
 * @apiSuccess {String} Even Even
 * @apiSuccess {String} Mark Mark
 * @apiSuccess {String} Space Space
 */
/// Parity-bit setting for a serial port (standard UART parity modes).
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub enum SerialParity {
    /// No parity bit (the default).
    #[default]
    None = 0,
    Odd = 1,
    Even = 2,
    /// Parity bit always 1.
    Mark = 3,
    /// Parity bit always 0.
    Space = 4,
}
/**
 * @api {串口通道参数} /SerialPara SerialPara
 * @apiPrivate
 * @apiGroup A_Object
 * @apiSuccess {String} file_path file_path
 * @apiSuccess {u32} baud_rate baud_rate
 * @apiSuccess {u8} data_bits data_bits
 * @apiSuccess {u8} stop_bits stop_bits
 * @apiSuccess {SerialParity} parity parity
 * @apiSuccess {u64} delay_between_requests delay_between_requests
 */
/// Serial-port channel parameters.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct SerialPara {
    /// Device path of the serial port (e.g. a /dev node — confirm per platform).
    pub file_path: String,
    /// Baud rate in bits per second.
    pub baud_rate: u32,
    /// Data bits per frame (typically 7 or 8).
    pub data_bits: u8,
    /// Stop bits per frame.
    pub stop_bits: u8,
    /// Parity mode.
    pub parity: SerialParity,
    /// Pause between successive requests (unit not shown here — presumably ms; TODO confirm).
    pub delay_between_requests: u64,
}
impl Default for SerialPara {
fn default() -> Self {
SerialPara {
file_path: Default::default(),
baud_rate: 9600,
data_bits: 8,
stop_bits: 1,
parity: Default::default(),
delay_between_requests: Default::default(),
}
}
}
/**
 * @api {整型指令数据} /SetIntValue SetIntValue
 * @apiGroup A_Object
 * @apiSuccess {u64} sender_id sender_id
 * @apiSuccess {u64} point_id point_id
 * @apiSuccess {i64} yk_command yk_command
 * @apiSuccess {u64} timestamp timestamp
 */
/// Discrete (YK / telecontrol) set-point command, addressed by point id.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetIntValue {
    /// Id of the component that issued the command.
    pub sender_id: u64,
    /// Target measurement point id.
    pub point_id: u64,
    /// Discrete value to write.
    pub yk_command: i64,
    /// Creation time (unit not shown here — presumably ms since epoch; TODO confirm).
    pub timestamp: u64,
}
/**
 * @api {整型指令数据} /SetIntValue2 SetIntValue2
 * @apiGroup A_Object
 * @apiSuccess {u64} sender_id sender_id
 * @apiSuccess {u64} point_alias point_alias
 * @apiSuccess {i64} yk_command yk_command
 * @apiSuccess {u64} timestamp timestamp
 */
/// Discrete (YK) set-point command addressed by point alias instead of id.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetIntValue2 {
    /// Id of the component that issued the command.
    pub sender_id: u64,
    /// Alias of the target measurement point.
    pub point_alias: String,
    /// Discrete value to write.
    pub yk_command: i64,
    /// Creation time (unit not shown here — presumably ms since epoch; TODO confirm).
    pub timestamp: u64,
}
/**
 * @api {浮点型指令数据} /SetFloatValue SetFloatValue
 * @apiGroup A_Object
 * @apiSuccess {u64} sender_id sender_id
 * @apiSuccess {u64} point_id point_id
 * @apiSuccess {f64} yt_command yt_command
 * @apiSuccess {u64} timestamp timestamp
 */
/// Analog (YT / teleadjust) set-point command, addressed by point id.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetFloatValue {
    /// Id of the component that issued the command.
    pub sender_id: u64,
    /// Target measurement point id.
    pub point_id: u64,
    /// Analog value to write.
    pub yt_command: f64,
    /// Creation time (unit not shown here — presumably ms since epoch; TODO confirm).
    pub timestamp: u64,
}
/**
 * @api {浮点型指令数据} /SetFloatValue2 SetFloatValue2
 * @apiGroup A_Object
 * @apiSuccess {u64} sender_id sender_id
 * @apiSuccess {str} point_alias point_alias
 * @apiSuccess {f64} yt_command yt_command
 * @apiSuccess {u64} timestamp timestamp
 */
/// Analog (YT) set-point command addressed by point alias instead of id.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetFloatValue2 {
    /// Id of the component that issued the command.
    pub sender_id: u64,
    /// Alias of the target measurement point.
    pub point_alias: String,
    /// Analog value to write.
    pub yt_command: f64,
    /// Creation time (unit not shown here — presumably ms since epoch; TODO confirm).
    pub timestamp: u64,
}
/**
 * @api {公式型指令数据} /SetPointValue SetPointValue
 * @apiGroup A_Object
 * @apiSuccess {u64} sender_id sender_id
 * @apiSuccess {u64} point_id point_id
 * @apiSuccess {expr} command command
 * @apiSuccess {u64} timestamp timestamp
 */
/// Expression-based set-point command: the value to write is obtained by
/// evaluating `command`.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetPointValue {
    /// Id of the component that issued the command.
    pub sender_id: u64,
    /// Target measurement point id.
    pub point_id: u64,
    /// Expression evaluated to produce the value to write.
    pub command: Expr,
    /// Creation time (unit not shown here — presumably ms since epoch; TODO confirm).
    pub timestamp: u64,
}
/// Lifecycle states of a set-point command. `Yk*` variants are discrete
/// (telecontrol) commands, `Yt*` variants are analog (teleadjust) ones.
/// Mirrors `PbSetPointResult_SetPointStatus` (see `SetPointResult::create_pb_result`).
#[repr(i8)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub enum SetPointStatus {
    YkCreated = 0,
    YtCreated,
    YkSuccess,
    YtSuccess,
    YkFailTimeout,
    YtFailTimeout,
    YkFailTooBusy,
    YtFailTooBusy,
    YkFailProtocol,
    YtFailProtocol,
}
/// True when the protobuf result belongs to a discrete (YK) command,
/// false for analog (YT) ones.
pub fn is_yk(r: &PbSetPointResult) -> bool {
    match r.status() {
        PbSetPointResult_SetPointStatus::YkCreated
        | PbSetPointResult_SetPointStatus::YkSuccess
        | PbSetPointResult_SetPointStatus::YkFailTimeout
        | PbSetPointResult_SetPointStatus::YkFailTooBusy
        | PbSetPointResult_SetPointStatus::YkFailProtocol => true,
        _ => false,
    }
}
/// Outcome of a set-point command.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct SetPointResult {
    /// Id of the component that issued the command.
    pub sender_id: u64,
    /// Target measurement point id.
    pub point_id: u64,
    /// Time the command was created (same unit as `finish_time`; not shown here — TODO confirm).
    pub create_time: u64,
    /// Time the command finished (or failed).
    pub finish_time: u64,
    /// Raw bit pattern of the command value: an i64 for YK commands, an f64
    /// for YT commands — see `get_int_command` / `get_float_command`.
    pub command: u64,
    /// Final status of the command.
    pub status: SetPointStatus,
}
impl SetPointResult {
    /// True when this result belongs to a discrete (YK) command rather than an
    /// analog (YT) one.
    pub fn is_yk(&self) -> bool {
        matches!(
            self.status,
            SetPointStatus::YkCreated
                | SetPointStatus::YkFailTimeout
                | SetPointStatus::YkFailTooBusy
                | SetPointStatus::YkFailProtocol
                | SetPointStatus::YkSuccess
        )
    }
    /// Reinterprets the stored `command` bits as the analog (YT) value.
    /// `f64::from_bits` is exactly the former
    /// `BigEndian::read_f64(&command.to_be_bytes())` round trip, without the
    /// byte shuffling.
    pub fn get_float_command(&self) -> f64 {
        f64::from_bits(self.command)
    }
    /// Reinterprets the stored `command` bits as the discrete (YK) value.
    pub fn get_int_command(&self) -> i64 {
        // Bit-level reinterpretation (two's complement), not a numeric
        // conversion — equivalent to i64::from_be_bytes(command.to_be_bytes()).
        self.command as i64
    }
    /// Converts this result into its protobuf representation.
    pub fn create_pb_result(&self) -> PbSetPointResult {
        let mut result = PbSetPointResult::new();
        result.set_sender_id(self.sender_id);
        result.set_command(self.command);
        result.set_create_time(self.create_time);
        result.set_finish_time(self.finish_time);
        result.set_point_id(self.point_id);
        // One-to-one status mapping; keep in sync with `from`.
        match &self.status {
            SetPointStatus::YkCreated => {
                result.set_status(PbSetPointResult_SetPointStatus::YkCreated)
            }
            SetPointStatus::YtCreated => {
                result.set_status(PbSetPointResult_SetPointStatus::YtCreated)
            }
            SetPointStatus::YkSuccess => {
                result.set_status(PbSetPointResult_SetPointStatus::YkSuccess)
            }
            SetPointStatus::YtSuccess => {
                result.set_status(PbSetPointResult_SetPointStatus::YtSuccess)
            }
            SetPointStatus::YkFailTimeout => {
                result.set_status(PbSetPointResult_SetPointStatus::YkFailTimeout)
            }
            SetPointStatus::YtFailTimeout => {
                result.set_status(PbSetPointResult_SetPointStatus::YtFailTimeout)
            }
            SetPointStatus::YkFailTooBusy => {
                result.set_status(PbSetPointResult_SetPointStatus::YkFailTooBusy)
            }
            SetPointStatus::YtFailTooBusy => {
                result.set_status(PbSetPointResult_SetPointStatus::YtFailTooBusy)
            }
            SetPointStatus::YkFailProtocol => {
                result.set_status(PbSetPointResult_SetPointStatus::YkFailProtocol)
            }
            SetPointStatus::YtFailProtocol => {
                result.set_status(PbSetPointResult_SetPointStatus::YtFailProtocol)
            }
        }
        result
    }
    /// Builds a result from its protobuf representation (inverse of
    /// `create_pb_result`).
    pub fn from(r: PbSetPointResult) -> Self {
        let status = match r.status() {
            PbSetPointResult_SetPointStatus::YkCreated => SetPointStatus::YkCreated,
            PbSetPointResult_SetPointStatus::YtCreated => SetPointStatus::YtCreated,
            PbSetPointResult_SetPointStatus::YkSuccess => SetPointStatus::YkSuccess,
            PbSetPointResult_SetPointStatus::YtSuccess => SetPointStatus::YtSuccess,
            PbSetPointResult_SetPointStatus::YkFailTimeout => SetPointStatus::YkFailTimeout,
            PbSetPointResult_SetPointStatus::YtFailTimeout => SetPointStatus::YtFailTimeout,
            PbSetPointResult_SetPointStatus::YkFailTooBusy => SetPointStatus::YkFailTooBusy,
            PbSetPointResult_SetPointStatus::YtFailTooBusy => SetPointStatus::YtFailTooBusy,
            PbSetPointResult_SetPointStatus::YkFailProtocol => SetPointStatus::YkFailProtocol,
            PbSetPointResult_SetPointStatus::YtFailProtocol => SetPointStatus::YtFailProtocol,
        };
        Self {
            sender_id: r.sender_id(),
            point_id: r.point_id(),
            create_time: r.create_time(),
            finish_time: r.finish_time(),
            command: r.command(),
            status,
        }
    }
}
impl Display for Measurement {
    /// Renders the measurement as one CSV row. Column order matches the layout
    /// parsed by `from_csv_to_map` (minus the trailing tags/desc columns):
    /// point_id, point_name, alias_id, is_discrete, is_computing_point,
    /// expression, trans_expr, inv_trans_expr, change_expr, zero_expr,
    /// data_unit, upper_limit, lower_limit, alarm_level1_expr, alarm_level2_expr,
    /// is_realtime, is_soe, init_value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        //point_id, point_name, alias_id, is_discrete, is_computing_point,
        //expression, trans_expression,inv_trans_expression,change_expression,zero_expression
        //data_unit, upper_limit, lower_limit, max_gradient, min_gradient
        //is_realtime,is_soe,init_value,
        // init_value holds raw bits; decode as i64 or f64 depending on the kind.
        let init_v = if self.is_discrete {
            get_i64_str(self.get_init_discrete())
        } else {
            get_f64_str(self.get_init_analog())
        };
        // f64::MAX / f64::MIN are the "no limit" sentinels and export as empty cells.
        let upper_limit = if self.upper_limit == f64::MAX {
            "".to_string()
        } else {
            self.upper_limit.to_string()
        };
        let lower_limit = if self.lower_limit == f64::MIN {
            "".to_string()
        } else {
            self.lower_limit.to_string()
        };
        // NOTE(review): the two {:?} slots Debug-format the limit strings, which
        // wraps them in double quotes in the CSV output — presumably intentional
        // CSV quoting for numeric cells; confirm against the importer.
        let str = format!(
            "{},{},{},{},{},{},{},{},{},{},{},{:?},{:?},{},{},{},{},{}",
            self.point_id,
            get_csv_str(&self.point_name),
            get_csv_str(&self.alias_id),
            if self.is_discrete { "TRUE" } else { "FALSE" },
            if self.is_computing_point { "TRUE" } else { "FALSE" },
            get_csv_str(&self.expression),
            get_csv_str(&self.trans_expr),
            get_csv_str(&self.inv_trans_expr),
            get_csv_str(&self.change_expr),
            get_csv_str(&self.zero_expr),
            self.data_unit,
            upper_limit,
            lower_limit,
            get_csv_str(&self.alarm_level1_expr),
            get_csv_str(&self.alarm_level2_expr),
            if self.is_realtime { "TRUE" } else { "FALSE" },
            if self.is_soe { "TRUE" } else { "FALSE" },
            init_v,
        );
        write!(f, "{}", str)
    }
}
impl MeasureValue {
......@@ -1070,682 +240,6 @@ impl MeasureValue {
}
}
impl SetIntValue {
    /// Builds a discrete set-point command from a boolean
    /// (`true` -> 1, `false` -> 0).
    pub fn from_bool(sender_id: u64, point_id: u64, b: bool, timestamp: u64) -> SetIntValue {
        SetIntValue {
            sender_id,
            point_id,
            yk_command: i64::from(b),
            timestamp,
        }
    }
}
/// Parses measurement definitions from a CSV reader into `map` (keyed by point
/// id) and returns the tag index as `(tag, point_ids)` pairs sorted by tag.
///
/// Column layout (0-based; column 0 is skipped — presumably a row index, TODO
/// confirm): 1 point_id, 2 point_name, 3 alias, 4 is_discrete, 5 is_computing,
/// 6 expression, 7 trans_expr, 8 inv_trans_expr, 9 change_expr, 10 zero_expr,
/// 11 data_unit, 12 upper_limit, 13 lower_limit, 14 alarm1_expr, 15 alarm2_expr,
/// 16 is_realtime, 17 is_soe, 18 init_value, 19 tags (';'-separated), 20 desc.
///
/// With `id_check` set, ids below `MINIMUM_POINT_ID` are rejected. Errors are
/// reported as the `(row, column)` of the offending cell, with `row` starting
/// at `start_row`.
///
/// NOTE(review): the `while let Some(Ok(record))` loop stops silently at the
/// first malformed CSV record rather than reporting it — confirm intended.
pub fn from_csv_to_map<R: io::Read>(
    mut rdr: Reader<R>,
    start_row: usize,
    map: &mut HashMap<u64, Measurement>,
    id_check: bool,
) -> Result<Vec<(String, Vec<u64>)>, (usize, usize)> {
    let mut records = rdr.records();
    let offset: usize = 1;
    let mut row: usize = start_row;
    // BTreeMap so the returned tag list is sorted by tag name.
    let mut assist_map: BTreeMap<String, Vec<u64>> = BTreeMap::new();
    while let Some(Ok(record)) = records.next() {
        let rc = (row, offset);
        let point_id = csv_u64(&record, rc.1).ok_or(rc)?;
        if id_check && point_id < MINIMUM_POINT_ID {
            return Err(rc);
        }
        let rc = (row, offset + 1);
        let point_name = csv_string(&record, rc.1).ok_or(rc)?;
        let rc = (row, offset + 2);
        // check alias_id: a non-empty alias must parse as an expression whose
        // tokens are all variables; the last variable name becomes the alias.
        let mut alias_id = csv_string(&record, rc.1).ok_or(rc)?;
        if !alias_id.is_empty() {
            if let Ok(alias_expr) = alias_id.parse::<Expr>() {
                for token in &alias_expr.rpn {
                    match token {
                        Token::Var(n) => alias_id = n.clone(),
                        _ => return Err(rc),
                    }
                }
            } else {
                return Err(rc);
            }
        }
        // Boolean flags: only "TRUE" (any case) is true; anything else is false.
        let rc = (row, offset + 3);
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let is_discrete = match s.to_uppercase().as_str() {
            "FALSE" => false,
            "TRUE" => true,
            _ => false,
        };
        let rc = (row, offset + 4);
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let is_computing_point = match s.to_uppercase().as_str() {
            "FALSE" => false,
            "TRUE" => true,
            _ => false,
        };
        // The five expression columns are validated (must parse) but stored as
        // their original source strings.
        let rc = (row, offset + 5);
        let expression = csv_string(&record, rc.1).ok_or(rc)?;
        if !expression.is_empty() {
            expression.parse::<Expr>().map_err(|_| rc)?;
        }
        let rc = (row, offset + 6);
        let trans_expr = csv_string(&record, rc.1).ok_or(rc)?;
        if !trans_expr.is_empty() {
            trans_expr.parse::<Expr>().map_err(|_| rc)?;
        }
        let rc = (row, offset + 7);
        let inv_trans_expr = csv_string(&record, rc.1).ok_or(rc)?;
        if !inv_trans_expr.is_empty() {
            inv_trans_expr.parse::<Expr>().map_err(|_| rc)?;
        }
        let rc = (row, offset + 8);
        let change_expr = csv_string(&record, rc.1).ok_or(rc)?;
        if !change_expr.is_empty() {
            change_expr.parse::<Expr>().map_err(|_| rc)?;
        }
        let rc = (row, offset + 9);
        let zero_expr = csv_string(&record, rc.1).ok_or(rc)?;
        if !zero_expr.is_empty() {
            zero_expr.parse::<Expr>().map_err(|_| rc)?;
        }
        let rc = (row, offset + 10);
        let data_unit = csv_string(&record, rc.1).ok_or(rc)?;
        let unit = DataUnit::from_str(&data_unit).or(Err(rc))?;
        // Empty limit cells mean "unbounded" and map to the f64::MAX/MIN sentinels.
        let rc = (row, offset + 11);
        let upper_limit_str = csv_string(&record, rc.1).ok_or(rc)?;
        let mut upper_limit = f64::MAX;
        if !upper_limit_str.is_empty() {
            upper_limit = upper_limit_str.parse::<f64>().map_err(|_| rc)?;
        }
        let rc = (row, offset + 12);
        let lower_limit_str = csv_string(&record, rc.1).ok_or(rc)?;
        let mut lower_limit = f64::MIN;
        if !lower_limit_str.is_empty() {
            lower_limit = lower_limit_str.parse::<f64>().map_err(|_| rc)?;
        }
        if lower_limit > upper_limit {
            return Err(rc);
        }
        // Alarm expressions are stored both as source strings and parsed forms.
        let rc = (row, offset + 13);
        let alarm_level1_expr = csv_string(&record, rc.1).ok_or(rc)?;
        let alarm_level1 = if !alarm_level1_expr.is_empty() {
            Some(alarm_level1_expr.parse::<Expr>().map_err(|_| rc)?)
        } else {
            None
        };
        let rc = (row, offset + 14);
        let alarm_level2_expr = csv_string(&record, rc.1).ok_or(rc)?;
        let alarm_level2 = if !alarm_level2_expr.is_empty() {
            Some(alarm_level2_expr.parse::<Expr>().map_err(|_| rc)?)
        } else {
            None
        };
        let rc = (row, offset + 15);
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let is_realtime = match s.to_uppercase().as_str() {
            "FALSE" => false,
            "TRUE" => true,
            _ => false,
        };
        let rc = (row, offset + 16);
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let is_soe = match s.to_uppercase().as_str() {
            "FALSE" => false,
            "TRUE" => true,
            _ => false,
        };
        // init_value stores the raw bit pattern of either an i64 (discrete) or
        // an f64 (analog) in a u64.
        let init_value = if is_discrete {
            let rc = (row, offset + 17);
            let v = csv_i64(&record, rc.1).ok_or(rc)?;
            let bytes = v.to_be_bytes();
            BigEndian::read_u64(&bytes)
        } else {
            let rc = (row, offset + 17);
            let v = csv_f64(&record, rc.1).ok_or(rc)?;
            let bytes = v.to_be_bytes();
            BigEndian::read_u64(&bytes)
        };
        // tags: optional ';'-separated list; blank entries are skipped.
        if let Some(tags) = csv_str(&record, offset + 18) {
            let tag_vec: Vec<&str> = tags.split(';').collect();
            for tag in tag_vec {
                let tag = tag.trim();
                if tag.is_empty() {
                    continue;
                }
                if let Some(v) = assist_map.get_mut(tag) {
                    v.push(point_id);
                } else {
                    assist_map.insert(tag.to_string(), vec![point_id]);
                }
            }
        }
        // desc: optional free-text description.
        let desc = if let Some(desc) = csv_string(&record, offset + 19) {
            desc
        } else {
            "".to_string()
        };
        map.insert(
            point_id,
            Measurement {
                point_id,
                point_name,
                alias_id,
                is_discrete,
                is_computing_point,
                expression,
                trans_expr,
                inv_trans_expr,
                change_expr,
                zero_expr,
                data_unit,
                upper_limit,
                lower_limit,
                alarm_level1_expr,
                alarm_level1,
                alarm_level2_expr,
                alarm_level2,
                is_realtime,
                is_soe,
                init_value,
                unit,
                desc,
                is_remote: false,
            },
        );
        row += 1;
    }
    let mut result = Vec::with_capacity(assist_map.len());
    for (k, v) in assist_map {
        result.push((k, v));
    }
    Ok(result)
}
/// Loads measurements from a CSV or Excel file, with point-id checking enabled.
/// Only the first sheet of an Excel workbook is used; an unreadable or empty
/// file yields `Err((0, 0))`.
pub fn from_file(path: &str) -> Result<(HashMap<u64, Measurement>, Vec<(String, Vec<u64>)>), (usize, usize)> {
    let content = std::fs::read(path).map_err(|_| (0, 0))?;
    let is_excel = path.ends_with(".xlsx") || path.ends_with(".xls");
    let csv_bytes = if is_excel {
        let sheets = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
        match sheets.first() {
            Some(first) => first.clone(),
            None => return Err((0, 0)),
        }
    } else {
        content
    };
    from_csv_bytes(csv_bytes.as_slice(), true)
}
/// Like `from_file`, but lets the caller decide whether point ids are checked
/// against `MINIMUM_POINT_ID` (`id_check`).
pub fn from_file2(path: &str, id_check: bool) -> Result<(HashMap<u64, Measurement>, Vec<(String, Vec<u64>)>), (usize, usize)> {
    let content = std::fs::read(path).map_err(|_| (0, 0))?;
    let is_excel = path.ends_with(".xlsx") || path.ends_with(".xls");
    let csv_bytes = if is_excel {
        let sheets = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
        match sheets.first() {
            Some(first) => first.clone(),
            None => return Err((0, 0)),
        }
    } else {
        content
    };
    from_csv_bytes2(csv_bytes.as_slice(), true, id_check)
}
pub fn from_csv(path: &str) -> Result<(HashMap<u64, Measurement>, Vec<(String, Vec<u64>)>), (usize, usize)> {
let content = std::fs::read(path).map_err(|_| (0, 0))?;
// if env::IS_ENCRYPT {
// let content = decrypt_vec(content.as_slice());
// from_csv_bytes(content.as_slice(), true)
// } else {
// from_csv_bytes(content.as_slice(), true)
// }
from_csv_bytes(content.as_slice(), true)
}
/// Loads measurements from a plain CSV file, forwarding `id_check` to the
/// row parser. `(0, 0)` means the file could not be read.
pub fn from_csv2(path: &str, id_check: bool) -> Result<(HashMap<u64, Measurement>, Vec<(String, Vec<u64>)>), (usize, usize)> {
    match std::fs::read(path) {
        Ok(bytes) => from_csv_bytes2(bytes.as_slice(), true, id_check),
        Err(_) => Err((0, 0)),
    }
}
pub fn from_csv_bytes(
content: &[u8],
has_headers: bool,
) -> Result<(HashMap<u64, Measurement>, Vec<(String, Vec<u64>)>), (usize, usize)> {
let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
let content = content_new.as_slice();
let rdr = csv::ReaderBuilder::new()
.has_headers(has_headers)
.from_reader(content);
let start_row = if has_headers { 1 } else { 0 };
let mut map: HashMap<u64, Measurement> = HashMap::new();
let tags = from_csv_to_map(rdr, start_row, &mut map, true)?;
map.shrink_to_fit();
Ok((map, tags))
}
/// Parses measurement CSV bytes into a point map plus the tag index.
///
/// The bytes are first normalized to UTF-8; `has_headers` controls whether
/// the first row is skipped, and `id_check` is forwarded to the row parser.
pub fn from_csv_bytes2(
    content: &[u8],
    has_headers: bool,
    id_check: bool,
) -> Result<(HashMap<u64, Measurement>, Vec<(String, Vec<u64>)>), (usize, usize)> {
    let utf8 = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
    let reader = csv::ReaderBuilder::new()
        .has_headers(has_headers)
        .from_reader(utf8.as_slice());
    // Error rows are reported relative to the physical file, so start
    // counting at 1 when a header row is present.
    let first_data_row = if has_headers { 1 } else { 0 };
    let mut points: HashMap<u64, Measurement> = HashMap::new();
    let tags = from_csv_to_map(reader, first_data_row, &mut points, id_check)?;
    points.shrink_to_fit();
    Ok((points, tags))
}
/// Builds a minimal discrete (integer-valued) measurement with the given
/// initial value; every other field takes a neutral default.
pub fn init_discrete_point(point_id: u64, init_v: i64) -> Measurement {
    // Store the i64 bit pattern into the u64 `init_value` slot. The stdlib
    // to_be_bytes/from_be_bytes round-trip replaces the byteorder calls and
    // is an exact bit reinterpretation.
    let init_value = u64::from_be_bytes(init_v.to_be_bytes());
    Measurement {
        point_id,
        point_name: "".to_string(),
        alias_id: "".to_string(),
        is_discrete: true,
        is_computing_point: false,
        expression: "".to_string(),
        trans_expr: "".to_string(),
        inv_trans_expr: "".to_string(),
        change_expr: "".to_string(),
        zero_expr: "".to_string(),
        data_unit: "".to_string(),
        unit: DataUnit::UnitOne,
        upper_limit: f64::MAX,
        lower_limit: f64::MIN,
        alarm_level1_expr: "".to_string(),
        alarm_level1: None,
        alarm_level2_expr: "".to_string(),
        alarm_level2: None,
        is_realtime: false,
        is_soe: false,
        is_remote: false,
        init_value,
        desc: "".to_string(),
    }
}
/// Builds a minimal analog (floating-point) measurement with the given
/// initial value; every other field takes a neutral default.
pub fn init_analog_point(point_id: u64, init_v: f64) -> Measurement {
    // Store the f64 bit pattern into the u64 `init_value` slot; exact bit
    // reinterpretation via the stdlib, replacing the byteorder round-trip.
    let init_value = u64::from_be_bytes(init_v.to_be_bytes());
    Measurement {
        point_id,
        point_name: "".to_string(),
        alias_id: "".to_string(),
        is_discrete: false,
        is_computing_point: false,
        expression: "".to_string(),
        trans_expr: "".to_string(),
        inv_trans_expr: "".to_string(),
        change_expr: "".to_string(),
        zero_expr: "".to_string(),
        data_unit: "".to_string(),
        unit: DataUnit::UnitOne,
        upper_limit: f64::MAX,
        lower_limit: f64::MIN,
        alarm_level1_expr: "".to_string(),
        alarm_level1: None,
        alarm_level2_expr: "".to_string(),
        alarm_level2: None,
        is_realtime: false,
        is_soe: false,
        is_remote: false,
        init_value,
        desc: "".to_string(),
    }
}
/// Builds a computing-point measurement that evaluates `expr`; all other
/// fields take neutral defaults and the initial value is zero.
pub fn init_computing_point(point_id: u64, expr: &str, is_discrete: bool) -> Measurement {
    Measurement {
        point_id,
        point_name: String::new(),
        alias_id: String::new(),
        is_discrete,
        is_computing_point: true,
        expression: expr.to_owned(),
        trans_expr: String::new(),
        inv_trans_expr: String::new(),
        change_expr: String::new(),
        zero_expr: String::new(),
        data_unit: String::new(),
        unit: DataUnit::UnitOne,
        upper_limit: f64::MAX,
        lower_limit: f64::MIN,
        alarm_level1_expr: String::new(),
        alarm_level1: None,
        alarm_level2_expr: String::new(),
        alarm_level2: None,
        is_realtime: false,
        is_soe: false,
        is_remote: false,
        init_value: 0,
        desc: String::new(),
    }
}
impl Measurement {
pub fn get_init_discrete(&self) -> i64 {
let buf = self.init_value.to_be_bytes();
BigEndian::read_i64(&buf)
}
pub fn get_init_analog(&self) -> f64 {
let buf = self.init_value.to_be_bytes();
BigEndian::read_f64(&buf)
}
}
/// Returns the trimmed string value of column `col`, or `None` when the
/// record has no such column.
pub fn csv_str(record: &StringRecord, col: usize) -> Option<&str> {
    record.get(col).map(str::trim)
}
/// Returns the trimmed, owned string value of column `col`, or `None` when
/// the record has no such column.
pub fn csv_string(record: &StringRecord, col: usize) -> Option<String> {
    record.get(col).map(|field| field.trim().to_string())
}
/// Parses column `col` as a `usize`.
///
/// Fix: trims surrounding whitespace first, like every other `csv_*` helper
/// in this file (the old version was the only one that did not, so a padded
/// cell parsed everywhere except here), and drops the needless intermediate
/// `String` allocation.
pub fn csv_usize(record: &StringRecord, col: usize) -> Option<usize> {
    record.get(col)?.trim().parse().ok()
}
/// Parses column `col` as a `u8`; accepts decimal, or hex with a `0x` prefix.
pub fn csv_u8(record: &StringRecord, col: usize) -> Option<u8> {
    let s = record.get(col)?.trim();
    // `strip_prefix` removes the marker exactly once; the previous
    // `trim_start_matches("0x")` silently accepted malformed repeats
    // such as "0x0x1f".
    if let Some(hex) = s.strip_prefix("0x") {
        u8::from_str_radix(hex, 16).ok()
    } else {
        s.parse().ok()
    }
}
/// Parses column `col` as a `u16`; accepts decimal, or hex with a `0x` prefix.
pub fn csv_u16(record: &StringRecord, col: usize) -> Option<u16> {
    let s = record.get(col)?.trim();
    // strip_prefix removes "0x" exactly once (trim_start_matches accepted
    // malformed repeats like "0x0x1f").
    if let Some(hex) = s.strip_prefix("0x") {
        u16::from_str_radix(hex, 16).ok()
    } else {
        s.parse().ok()
    }
}
/// Parses column `col` as a `u32`; accepts decimal, or hex with a `0x` prefix.
pub fn csv_u32(record: &StringRecord, col: usize) -> Option<u32> {
    let s = record.get(col)?.trim();
    // strip_prefix removes "0x" exactly once (trim_start_matches accepted
    // malformed repeats like "0x0x1f").
    if let Some(hex) = s.strip_prefix("0x") {
        u32::from_str_radix(hex, 16).ok()
    } else {
        s.parse().ok()
    }
}
/// Parses column `col` as a `u64`; accepts decimal, or hex with a `0x` prefix.
pub fn csv_u64(record: &StringRecord, col: usize) -> Option<u64> {
    let s = record.get(col)?.trim();
    // strip_prefix removes "0x" exactly once (trim_start_matches accepted
    // malformed repeats like "0x0x1f").
    if let Some(hex) = s.strip_prefix("0x") {
        u64::from_str_radix(hex, 16).ok()
    } else {
        s.parse().ok()
    }
}
/// Parses column `col` as an `i8`; accepts decimal, or non-negative hex with
/// a `0x` prefix (a leading minus sign disables the hex path, as before).
pub fn csv_i8(record: &StringRecord, col: usize) -> Option<i8> {
    let s = record.get(col)?.trim();
    // strip_prefix removes "0x" exactly once (trim_start_matches accepted
    // malformed repeats like "0x0x1f").
    if let Some(hex) = s.strip_prefix("0x") {
        i8::from_str_radix(hex, 16).ok()
    } else {
        s.parse().ok()
    }
}
/// Parses column `col` as an `i16`; accepts decimal, or non-negative hex with
/// a `0x` prefix.
pub fn csv_i16(record: &StringRecord, col: usize) -> Option<i16> {
    let s = record.get(col)?.trim();
    // strip_prefix removes "0x" exactly once (trim_start_matches accepted
    // malformed repeats like "0x0x1f").
    if let Some(hex) = s.strip_prefix("0x") {
        i16::from_str_radix(hex, 16).ok()
    } else {
        s.parse().ok()
    }
}
/// Parses column `col` as an `i32`; accepts decimal, or non-negative hex with
/// a `0x` prefix.
pub fn csv_i32(record: &StringRecord, col: usize) -> Option<i32> {
    let s = record.get(col)?.trim();
    // strip_prefix removes "0x" exactly once (trim_start_matches accepted
    // malformed repeats like "0x0x1f").
    if let Some(hex) = s.strip_prefix("0x") {
        i32::from_str_radix(hex, 16).ok()
    } else {
        s.parse().ok()
    }
}
/// Parses column `col` as an `i64`; accepts decimal, or non-negative hex with
/// a `0x` prefix.
pub fn csv_i64(record: &StringRecord, col: usize) -> Option<i64> {
    let s = record.get(col)?.trim();
    // strip_prefix removes "0x" exactly once (trim_start_matches accepted
    // malformed repeats like "0x0x1f").
    if let Some(hex) = s.strip_prefix("0x") {
        i64::from_str_radix(hex, 16).ok()
    } else {
        s.parse().ok()
    }
}
/// Parses column `col` as an `f64` after trimming surrounding whitespace.
pub fn csv_f64(record: &StringRecord, col: usize) -> Option<f64> {
    record.get(col)?.trim().parse().ok()
}
/// Parses column `col` as an `f32` after trimming surrounding whitespace.
pub fn csv_f32(record: &StringRecord, col: usize) -> Option<f32> {
    record.get(col)?.trim().parse().ok()
}
/// Errors produced when parsing a textual data-unit name.
#[derive(Debug, Clone, PartialEq)]
pub enum DataUnitError {
    /// The supplied string did not match any known data unit; carries the
    /// offending input.
    UnknownDataUnit(String),
}
/// Maps a textual parity name (case-insensitive) to a [`SerialParity`],
/// defaulting to `SerialParity::None` for unrecognized input.
pub fn create_parity(v: &str) -> SerialParity {
    // Uppercase once instead of once per comparison (the old chain called
    // `to_uppercase()` up to five times).
    match v.to_uppercase().as_str() {
        "ODD" => SerialParity::Odd,
        "EVEN" => SerialParity::Even,
        "MARK" => SerialParity::Mark,
        "SPACE" => SerialParity::Space,
        // Covers the explicit "NONE" as well as anything unknown.
        _ => SerialParity::None,
    }
}
/// Builds the CSV header row for exported points.
///
/// Each column title is looked up in `text_map` (localization overrides) and
/// falls back to its English default. Fix: the previous version allocated a
/// fresh `String` for every default even when the lookup succeeded
/// (`unwrap_or(&"...".to_string())`); this version borrows throughout.
pub fn export_points_header(text_map: &HashMap<String, String>) -> String {
    // (lookup key, English fallback) for every column, in output order.
    const COLUMNS: [(&str, &str); 21] = [
        ("index", "Index"),
        ("point_id", "ID"),
        ("name", "Name"),
        ("alias", "Alias"),
        ("is_discrete", "Is Discrete"),
        ("is_computing", "Is Computing"),
        ("point_expression", "Expression"),
        ("trans_expr", "Trans Expr"),
        ("inv_trans_expr", "Inv Trans Expr"),
        ("change_expr", "Change Expr"),
        ("zero_expr", "Zero Expr"),
        ("unit", "Unit"),
        ("upper_limit", "Upper Limit"),
        ("lower_limit", "Lower Limit"),
        ("alarm_level1", "Alarm 1"),
        ("alarm_level2", "Alarm 2"),
        ("is_realtime", "Is Realtime"),
        ("is_soe", "Is SOE"),
        ("point_init", "Initial"),
        ("tags", "Tags"),
        ("desc", "Description"),
    ];
    let titles: Vec<&str> = COLUMNS
        .iter()
        .map(|&(key, fallback)| text_map.get(key).map(String::as_str).unwrap_or(fallback))
        .collect();
    titles.join(",")
}
/// Serializes `points` to CSV text: the localized header row, then one row
/// per point (1-based index, point fields, its tags from `tags_map`, and the
/// CSV-escaped description). No trailing newline after the final row.
pub fn export_points_csv(points: &[Measurement], tags_map: &HashMap<u64, String>,
                         text_map: &HashMap<String, String>) -> String {
    let mut out = export_points_header(text_map);
    if !points.is_empty() {
        out.push('\n');
    }
    let last = points.len();
    for (i, point) in points.iter().enumerate() {
        out.push_str(&(i + 1).to_string());
        out.push(',');
        out.push_str(&point.to_string());
        out.push(',');
        // Tag column stays empty when the point has no tags.
        if let Some(tags) = tags_map.get(&point.point_id) {
            out.push_str(&get_csv_str(tags.as_str()));
        }
        out.push(',');
        out.push_str(&get_csv_str(&point.desc));
        if i + 1 != last {
            out.push('\n');
        }
    }
    out
}
/// Parses a set-point command CSV. Column 1 holds either a numeric point id
/// or an alias; column 2 holds the command value. Commands targeting discrete
/// points become [`SetIntValue`], analog ones become [`SetFloatValue`].
/// Errors are `(row, column)` coordinates of the offending cell.
pub fn parse_set_points_csv(
    content: &[u8],
    has_headers: bool,
    points: &[Measurement],
) -> Result<(Vec<SetIntValue>, Vec<SetFloatValue>), (usize, usize)> {
    let mut rdr = csv::ReaderBuilder::new()
        .has_headers(has_headers)
        .from_reader(content);
    // Build both lookup tables in a single pass over the point list:
    // alias -> slice position and point id -> slice position.
    let mut alias_to_pos = HashMap::new();
    let mut id_to_pos = HashMap::with_capacity(points.len());
    for (pos, point) in points.iter().enumerate() {
        alias_to_pos.insert(point.alias_id.clone(), pos);
        id_to_pos.insert(point.point_id, pos);
    }
    let offset: usize = 1;
    let mut row: usize = if has_headers { 1 } else { 0 };
    let mut int_commands = Vec::new();
    let mut float_commands = Vec::new();
    let mut records = rdr.records();
    // Parsing stops at the first unreadable record, as before.
    while let Some(Ok(record)) = records.next() {
        let rc = (row, offset);
        // The first column may hold either a numeric id or an alias string.
        let pos = if let Some(point_id) = csv_u64(&record, rc.1) {
            *id_to_pos.get(&point_id).ok_or(rc)?
        } else {
            let alias = csv_str(&record, rc.1).ok_or(rc)?;
            *alias_to_pos.get(alias).ok_or(rc)?
        };
        let target = &points[pos];
        let rc = (row, offset + 1);
        if target.is_discrete {
            let yk_command = csv_i64(&record, rc.1).ok_or(rc)?;
            int_commands.push(SetIntValue {
                sender_id: 0,
                point_id: target.point_id,
                yk_command,
                timestamp: 0,
            });
        } else {
            let yt_command = csv_f64(&record, rc.1).ok_or(rc)?;
            float_commands.push(SetFloatValue {
                sender_id: 0,
                point_id: target.point_id,
                yt_command,
                timestamp: 0,
            });
        }
        row += 1;
    }
    Ok((int_commands, float_commands))
}
/// Escapes a value for embedding in a CSV cell: if it contains a comma,
/// newline or quote, or has leading/trailing spaces, the whole value is
/// wrapped in double quotes with inner quotes doubled; otherwise it is
/// returned unchanged.
pub fn get_csv_str(s: &str) -> String {
    let needs_quoting = s.chars().any(|c| matches!(c, ',' | '\n' | '"'))
        || s.starts_with(' ')
        || s.ends_with(' ');
    if !needs_quoting {
        return s.to_string();
    }
    format!("\"{}\"", s.replace('\"', "\"\""))
}
/// Formats an `f64` keeping at least 1 decimal place and at most 8
/// significant digits; very large or very small magnitudes switch to
/// scientific notation.
pub fn get_f64_str(num: f64) -> String {
    const LENGTH: usize = 8;
    let int_digits = count_integer_places(num);
    let dec_digits = count_decimal_places(num);
    // Scientific notation when the integer part alone exceeds the budget or
    // the non-zero value is smaller than 10^-(LENGTH-1).
    let too_wide = int_digits > LENGTH - 1;
    let too_small = (num != 0.0) && (num.abs() < 0.1f64.powi((LENGTH - 1) as i32));
    if too_wide || too_small {
        format!("{:.dec$e}", num, dec = LENGTH - 1)
    } else {
        // Spend whatever digit budget remains on decimals, but always show
        // at least one decimal place.
        let remain = LENGTH - int_digits;
        format!("{:.dec$}", num, dec = 1.max(dec_digits.min(remain)))
    }
}
/// Formats a discrete (integer) value, limiting its displayed length:
/// magnitudes above one million switch to scientific notation.
pub fn get_i64_str(num: i64) -> String {
    if num.abs() > 1_000_000i64 {
        format!("{:.6e}", num)
    } else {
        num.to_string()
    }
}
/// Counts the digits of the integer part of `num` (sign ignored; values
/// below 1.0 count as one digit, the leading zero).
fn count_integer_places(num: f64) -> usize {
    (num.abs() as i64).to_string().len()
}
/// Counts the decimal places of `num` as rendered by its `Debug` form;
/// returns the sentinel 17 (meaning "more than 17") when the value renders
/// in scientific notation.
fn count_decimal_places(num: f64) -> usize {
    let repr = format!("{:?}", num);
    if repr.contains('e') {
        // Sentinel for scientific notation; a named constant would be nicer.
        return 17;
    }
    repr.split('.').nth(1).map_or(0, str::len)
}
fn serialize_enum_or_unknown<E: EnumFull, S: serde::Serializer>(
e: &Option<EnumOrUnknown<E>>,
s: S,
......@@ -1760,6 +254,41 @@ fn serialize_enum_or_unknown<E: EnumFull, S: serde::Serializer>(
}
}
/**
 * @api {整型指令数据} /SetIntValue SetIntValue
 * @apiGroup A_Object
 * @apiSuccess {u64} sender_id sender_id
 * @apiSuccess {u64} point_id point_id
 * @apiSuccess {i64} yk_command yk_command
 * @apiSuccess {u64} timestamp timestamp
 */
/// Integer (discrete) set-point command data.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetIntValue {
    /// Id of the command sender; 0 when created from a parsed CSV row.
    pub sender_id: u64,
    /// Target measurement point id.
    pub point_id: u64,
    /// The integer command value.
    pub yk_command: i64,
    /// Command timestamp; 0 when created from a parsed CSV row.
    pub timestamp: u64,
}
/**
 * @api {浮点型指令数据} /SetFloatValue SetFloatValue
 * @apiGroup A_Object
 * @apiSuccess {u64} sender_id sender_id
 * @apiSuccess {u64} point_id point_id
 * @apiSuccess {f64} yt_command yt_command
 * @apiSuccess {u64} timestamp timestamp
 */
/// Floating-point (analog) set-point command data.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetFloatValue {
    /// Id of the command sender; 0 when created from a parsed CSV row.
    pub sender_id: u64,
    /// Target measurement point id.
    pub point_id: u64,
    /// The floating-point command value.
    pub yt_command: f64,
    /// Command timestamp; 0 when created from a parsed CSV row.
    pub timestamp: u64,
}
fn deserialize_enum_or_unknown<'de, E: EnumFull, D: serde::Deserializer<'de>>(
d: D,
) -> Result<Option<EnumOrUnknown<E>>, D::Error> {
......@@ -1801,4 +330,4 @@ fn deserialize_enum_or_unknown<'de, E: EnumFull, D: serde::Deserializer<'de>>(
}
d.deserialize_any(DeserializeEnumVisitor(PhantomData))
}
\ No newline at end of file
}
use std::collections::{BTreeMap, HashMap, HashSet};
use std::path::Path;
use std::str::FromStr;
use csv::StringRecord;
use serde::{Deserialize, Serialize};
use crate::{csv_i32, csv_str, csv_string, csv_u64, csv_usize, MAX_POLLING_PERIOD};
use crate::excel::{excel_bytes_to_csv_bytes, transfer_to_utf8};
use crate::prop::DataType;
/// Polling period (in milliseconds) used when a connection's CSV definition
/// leaves the period cell empty.
const DEFAULT_POLLING_PERIOD_IN_MILLI: u64 = 5000;
/// Locking strategy guarding access to a shared-memory connection.
#[derive(Serialize, Deserialize, PartialEq, Debug, Default, Clone, Copy)]
pub enum MemLock {
    /// No locking.
    #[default]
    None,
    /// Mutex-based locking; the payload is read from the "mutex number" CSV
    /// cell — presumably an index/count, TODO confirm its exact meaning.
    Mutex(usize),
    /// Semaphore-based locking.
    Semaphore,
}
/// One shared-memory mapping: its location (`base_addr`/`total_size`), the
/// data items exchanged through it, and derived lookup tables built by
/// `create_data_config`.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct MemConnection {
    pub name: String,
    pub base_addr: usize,
    // Limited by the machine word size; overflow should be reported as an error.
    pub total_size: Option<usize>,
    // Derived: maps measurement point id -> position in `data`.
    pub point_to_pos: HashMap<u64, usize>,
    pub data: Vec<MemData>,
    // Polling period (ms) registered as the baseline entry of
    // `polling_period_to_data`.
    pub default_polling_period_in_milli: u64,
    /// key: register address, value: position within the `Vec<MemData>` settings
    pub mem_addr_to_pos: HashMap<usize, usize>,
    /// Data grouped by polling period; key is period in milli, value is positions.
    pub polling_period_to_data: BTreeMap<u64, Vec<usize>>,
    pub lock_method: MemLock,
}
/// Shared-memory transport channel definition (POSIX variant).
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct MemoryPosixTp {
    pub id: u64,
    pub name: String,
    pub is_transfer: bool,
    /// Channel name (original comment); the field actually holds an optional
    /// path string — NOTE(review): confirm which is meant.
    pub path: Option<String>,
    pub connections: Vec<MemConnection>,
}
/// Shared-memory transport channel definition (System V variant, per the
/// name); unlike the POSIX variant it carries exactly one connection.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Default)]
pub struct MemorySystemVTp {
    pub id: u64,
    pub name: String,
    pub is_transfer: bool,
    /// Channel name (original comment); the field holds a path string —
    /// NOTE(review): confirm which is meant.
    pub path: String,
    // Read from the 4th CSV row; presumably an IPC identifier/key —
    // TODO confirm.
    pub identifier: i32,
    pub connection: MemConnection,
}
/// One data item inside a memory connection.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct MemData {
    pub is_writable: bool,
    /// Start byte offset of this item within the connection's memory block.
    pub from: usize,
    // data type
    pub data_type: DataType,
    // id of the associated measurement point
    pub point_id: u64,
    pub polling_period_in_milli: u64,
}
impl Default for MemConnection {
fn default() -> Self {
MemConnection {
name: "new".to_string(),
base_addr: 0,
total_size: None,
point_to_pos: Default::default(),
data: vec![],
default_polling_period_in_milli: DEFAULT_POLLING_PERIOD_IN_MILLI,
mem_addr_to_pos: Default::default(),
polling_period_to_data: Default::default(),
lock_method: MemLock::None,
}
}
}
impl MemConnection {
    /// Collects the read-only (non-writable) items, keyed by their start
    /// address inside the memory block.
    pub fn read_config(&self) -> HashMap<usize, MemData> {
        let mut r = HashMap::with_capacity(self.data.len());
        for pdi in &self.data {
            if !pdi.is_writable {
                r.insert(pdi.from, pdi.clone());
            }
        }
        r.shrink_to_fit();
        r
    }
    /// Collects the writable items, keyed by their start address inside the
    /// memory block.
    pub fn write_config(&self) -> HashMap<usize, MemData> {
        let mut r = HashMap::new();
        for pdi in &self.data {
            if pdi.is_writable {
                r.insert(pdi.from, pdi.clone());
            }
        }
        r.shrink_to_fit();
        r
    }
    /// Parses one connection section out of the CSV content.
    ///
    /// The section header occupies the first rows of column `1 + offset`
    /// (name, point count, base address, total size, default polling period,
    /// lock method, optional mutex number); the per-point rows start at
    /// column `3 + offset`. Errors are `(row, column)` coordinates.
    fn from_csv_records_posix(
        content: &[u8],
        offset: usize,
    ) -> Result<Self, (usize, usize)> {
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        // 1st line: connection name
        let rc = (0usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let name = csv_string(&record, rc.1).ok_or(rc)?;
        // 2nd line: number of data points
        let rc = (1usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let point_num = csv_usize(&record, rc.1).ok_or(rc)?;
        // 3rd line: base address
        let rc = (2usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let base_addr = csv_usize(&record, rc.1).ok_or(rc)?;
        // 4th line: optional total size (an empty cell means unlimited/unset)
        let rc = (3usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let total_size_tmp = csv_string(&record, rc.1).ok_or(rc)?;
        let total_size = if total_size_tmp.is_empty() {
            None
        } else {
            Some(total_size_tmp.parse().map_err(|_| rc)?)
        };
        // 5th line: default polling period (empty cell -> module default)
        let rc = (4usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let default_polling_period_in_milli: u64 = if s.is_empty() {
            DEFAULT_POLLING_PERIOD_IN_MILLI
        } else {
            s.parse().map_err(|_| rc)?
        };
        // 6th line: lock method. NOTE(review): `ok_or(rc).unwrap_or_default()`
        // collapses a missing cell to "" (and thus MemLock::None) instead of
        // returning the error — confirm this leniency is intended.
        let rc = (5usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc).unwrap_or_default();
        let lock_method = match s.trim().to_uppercase().as_str() {
            "NONE" => MemLock::None,
            "SEMAPHORE" => MemLock::Semaphore,
            "MUTEX" => {
                // 7th line: mutex number, only consumed for MUTEX
                let rc = (6usize, 1 + offset);
                let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
                let mutex_num = csv_usize(&record, rc.1).ok_or(rc)?;
                MemLock::Mutex(mutex_num)
            }
            _ => MemLock::None,
        };
        // Data rows: re-read the sheet from the top, skip the header row,
        // then parse `point_num` rows starting at column `3 + offset`.
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        let mut mem_data = Vec::with_capacity(point_num);
        let rc = (0, 3 + offset);
        records.next().ok_or(rc)?.map_err(|_| rc)?;
        for row in 1..=point_num {
            let rc = (row, 3 + offset);
            let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
            let data = MemData::parse_mem_data(&record, rc.0, rc.1)?;
            mem_data.push(data);
        }
        let mut conn = MemConnection {
            name,
            base_addr,
            total_size,
            default_polling_period_in_milli,
            data: mem_data,
            lock_method,
            ..Default::default()
        };
        // Build the derived lookup tables and validate; shift error columns
        // back into this section's coordinate space.
        conn.create_data_config().map_err(|(r, c, _)| (r, c + offset))?;
        Ok(conn)
    }
    /// Validates `data` and (re)builds the derived lookup tables:
    /// `point_to_pos`, `mem_addr_to_pos` and `polling_period_to_data`.
    /// Errors carry `(row, column, message)` for the offending item.
    pub fn create_data_config(&mut self) -> Result<(), (usize, usize, String)> {
        let size = self.data.len();
        let mut point_to_pos = HashMap::with_capacity(size);
        for (index, rd) in self.data.iter().enumerate() {
            point_to_pos.insert(rd.point_id, index);
        }
        let mut point_exist: HashSet<u64> = HashSet::with_capacity(size);
        let mut register_addr_to_rd: HashMap<usize, usize> = HashMap::with_capacity(size);
        let mut polling_period_to_data: BTreeMap<u64, Vec<usize>> = BTreeMap::new();
        // Baseline entry for the default period; it is replaced below if any
        // item actually uses that period.
        polling_period_to_data.insert(self.default_polling_period_in_milli, Vec::with_capacity(size));
        // Count how many items share each polling period so the position
        // vectors below can be sized exactly.
        let mut tmp: HashMap<u64, usize> = HashMap::with_capacity(10);
        for data in &self.data {
            if let Some(ori) = tmp.get_mut(&data.polling_period_in_milli) {
                *ori += 1;
            } else {
                tmp.insert(data.polling_period_in_milli, 1);
            }
        }
        for (i, num) in tmp {
            // Data that should not be polled can opt out by carrying a very
            // large polling period.
            if i >= MAX_POLLING_PERIOD {
                continue;
            }
            let mut a_v: Vec<usize> = Vec::with_capacity(num);
            for (index, rd) in self.data.iter().enumerate() {
                if rd.polling_period_in_milli == i {
                    // try_into on usize -> usize is a no-op kept as-is.
                    a_v.push(index.try_into().unwrap());
                }
            }
            polling_period_to_data.insert(i, a_v);
        }
        for (index, data) in self.data.iter().enumerate() {
            // duplicate point id
            if point_exist.contains(&data.point_id) {
                let tip = format!("Invalid point (id :{}):\nThe point ID is already existed", data.point_id);
                return Err((index + 1, 8, tip));
            }
            point_exist.insert(data.point_id);
            // duplicate start address
            if register_addr_to_rd.contains_key(&data.from) {
                let tip = format!("Invalid point (id :{}):\nThe register address is already existed", data.point_id);
                return Err((index + 1, 4, tip));
            }
            register_addr_to_rd.insert(data.from, index);
        }
        // Check that no item's address range overlaps the previous one.
        let mut last_addr = usize::MIN;
        let mut keys: Vec<&usize> = register_addr_to_rd.keys().collect();
        keys.sort(); // sort by start address
        for addr in keys {
            let index = register_addr_to_rd.get(addr).unwrap();
            let rd = self.data.get(*index).unwrap();
            // The start address falls inside a range that is already in use.
            if rd.from < last_addr {
                let tip = format!("Invalid register point (id :{}):\nThe start address is in the range of addresses that are already in use", rd.point_id);
                return Err((index + 1, 4, tip));
            }
            last_addr = rd.from + rd.data_type.get_byte_count() as usize;
        }
        self.point_to_pos = point_to_pos;
        self.mem_addr_to_pos = register_addr_to_rd;
        self.polling_period_to_data = polling_period_to_data;
        Ok(())
    }
    // The registers referenced here are assumed to be already sorted by address.
    /// Merges the items polled at `period` into contiguous
    /// `(start_offset, byte_count)` request spans. With `is_transfer` set,
    /// only writable items are included.
    ///
    /// NOTE(review): `d.from - last.from` relies on the "already sorted"
    /// assumption above; an unsorted position list would underflow (panic in
    /// debug builds) — confirm the invariant holds for all callers.
    pub fn create_request(&self, period: u64, is_transfer: bool) -> Vec<(usize, usize)> {
        return if let Some(positions) = self.polling_period_to_data.get(&period) {
            let mut off_set = 0usize;
            let mut num_of_registers = 0usize;
            let mut result = Vec::with_capacity(positions.len());
            let mut last_index: Option<usize> = None;
            for index in 0..positions.len() {
                let d = &self.data[positions[index]];
                if is_transfer && !d.is_writable {
                    continue;
                }
                let current_byte_num = d.data_type.get_byte_count() as usize;
                if last_index.is_some() {
                    let last = &self.data[positions[last_index.unwrap()]];
                    let last_byte_num = last.data_type.get_byte_count() as usize;
                    // addresses are not contiguous: close the current span
                    if d.from - last.from != last_byte_num {
                        result.push((off_set, num_of_registers));
                        off_set = d.from;
                        num_of_registers = current_byte_num;
                    } else {
                        // contiguous: extend the current span
                        num_of_registers += current_byte_num;
                    }
                    last_index = Some(index);
                } else {
                    // first item found: open the first span
                    off_set = d.from;
                    num_of_registers = current_byte_num;
                    last_index = Some(index);
                }
            }
            // flush the span still open at the end of the scan
            if num_of_registers > 0 {
                result.push((off_set, num_of_registers));
            }
            result.shrink_to_fit();
            result
        } else {
            vec![]
        };
    }
}
impl MemoryPosixTp {
    /// Loads the transport from a CSV or Excel (`xlsx`/`xls`) file. Errors
    /// are `(row, column)` coordinates; `(0, 0)` means the file itself could
    /// not be read or converted.
    pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, (usize, usize)> {
        let content = std::fs::read(&path).map_err(|_| (0, 0))?;
        // let content = if env::IS_ENCRYPT {
        //     decrypt_vec(content.as_slice())
        // } else {
        //     content
        // };
        let extension = path.as_ref().extension();
        let csv_bytes = if let Some(suffix) = extension {
            if suffix.eq("xlsx") || suffix.eq("xls") {
                let mut r = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
                if r.is_empty() {
                    return Err((0, 0));
                }
                // NOTE(review): takes the LAST sheet via pop(); the
                // measurement loaders in this crate take the first sheet —
                // confirm which is intended.
                r.pop().unwrap()
            } else {
                content
            }
        } else {
            content
        };
        Self::from_csv_bytes(csv_bytes.as_slice())
    }
    /// Loads the transport from a plain CSV file.
    pub fn from_csv(path: &str) -> Result<Self, (usize, usize)> {
        let content = std::fs::read(path).map_err(|_| (0, 0))?;
        // if env::IS_ENCRYPT {
        //     let plain_t = decrypt_vec(content.as_slice());
        //     ModbusTcpClientTp::from_csv_bytes(plain_t.as_slice())
        // } else {
        //     ModbusTcpClientTp::from_csv_bytes(content.as_slice())
        // }
        Self::from_csv_bytes(content.as_slice())
    }
    /// Parses the transport from raw CSV bytes: four header cells in column 1
    /// (name, connection count, optional path, transfer flag), then one
    /// 9-column-wide section per connection starting at column 3.
    pub fn from_csv_bytes(content: &[u8]) -> Result<Self, (usize, usize)> {
        let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
        let content = content_new.as_slice();
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        let rc = (0usize, 1);
        let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (1usize, 1);
        let conn_num: usize =
            csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (2usize, 1);
        let path_tmp =
            csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        // A blank cell means no path was configured.
        let path = if path_tmp.trim().is_empty() {
            None
        } else {
            Some(path_tmp)
        };
        // Missing transfer flag defaults to FALSE; comparison is
        // case-insensitive.
        let rc = (3usize, 1);
        let is_transfer =
            csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).unwrap_or("FALSE".to_string());
        let is_transfer = if is_transfer.to_uppercase() == "TRUE" {
            true
        } else {
            false
        };
        // Each connection section is 9 columns wide, offset 3 columns in.
        let mut connections = Vec::with_capacity(conn_num);
        for i in 0..conn_num {
            let connection = MemConnection::from_csv_records_posix(content, i * 9 + 3)?;
            connections.push(connection);
        }
        Ok(MemoryPosixTp {
            id: 0,
            name,
            path,
            is_transfer,
            connections,
        })
    }
    /// Returns the point ids of every data item across all connections.
    pub fn get_point_ids(&self) -> Vec<u64> {
        let mut size = 0;
        for conn in &self.connections {
            size += conn.data.len();
            // One extra reserved slot per connection; affects capacity only,
            // the result is not padded.
            size += 1;
        }
        let mut r: Vec<u64> = Vec::with_capacity(size);
        for conn in &self.connections {
            for rd in &conn.data {
                r.push(rd.point_id)
            }
        }
        r
    }
    // Export CSV file content
    /// Stub: export is not implemented yet — `text_map` is unused and the
    /// result is always the empty string.
    pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
        "".to_string()
    }
}
impl MemorySystemVTp {
    /// Loads the transport from a CSV or Excel (`xlsx`/`xls`) file. Errors
    /// are `(row, column)` coordinates; `(0, 0)` means the file itself could
    /// not be read or converted.
    pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, (usize, usize)> {
        let content = std::fs::read(&path).map_err(|_| (0, 0))?;
        // let content = if env::IS_ENCRYPT {
        //     decrypt_vec(content.as_slice())
        // } else {
        //     content
        // };
        let extension = path.as_ref().extension();
        let csv_bytes = if let Some(suffix) = extension {
            if suffix.eq("xlsx") || suffix.eq("xls") {
                let mut r = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
                if r.is_empty() {
                    return Err((0, 0));
                }
                // NOTE(review): takes the LAST sheet via pop(); the
                // measurement loaders take the first — confirm intended.
                r.pop().unwrap()
            } else {
                content
            }
        } else {
            content
        };
        Self::from_csv_bytes(csv_bytes.as_slice())
    }
    /// Loads the transport from a plain CSV file.
    pub fn from_csv(path: &str) -> Result<Self, (usize, usize)> {
        let content = std::fs::read(path).map_err(|_| (0, 0))?;
        // if env::IS_ENCRYPT {
        //     let plain_t = decrypt_vec(content.as_slice());
        //     ModbusTcpClientTp::from_csv_bytes(plain_t.as_slice())
        // } else {
        //     ModbusTcpClientTp::from_csv_bytes(content.as_slice())
        // }
        Self::from_csv_bytes(content.as_slice())
    }
    /// Parses the transport from raw CSV bytes: a header in column 1 (name,
    /// point count, path, identifier, total size, default polling period,
    /// lock method, optional mutex number, transfer flag), then `point_num`
    /// data rows starting at column 3.
    pub fn from_csv_bytes(content: &[u8]) -> Result<Self, (usize, usize)> {
        let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
        let content = content_new.as_slice();
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        let rc = (0usize, 1);
        let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (1usize, 1);
        let point_num: usize =
            csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (2usize, 1);
        let path = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (3usize, 1);
        let identifier = csv_i32(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        // 5th line: total size (mandatory here, unlike the POSIX variant)
        let rc = (4usize, 1);
        let total_size = csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        // 6th line: default polling period (empty cell -> module default)
        let rc = (5usize, 1);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let default_polling_period_in_milli: u64 = if s.is_empty() {
            DEFAULT_POLLING_PERIOD_IN_MILLI
        } else {
            s.parse().map_err(|_| rc)?
        };
        // 7th line: lock method. NOTE(review): `ok_or(rc).unwrap_or_default()`
        // collapses a missing cell to "" (MemLock::None) instead of erroring.
        let rc = (6usize, 1);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc).unwrap_or_default();
        let lock_method = match s.trim().to_uppercase().as_str() {
            "NONE" => MemLock::None,
            "SEMAPHORE" => MemLock::Semaphore,
            "MUTEX" => {
                // 8th line: mutex number, only consumed for MUTEX
                let rc = (7usize, 1);
                let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
                let mutex_num = csv_usize(&record, rc.1).ok_or(rc)?;
                MemLock::Mutex(mutex_num)
            }
            _ => MemLock::None,
        };
        // 9th line: transfer flag.
        // NOTE(review): the row coordinate (6) repeats the 7th line's value
        // and ignores the extra record consumed by the MUTEX branch, so error
        // positions reported from here look stale — confirm.
        let rc = (6usize, 1);
        let is_transfer =
            csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).unwrap_or("FALSE".to_string());
        let is_transfer = if is_transfer.to_uppercase() == "TRUE" {
            true
        } else {
            false
        };
        // Data rows: re-read from the top, skip the header row, then parse
        // `point_num` rows starting at column 3.
        let mut mem_data = Vec::with_capacity(point_num);
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        let rc = (0, 3);
        records.next().ok_or(rc)?.map_err(|_| rc)?;
        for row in 1..=point_num {
            let rc = (row, 3);
            let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
            let data = MemData::parse_mem_data(&record, rc.0, rc.1)?;
            mem_data.push(data);
        }
        let mut connection = MemConnection {
            name: name.clone(),
            base_addr: 0,
            total_size: Some(total_size),
            default_polling_period_in_milli,
            data: mem_data,
            lock_method,
            ..Default::default()
        };
        // Build lookup tables and validate; drop the error message, keep the
        // (row, column) coordinates.
        connection.create_data_config().map_err(|(r, c, _)| (r, c))?;
        Ok(MemorySystemVTp {
            id: 0,
            name,
            path,
            identifier,
            is_transfer,
            connection,
        })
    }
    /// Returns the point ids of every data item in the single connection.
    pub fn get_point_ids(&self) -> Vec<u64> {
        self.connection.data.iter().map(|d| d.point_id).collect()
    }
    // Export CSV file content
    /// Stub: export is not implemented yet — `text_map` is unused and the
    /// result is always the empty string.
    pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
        "".to_string()
    }
}
impl MemData {
    /// Parses one data row: writable flag, start offset, data type, polling
    /// period and point id, starting at column `first_col`. Errors carry the
    /// `(row, column)` of the offending cell.
    fn parse_mem_data(
        record: &StringRecord,
        row: usize,
        first_col: usize,
    ) -> Result<Self, (usize, usize)> {
        let rc = (row, first_col);
        let s = csv_str(record, rc.1).ok_or(rc)?;
        // Fix: accept any casing of TRUE/FALSE, consistent with the other
        // boolean cells parsed in this crate (which all uppercase first);
        // the old match only recognized exact upper-case "TRUE". Anything
        // else still defaults to read-only.
        let is_writable = s.to_uppercase() == "TRUE";
        let rc = (row, first_col + 1);
        let from = csv_usize(record, rc.1).ok_or(rc)?;
        let rc = (row, first_col + 2);
        let s = csv_str(record, rc.1).ok_or(rc)?;
        let data_type = DataType::from_str(s).map_err(|_| rc)?;
        let rc = (row, first_col + 3);
        let polling_period_in_milli = csv_u64(record, rc.1).ok_or(rc)?;
        // id of the associated measurement point
        let rc = (row, first_col + 4);
        let point_id = csv_u64(record, rc.1).ok_or(rc)?;
        Ok(MemData {
            is_writable,
            from,
            data_type,
            polling_period_in_milli,
            point_id,
        })
    }
}
\ No newline at end of file
use core::fmt;
use std::{collections::{BTreeMap, HashMap, HashSet}, str::FromStr};
use std::{cmp::min, convert::TryInto};
use std::fmt::{Display, Formatter};
#[cfg(target_family = "unix")]
use std::path::PathBuf;
use csv::StringRecord;
use serde::{Deserialize, Serialize};
use crate::{create_parity, csv_str, csv_string, csv_u16, csv_u32, csv_u64, csv_u8, csv_usize, DEFAULT_TCP_CLIENT_LIMIT, get_csv_str, MAX_POLLING_PERIOD, SerialPara, SerialParity, UNKNOWN_POINT_ID, UNKNOWN_TCP_PORT};
use crate::excel::{excel_bytes_to_csv_bytes, transfer_to_utf8};
use crate::prop::DataType;
// const DEFAULT_MAX_ADDR_READ_BINARY: u16 = 65535;
// const DEFAULT_MAX_ADDR_WRITE_COILS: u16 = 65535;
// const DEFAULT_MAX_ADDR_WRITE_REGISTERS: u16 = 65535;
// Addresses are stored as u16, so the maximum address value is 65535.
/// Default maximum register count for a numeric read request.
const DEFAULT_MAX_COUNT_READ_NUMERIC: u16 = 125;
/// Default maximum bit count for a bit/coil read request.
const DEFAULT_MAX_COUNT_READ_BIT: u16 = 2000;
/// Default maximum coil count for a write-coils request.
const DEFAULT_MAX_COUNT_WRITE_COILS: u16 = 1968;
/// Default maximum register count for a write-registers request.
const DEFAULT_MAX_COUNT_WRITE_REGISTERS: u16 = 120;
/// Default request timeout in milliseconds.
const DEFAULT_TIMEOUT_IN_MILLI: u64 = 3000;
/// Default polling period in milliseconds.
const DEFAULT_POLLING_PERIOD_IN_MILLI: u64 = 5000;
// The default is 20 ms.
pub const DEFAULT_DELAY_BETWEEN_REQUESTS: u64 = 20;
/**
 * @api {枚举_注册类型} /RegisterType RegisterType
 * @apiPrivate
 * @apiGroup A_Enum
 * @apiSuccess {String} COILS COILS
 * @apiSuccess {String} DISCRETE DISCRETE
 * @apiSuccess {String} INPUT INPUT
 * @apiSuccess {String} HOLDING HOLDING
 */
/// Modbus register area addressed by a request.
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub enum RegisterType {
    COILS,
    DISCRETE,
    INPUT,
    HOLDING,
}
/**
 * @api {枚举_MbProtocolType} /MbProtocolType MbProtocolType
 * @apiPrivate
 * @apiGroup A_Enum
 * @apiSuccess {String} ENCAP ENCAP
 * @apiSuccess {String} XA XA
 * @apiSuccess {String} RTU RTU
 */
/// Modbus protocol/framing variant used by a channel.
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
pub enum MbProtocolType {
    ENCAP,
    XA,
    RTU,
}
impl Display for MbProtocolType {
    /// Displays the bare variant name by delegating to the derived `Debug`
    /// rendering.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
impl Display for RegisterType {
    /// Displays the bare variant name by delegating to the derived `Debug`
    /// rendering.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
impl From<&str> for MbProtocolType {
    /// Parses a protocol name case-insensitively.
    ///
    /// # Panics
    /// Panics on any input other than "ENCAP"/"XA"/"RTU" (any casing), so
    /// callers must validate first; a `FromStr` returning `Result` would be
    /// the safer shape, but changing that would break existing callers.
    fn from(value: &str) -> Self {
        // let value = serialized_string.to_uppercase();
        match value.to_uppercase().as_str() {
            "ENCAP" => MbProtocolType::ENCAP,
            "XA" => MbProtocolType::XA,
            "RTU" => MbProtocolType::RTU,
            _ => panic!("Error deserializing MbProtoType"),
        }
    }
}
impl FromStr for RegisterType {
    type Err = ();
    /// Parses a register-area name into a [`RegisterType`].
    ///
    /// Fix: comparison is now case-insensitive, consistent with how
    /// `MbProtocolType::from` parses its names; every previously accepted
    /// (upper-case) input still parses identically.
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        match value.to_uppercase().as_str() {
            "DISCRETE" => Ok(RegisterType::DISCRETE),
            "COILS" => Ok(RegisterType::COILS),
            "INPUT" => Ok(RegisterType::INPUT),
            "HOLDING" => Ok(RegisterType::HOLDING),
            _ => Err(()),
        }
    }
}
/**
 * @api {Modbus注册信息} /ModbusRegisterData ModbusRegisterData
 * @apiPrivate
 * @apiGroup A_Object
 * @apiSuccess {RegisterType} register_type register_type
 * @apiSuccess {u16} from from
 * @apiSuccess {DataType} data_type data_type
 * @apiSuccess {bool} should_new_request 是否必须新开一个请求
 * @apiSuccess {u64} polling_period_in_milli 轮询周期,毫秒
 * @apiSuccess {u64} point_id 对应的测点Id
 */
/// One Modbus register item: where it lives, its type, and which measurement
/// point it feeds.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct RegisterData {
    pub register_type: RegisterType,
    /// Start register address.
    pub from: u16,
    pub data_type: DataType,
    // Whether this item must start a new request.
    pub should_new_request: bool,
    // Polling period in milliseconds.
    pub polling_period_in_milli: u64,
    // Id of the associated measurement point.
    pub point_id: u64,
}
/**
* @api {ModbusTcpClientTp} /ModbusTcpClientTp ModbusTcpClientTp
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 通道id
* @apiSuccess {String} name 通道名称
* @apiSuccess {tuple} tcp_server 服务端的ip和port,tuple格式为(ip:String, port:u32)
* @apiSuccess {MbConnection[]} connections connections
*/
/// Modbus TCP client transport: one server endpoint plus its connections.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct ModbusTcpClientTp {
    /// Transport id.
    pub id: u64,
    /// Transport name.
    pub name: String,
    /// Server-side (ip, port) to connect to.
    pub tcp_server: (String, u32),
    /// Connections carried over this transport.
    pub connections: Vec<MbConnection>,
}
/**
* @api {ModbusTcpServerTp} /ModbusTcpServerTp ModbusTcpServerTp
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 通道id
* @apiSuccess {String} name 通道名称
* @apiSuccess {u16} tcp_server_port 服务的port
* @apiSuccess {tuple[]} connections 数组,tuple格式为(String, MbConnection)
*/
/// Modbus TCP server transport: a listening port plus keyed connections.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct ModbusTcpServerTp {
    /// Transport id.
    pub id: u64,
    /// Transport name.
    pub name: String,
    /// Port this server listens on.
    pub tcp_server_port: u16,
    /// Connections keyed by a string — presumably the client address; TODO confirm.
    pub connections: Vec<(String, MbConnection)>,
}
/**
* @api {ModbusRtuClientTp} /ModbusRtuClientTp ModbusRtuClientTp
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 通道id
* @apiSuccess {String} name 通道名称
* @apiSuccess {SerialPara} para 串口参数
* @apiSuccess {MbConnection[]} connections connections
*/
/// Modbus RTU client transport over a serial port.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Default)]
pub struct ModbusRtuClientTp {
    /// Transport id.
    pub id: u64,
    /// Transport name.
    pub name: String,
    /// Serial-port parameters.
    pub para: SerialPara,
    /// Connections carried over this transport.
    pub connections: Vec<MbConnection>,
}
/**
* @api {ModbusRtuServerTp} /ModbusRtuServerTp ModbusRtuServerTp
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 通道id
* @apiSuccess {String} name 通道名称
* @apiSuccess {SerialPara} para 串口参数
* @apiSuccess {MbConnection} connection connection
*/
/// Modbus RTU server transport over a serial port (single connection).
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct ModbusRtuServerTp {
    /// Transport id.
    pub id: u64,
    /// Transport name.
    pub name: String,
    /// Serial-port parameters.
    pub para: SerialPara,
    /// The one connection served by this transport.
    pub connection: MbConnection,
}
impl Default for ModbusRtuServerTp {
fn default() -> Self {
ModbusRtuServerTp {
id: 0,
name: String::new(),
para: SerialPara::default(),
connection: MbConnection {
protocol_type: MbProtocolType::RTU,
..Default::default()
},
}
}
}
/**
* @api {Modbus通道连接信息} /MbConnection MbConnection
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u8} slave_id slave_id
* @apiSuccess {String} name 名称
* @apiSuccess {MbProtocolType} protocol_type protocol类型
* @apiSuccess {u16} max_read_register_count max_read_register_count
* @apiSuccess {u16} max_read_bit_count max_read_bit_count
* @apiSuccess {u16} max_write_register_count max_write_register_count
* @apiSuccess {u16} max_write_bit_count max_write_bit_count
* @apiSuccess {u64} timeout_in_milli 超时时间_毫秒
* @apiSuccess {u64} delay_between_requests 两条请求直接的间隔
* @apiSuccess {u64} point_id 通道状态对应的测点号
* @apiSuccess {u64} default_polling_period_in_milli 默认的轮询周期
* @apiSuccess {ModbusRegisterData[]} mb_data_configure register settings
* @apiSuccess {Map} point_id_to_rd HashMap<point_id:u64, position_of_register_data:u16>
* @apiSuccess {Map} register_addr_to_rd HashMap<寄存器地址:u16, setting中ModbusRegisterData[]的位置:u16>
* @apiSuccess {Map} polling_period_to_data 轮询周期不同的数据,有序Map<轮询周期_毫秒数:u64, position:u16[]>
* @apiSuccess {u8} [coil_write_code] if write code is set, yt and yk will use this code to send
* @apiSuccess {u8} [holding_write_code] holding_write_code
*/
/// One Modbus slave connection: request limits, optional write codes, the
/// register configuration, and the lookup tables derived from it by
/// `create_data_config`.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct MbConnection {
    /// Modbus slave id.
    pub slave_id: u8,
    /// Connection name.
    pub name: String,
    /// Framing variant used on this connection.
    pub protocol_type: MbProtocolType,
    /// Max registers per read request.
    pub max_read_register_count: u16,
    /// Max bits per read request.
    pub max_read_bit_count: u16,
    /// Max registers per write request.
    pub max_write_register_count: u16,
    /// Max bits per write request.
    pub max_write_bit_count: u16,
    /// Request timeout in milliseconds.
    pub timeout_in_milli: u64,
    /// Delay between two consecutive requests, milliseconds.
    pub delay_between_requests: u64,
    /// Point id reporting the connection status.
    pub point_id: u64,
    // Default polling period in milliseconds.
    pub default_polling_period_in_milli: u64,
    /// Register settings.
    pub mb_data_configure: Vec<RegisterData>,
    /// Key is point id, value is position of register data.
    pub point_id_to_rd: HashMap<u64, u16>,
    /// Key: register start address; value: position in `mb_data_configure`.
    pub register_addr_to_rd: HashMap<u16, u16>,
    /// Registers grouped by polling period; key is period in milli, value is positions.
    pub polling_period_to_data: BTreeMap<u64, Vec<u16>>,
    // If set, yt and yk writes use this coil function code (parser accepts 0x05/0x0F).
    pub coil_write_code: Option<u8>,
    /// If set, holding writes use this function code (parser accepts 0x06/0x10).
    pub holding_write_code: Option<u8>,
}
impl Default for MbConnection {
fn default() -> Self {
MbConnection {
slave_id: 1,
name: "new".to_string(),
protocol_type: MbProtocolType::ENCAP,
max_read_register_count: DEFAULT_MAX_COUNT_READ_BIT,
max_read_bit_count: DEFAULT_MAX_COUNT_WRITE_REGISTERS,
max_write_register_count: DEFAULT_MAX_COUNT_WRITE_REGISTERS,
max_write_bit_count: DEFAULT_MAX_COUNT_WRITE_COILS,
timeout_in_milli: DEFAULT_TIMEOUT_IN_MILLI,
delay_between_requests: 0,
point_id: 0,
default_polling_period_in_milli: DEFAULT_POLLING_PERIOD_IN_MILLI,
mb_data_configure: vec![],
point_id_to_rd: HashMap::new(),
register_addr_to_rd: HashMap::new(),
polling_period_to_data: BTreeMap::new(),
coil_write_code: None,
holding_write_code: None,
}
}
}
/// Errors raised while validating an `MbConnection` configuration.
pub enum MbConnectionError {
    /// A duplicated value — presumably the repeated point id; TODO confirm at use sites.
    Repeat(u64),
}
impl MbConnection {
    /// Builds the read-request lists for one polling `period`.
    /// Returned tuple order is: coils, discretes, inputs, holdings;
    /// each entry is a `(start_address, register_count)` request.
    #[allow(clippy::type_complexity)]
    pub fn create_request(
        &self,
        period: u64,
    ) -> (Vec<(u16, u16)>, Vec<(u16, u16)>, Vec<(u16, u16)>, Vec<(u16, u16)>) {
        // Per-request bit-read limit (0 means "use the default").
        // NOTE(review): bit reads are clamped against DEFAULT_MAX_COUNT_WRITE_COILS;
        // DEFAULT_MAX_COUNT_READ_BIT looks like the intended constant — confirm.
        let max_read_bit_count = if self.max_read_bit_count > 0 {
            min(DEFAULT_MAX_COUNT_WRITE_COILS, self.max_read_bit_count)
        } else {
            DEFAULT_MAX_COUNT_WRITE_COILS
        };
        // Per-request word-read limit (0 means "use the default").
        let max_register_count = if self.max_read_register_count > 0 {
            min(DEFAULT_MAX_COUNT_READ_NUMERIC, self.max_read_register_count)
        } else {
            DEFAULT_MAX_COUNT_READ_NUMERIC
        };
        return if let Some(positions) = self.polling_period_to_data.get(&period) {
            let mut registers = positions.clone();
            // Sort by register start address before packing requests.
            registers.sort_by(|a, b| {
                self.mb_data_configure[*a as usize]
                    .from
                    .partial_cmp(&self.mb_data_configure[*b as usize].from)
                    .unwrap()
            });
            let register_type = RegisterType::COILS;
            let coils = self.get_request(&registers, register_type, max_read_bit_count);
            let register_type = RegisterType::DISCRETE;
            let discretes = self.get_request(&registers, register_type, max_read_bit_count);
            let register_type = RegisterType::INPUT;
            let inputs = self.get_request(&registers, register_type, max_register_count);
            let register_type = RegisterType::HOLDING;
            let holdings = self.get_request(&registers, register_type, max_register_count);
            (coils, discretes, inputs, holdings)
        } else {
            // No registers configured for this period: return empty lists.
            (vec![], vec![], vec![], vec![])
        };
    }
// 这里传入的register是已经按照地址排好序
fn get_request(&self, registers: &Vec<u16>, register_type: RegisterType, limit: u16) -> Vec<(u16, u16)> {
let mut off_set = 0u16;
let mut num_of_registers = 0u16;
let mut result: Vec<(u16, u16)> = Vec::with_capacity(registers.len());
let mut last_index = None;
for index in 0..registers.len() {
let d = &self.mb_data_configure[registers[index] as usize];
let current_word_num = get_register_count(d);
// 寄存器类型发生了改变
if d.register_type != register_type {
continue;
} else if last_index.is_some() {
let last = &self.mb_data_configure[registers[last_index.unwrap()] as usize];
let last_word_num = get_register_count(last);
// 地址不连续
if d.from - last.from != last_word_num
// 配置了必须重开一个请求的参数,且为true
|| d.should_new_request
// 达到一次Request的最大地址范围
|| num_of_registers + current_word_num > limit
{
result.push((off_set, num_of_registers));
off_set = d.from;
num_of_registers = current_word_num;
} else {
// 以上情况都未发生
num_of_registers += current_word_num;
}
last_index = Some(index);
} else {
// first found
off_set = d.from;
num_of_registers = current_word_num;
last_index = Some(index);
}
}
// 搜索到末尾
if num_of_registers > 0 {
result.push((off_set, num_of_registers));
}
result.shrink_to_fit();
result
}
fn create_csv_row(&self, index: usize) -> String {
let mut ch_code = ";".to_string();
if let Some(c) = self.coil_write_code {
ch_code = format!("{};", c);
}
if let Some(h) = self.holding_write_code {
ch_code += h.to_string().as_str();
}
let mut timeout_str = self.timeout_in_milli.to_string();
if self.delay_between_requests != 0 {
timeout_str += &format!(";{}", self.delay_between_requests);
}
match index {
0 => self.mb_data_configure.len().to_string(),
1 => self.slave_id.to_string(),
2 => self.protocol_type.to_string().to_uppercase(),
3 => self.max_read_register_count.to_string(),
4 => self.max_read_bit_count.to_string(),
5 => self.max_write_register_count.to_string(),
6 => self.max_write_bit_count.to_string(),
7 => self.default_polling_period_in_milli.to_string(),
8 => timeout_str,
9 => self.point_id.to_string(),
10 => ch_code,
_ => "unknown".to_string(),
}
}
pub fn create_data_config(&mut self) -> Result<(), (usize, usize, String)> {
let size = self.mb_data_configure.len();
let mut point_id_to_rd: HashMap<u64, u16> = HashMap::with_capacity(size);
let mut register_addr_to_rd: HashMap<u16, u16> = HashMap::with_capacity(size);
let mut polling_period_to_data: BTreeMap<u64, Vec<u16>> = BTreeMap::new();
polling_period_to_data.insert(self.default_polling_period_in_milli, Vec::with_capacity(size));
// 开始统计不同轮询周期的数据,同时检查地址是否超过最大范围
let mut tmp: HashMap<u64, u16> = HashMap::with_capacity(10);
for rd in &self.mb_data_configure {
if let Some(ori) = tmp.get_mut(&rd.polling_period_in_milli) {
*ori += 1;
} else {
tmp.insert(rd.polling_period_in_milli, 1);
}
}
for (i, num) in tmp {
// 对于不需要采集的数据可以通过设置一个很大的轮询周期
if i >= MAX_POLLING_PERIOD {
continue;
}
let mut a_v: Vec<u16> = Vec::with_capacity(num as usize);
for (index, rd) in self.mb_data_configure.iter().enumerate() {
if rd.polling_period_in_milli == i {
a_v.push(index.try_into().unwrap());
}
}
polling_period_to_data.insert(i, a_v);
}
for (index, rd) in self.mb_data_configure.iter().enumerate() {
// 越界
// if index >= u16::MAX as usize {
// let tip = format!("Invalid register point (id :{}):\nThe number of register points is out of range", rd.point_id);
// return Err((index, 1, tip));
// }
// 测点号重复
if point_id_to_rd.contains_key(&rd.point_id) {
let tip = format!("Invalid register point (id :{}):\nThe point ID is already existed", rd.point_id);
return Err((index + 1, 8, tip));
}
point_id_to_rd.insert(rd.point_id, index.try_into().unwrap());
// 起始地址重复
if register_addr_to_rd.contains_key(&rd.from) {
let tip = format!("Invalid register point (id :{}):\nThe register address is already existed", rd.point_id);
return Err((index + 1, 4, tip));
}
register_addr_to_rd.insert(rd.from, index.try_into().unwrap());
}
// 判断地址之间有没有互相覆盖
let mut last_addr = u16::MIN;
let mut keys: Vec<&u16> = register_addr_to_rd.keys().collect();
keys.sort(); // 按照起始地址排序
for addr in keys {
let index = register_addr_to_rd.get(addr).unwrap();
let rd = self.mb_data_configure.get(*index as usize).unwrap();
// 如果开始地址在已经被使用的地址范围
if rd.from < last_addr {
let tip = format!("Invalid register point (id :{}):\nThe start address is in the range of addresses that are already in use", rd.point_id);
return Err(((index + 1) as usize, 4, tip));
}
last_addr = rd.from + get_register_count(rd);
}
self.point_id_to_rd = point_id_to_rd;
self.register_addr_to_rd = register_addr_to_rd;
self.polling_period_to_data = polling_period_to_data;
Ok(())
}
}
impl ModbusTcpClientTp {
pub fn from_file(path: &str) -> Result<Self, (usize, usize)> {
let content = std::fs::read(path).map_err(|_| (0, 0))?;
// let content = if env::IS_ENCRYPT {
// decrypt_vec(content.as_slice())
// } else {
// content
// };
let csv_bytes = if path.ends_with(".xlsx") || path.ends_with(".xls") {
let r = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
if r.is_empty() {
return Err((0, 0));
}
r[0].clone()
} else {
content
};
Self::from_csv_bytes(csv_bytes.as_slice())
}
pub fn from_csv(path: &str) -> Result<ModbusTcpClientTp, (usize, usize)> {
let content = std::fs::read(path).map_err(|_| (0, 0))?;
// if env::IS_ENCRYPT {
// let plain_t = decrypt_vec(content.as_slice());
// ModbusTcpClientTp::from_csv_bytes(plain_t.as_slice())
// } else {
// ModbusTcpClientTp::from_csv_bytes(content.as_slice())
// }
ModbusTcpClientTp::from_csv_bytes(content.as_slice())
}
pub fn from_csv_bytes(content: &[u8]) -> Result<ModbusTcpClientTp, (usize, usize)> {
let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
let content = content_new.as_slice();
let tp = ModbusTcpClientTp::from_csv_records(content, 0)?;
let rc = (2usize, 1);
// check ip address format
tp.tcp_server.0.parse::<std::net::Ipv4Addr>().map_err(|_| rc)?;
Ok(tp)
}
fn from_csv_records(
content: &[u8],
offset: usize,
) -> Result<ModbusTcpClientTp, (usize, usize)> {
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(content);
let mut records = rdr.records();
// 1st line
let rc = (0usize, 1 + offset);
let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
// 2nd line
let rc = (1usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let point_num = csv_usize(&record, rc.1).ok_or(rc)?;
if point_num as u16 > u16::MAX {
return Err(rc);
}
// 3rd line
let rc = (2usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let tcp_server_ip = csv_string(&record, rc.1).ok_or(rc)?;
// 4th line
let rc = (3usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let tcp_server_port = csv_u32(&record, rc.1).ok_or(rc)?;
// 5th line
let rc = (4usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let slave_id = csv_u8(&record, rc.1).ok_or(rc)?;
// 6th line
let rc = (5usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let type_str = csv_str(&record, rc.1).ok_or(rc)?;
let protocol_type = MbProtocolType::from(type_str);
// 7th line
let rc = (6usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let s = csv_str(&record, rc.1).ok_or(rc)?;
let max_read_register_count: u16 = if s.is_empty() {
DEFAULT_MAX_COUNT_READ_NUMERIC
} else {
s.parse().map_err(|_| rc)?
};
// 8th line
let rc = (7usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let s = csv_str(&record, rc.1).ok_or(rc)?;
let max_read_bit_count: u16 = if s.is_empty() {
DEFAULT_MAX_COUNT_READ_BIT
} else {
s.parse().map_err(|_| rc)?
};
// 9th line
let rc = (8usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let s = csv_str(&record, rc.1).ok_or(rc)?;
let max_write_register_count: u16 = if s.is_empty() {
DEFAULT_MAX_COUNT_WRITE_REGISTERS
} else {
s.parse().map_err(|_| rc)?
};
// 10th line
let rc = (9usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let s = csv_str(&record, rc.1).ok_or(rc)?;
let max_write_bit_count: u16 = if s.is_empty() {
DEFAULT_MAX_COUNT_WRITE_COILS
} else {
s.parse().map_err(|_| rc)?
};
// 11th line
let rc = (10usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let s = csv_str(&record, rc.1).ok_or(rc)?;
let default_polling_period_in_milli: u64 = if s.is_empty() {
DEFAULT_POLLING_PERIOD_IN_MILLI
} else {
s.parse().map_err(|_| rc)?
};
// 12th line
let rc = (11usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let s = csv_str(&record, rc.1).ok_or(rc)?;
let (timeout_in_milli, delay_between_requests) = if s.is_empty() {
(DEFAULT_TIMEOUT_IN_MILLI, 0u64)
} else {
let times: Vec<&str> = s.split(';').collect();
if times.len() == 2 {
(
times[0].parse().map_err(|_| rc)?,
times[1].parse().map_err(|_| rc)?,
)
} else if times.len() == 1 {
(times[0].parse().map_err(|_| rc)?, 0u64)
} else {
(DEFAULT_TIMEOUT_IN_MILLI, 0u64)
}
};
// 13th line
let rc = (12usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let s = csv_str(&record, rc.1).ok_or(rc)?;
let point_id: u64 = if s.is_empty() {
UNKNOWN_POINT_ID
} else {
s.parse().map_err(|_| rc)?
};
// 14th line
let rc = (13usize, 1 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
let (coil_write_code, holding_write_code) = if let Some(codes) = csv_str(&record, rc.1) {
let codes: Vec<&str> = codes.split(';').collect();
if codes.len() == 2 {
let c1 = if let Ok(code) = codes[0].parse::<u8>() {
if code != 0x05 && code != 0x0F {
None
} else {
Some(code)
}
} else {
None
};
let c2 = if let Ok(code) = codes[1].parse::<u8>() {
if code != 0x06 && code != 0x10 {
None
} else {
Some(code)
}
} else {
None
};
(c1, c2)
} else {
(None, None)
}
} else {
(None, None)
};
// 15th line ...
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(content);
let mut records = rdr.records();
let rc = (0, 3 + offset);
records.next().ok_or(rc)?.map_err(|_| rc)?;
let mut mb_data_configure: Vec<RegisterData> = Vec::with_capacity(point_num);
for row in 1..=point_num {
let rc = (row, 3 + offset);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
mb_data_configure.push(RegisterData::parse_register_data(&record, rc.0, rc.1)?);
}
let mut conn = MbConnection {
slave_id,
name: name.clone(),
protocol_type,
max_read_register_count,
max_read_bit_count,
max_write_register_count,
max_write_bit_count,
timeout_in_milli,
delay_between_requests,
point_id,
default_polling_period_in_milli,
mb_data_configure,
coil_write_code,
holding_write_code,
..Default::default()
};
conn.create_data_config().map_err(|(r, c, _)|(r, c + offset))?;
Ok(ModbusTcpClientTp {
id: 0,
name: name.clone(),
tcp_server: (tcp_server_ip, tcp_server_port),
connections: vec![conn],
})
}
pub fn from_file2(path: &str) -> Result<Self, (usize, usize)> {
let content = std::fs::read(path).map_err(|_| (0, 0))?;
// let content = if env::IS_ENCRYPT {
// decrypt_vec(content.as_slice())
// } else {
// content
// };
let csv_bytes = if path.ends_with(".xlsx") || path.ends_with(".xls") {
let r = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
if r.is_empty() {
return Err((0, 0));
}
r[0].clone()
} else {
content
};
Self::from_csv_bytes2(csv_bytes.as_slice())
}
pub fn from_csv2(path: &str) -> Result<ModbusTcpClientTp, (usize, usize)> {
let content = std::fs::read(path).map_err(|_| (0, 0))?;
// if env::IS_ENCRYPT {
// let plain_t = decrypt_vec(content.as_slice());
// ModbusTcpClientTp::from_csv_bytes2(plain_t.as_slice())
// } else {
// ModbusTcpClientTp::from_csv_bytes2(content.as_slice())
// }
ModbusTcpClientTp::from_csv_bytes2(content.as_slice())
}
    /// Parses a multi-connection transport from raw CSV bytes: a 4-line
    /// transport header, then one connection block per connection, each
    /// offset 10 further columns (see `from_csv_records2`).
    pub fn from_csv_bytes2(content: &[u8]) -> Result<ModbusTcpClientTp, (usize, usize)> {
        let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
        let content = content_new.as_slice();
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        let rc = (0usize, 1);
        let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (1usize, 1);
        let conn_num: usize =
            csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        if conn_num == 0 {
            // The connection count must not be zero.
            return Err(rc);
        }
        let rc = (2usize, 1);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let tcp_server_ip = csv_string(&record, rc.1).ok_or(rc)?;
        tcp_server_ip
            .parse::<std::net::Ipv4Addr>()
            .map_err(|_| rc)?;
        let rc = (3usize, 1);
        let tcp_server_port: u32 =
            csv_u32(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let mut connections: Vec<MbConnection> = Vec::with_capacity(conn_num);
        for i in 0..conn_num {
            // Each connection block starts 10 columns after the previous one.
            let connection = ModbusTcpClientTp::from_csv_records2(content, i * 10 + 3)?;
            // Cross-check this connection against the ones already parsed.
            for conn in &connections {
                // Equal slave ids are allowed (lets duplicated addresses
                // across function codes be split over separate connections),
                // but then the status point ids must match.
                if conn.slave_id == connection.slave_id && conn.point_id != connection.point_id {
                    return Err((12, i * 10 + 4));
                }
                // The protocol type (XA or ENCAP) must be identical.
                if conn.protocol_type != connection.protocol_type {
                    return Err((3, i * 10 + 4));
                }
                // Register point ids must not repeat across connections.
                let mut row = 1;
                for rd in &connection.mb_data_configure {
                    if conn.point_id_to_rd.contains_key(&rd.point_id) {
                        return Err((row, i * 10 + 11));
                    }
                    row += 1;
                }
            }
            connections.push(connection);
        }
        Ok(ModbusTcpClientTp {
            id: 0,
            name,
            tcp_server: (tcp_server_ip, tcp_server_port),
            connections,
        })
    }
    /// Parses one `MbConnection` block from CSV content whose header column
    /// starts at `offset` (12 header lines, register rows at column
    /// `3 + offset`). Errors are `(row, column)` coordinates.
    fn from_csv_records2(
        content: &[u8],
        offset: usize,
    ) -> Result<MbConnection, (usize, usize)> {
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        // 1st line: connection name
        let rc = (0usize, 1 + offset);
        let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        // 2nd line: number of register points (must fit in u16)
        let rc = (1usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let point_num = csv_usize(&record, rc.1).ok_or(rc)?;
        if point_num > u16::MAX as usize {
            return Err(rc);
        }
        // 3rd line: slave id
        let rc = (2usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let slave_id = csv_u8(&record, rc.1).ok_or(rc)?;
        // 4th line: protocol type
        let rc = (3usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let type_str = csv_str(&record, rc.1).ok_or(rc)?;
        let protocol_type = MbProtocolType::from(type_str);
        // 5th line: max read register count (default when empty)
        let rc = (4usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let max_read_register_count: u16 = if s.is_empty() {
            DEFAULT_MAX_COUNT_READ_NUMERIC
        } else {
            s.parse().map_err(|_| rc)?
        };
        // 6th line: max read bit count (default when empty)
        let rc = (5usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let max_read_bit_count: u16 = if s.is_empty() {
            DEFAULT_MAX_COUNT_READ_BIT
        } else {
            s.parse().map_err(|_| rc)?
        };
        // 7th line: max write register count (default when empty)
        let rc = (6usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let max_write_register_count: u16 = if s.is_empty() {
            DEFAULT_MAX_COUNT_WRITE_REGISTERS
        } else {
            s.parse().map_err(|_| rc)?
        };
        // 8th line: max write bit count (default when empty)
        let rc = (7usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let max_write_bit_count: u16 = if s.is_empty() {
            DEFAULT_MAX_COUNT_WRITE_COILS
        } else {
            s.parse().map_err(|_| rc)?
        };
        // 9th line: default polling period (default when empty)
        let rc = (8usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let default_polling_period_in_milli: u64 = if s.is_empty() {
            DEFAULT_POLLING_PERIOD_IN_MILLI
        } else {
            s.parse().map_err(|_| rc)?
        };
        // 10th line: "timeout" or "timeout;delay" in milliseconds
        let rc = (9usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let (timeout_in_milli, delay_between_requests) = if s.is_empty() {
            (DEFAULT_TIMEOUT_IN_MILLI, 0u64)
        } else {
            let times: Vec<&str> = s.split(';').collect();
            if times.len() == 2 {
                (
                    times[0].parse().map_err(|_| rc)?,
                    times[1].parse().map_err(|_| rc)?,
                )
            } else if times.len() == 1 {
                (times[0].parse().map_err(|_| rc)?, 0u64)
            } else {
                (DEFAULT_TIMEOUT_IN_MILLI, 0u64)
            }
        };
        // 11th line: connection-status point id (default when empty)
        let rc = (10usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let s = csv_str(&record, rc.1).ok_or(rc)?;
        let point_id: u64 = if s.is_empty() {
            UNKNOWN_POINT_ID
        } else {
            s.parse().map_err(|_| rc)?
        };
        // 12th line: optional "coil;holding" write codes; only 0x05/0x0F
        // (coil) and 0x06/0x10 (holding) are accepted, anything else → None.
        let rc = (11usize, 1 + offset);
        let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
        let (coil_write_code, holding_write_code) = if let Some(codes) = csv_str(&record, rc.1) {
            let codes: Vec<&str> = codes.split(';').collect();
            if codes.len() == 2 {
                let c1 = if let Ok(code) = codes[0].parse::<u8>() {
                    if code != 0x05 && code != 0x0F {
                        None
                    } else {
                        Some(code)
                    }
                } else {
                    None
                };
                let c2 = if let Ok(code) = codes[1].parse::<u8>() {
                    if code != 0x06 && code != 0x10 {
                        None
                    } else {
                        Some(code)
                    }
                } else {
                    None
                };
                (c1, c2)
            } else {
                (None, None)
            }
        } else {
            (None, None)
        };
        // Register rows: re-read from the top with a fresh reader, skip the
        // header record, then take `point_num` rows at column `3 + offset`.
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        let rc = (0, 3 + offset);
        records.next().ok_or(rc)?.map_err(|_| rc)?;
        let mut mb_data_configure: Vec<RegisterData> = Vec::with_capacity(point_num);
        for row in 1..=point_num {
            let rc = (row, 3 + offset);
            let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
            mb_data_configure.push(RegisterData::parse_register_data(&record, rc.0, rc.1)?);
        }
        let mut conn = MbConnection {
            slave_id,
            name,
            protocol_type,
            max_read_register_count,
            max_read_bit_count,
            max_write_register_count,
            max_write_bit_count,
            timeout_in_milli,
            delay_between_requests,
            point_id,
            default_polling_period_in_milli,
            mb_data_configure,
            coil_write_code,
            holding_write_code,
            ..Default::default()
        };
        conn.create_data_config().map_err(|(r, c, _)|(r, c + offset))?;
        Ok(conn)
    }
pub fn get_point_ids(&self) -> Vec<u64> {
let mut size = 0;
for conn in &self.connections {
size += conn.mb_data_configure.len();
size += 1;
}
let mut r: Vec<u64> = Vec::with_capacity(size);
for conn in &self.connections {
for rd in &conn.mb_data_configure {
r.push(rd.point_id)
}
if conn.point_id != UNKNOWN_POINT_ID {
r.push(conn.point_id);
}
}
r
}
    /// Exports this Modbus TCP client transport as CSV text. `text_map`
    /// supplies localized header labels, falling back to English defaults;
    /// single-connection and multi-connection layouts use separate branches.
    pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
        if self.connections.len() == 1 {
            // Row 1: transport name plus the register-table column titles.
            let mut result = format!("{},{},,",
                text_map.get("tp_name").unwrap_or(&"Transport Name".to_string()),
                get_csv_str(&self.name));
            result += &format!(
                "{},{},{},{},{},{},{},{},{},\n",
                text_map.get("conn_name").unwrap_or(&"Connection Name".to_string()),
                get_csv_str(&self.connections[0].name),
                text_map.get("index").unwrap_or(&"Index".to_string()),
                text_map.get("register_type").unwrap_or(&"Register Type".to_string()),
                text_map.get("start_addr").unwrap_or(&"Start Address".to_string()),
                text_map.get("data_type").unwrap_or(&"Data Type".to_string()),
                text_map.get("new_request_flag").unwrap_or(&"New Request".to_string()),
                text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()),
                text_map.get("tp_point_id").unwrap_or(&"Point ID".to_string()),
            );
            // Rows 2-12: connection header labels (one per create_csv_row index).
            let title_conn = vec![
                text_map.get("point_number").unwrap_or(&"Point Count".to_string()).clone(),
                "Slave ID".to_string(),
                text_map.get("protocol").unwrap_or(&"Protocol".to_string()).clone(),
                text_map.get("max_rrc").unwrap_or(&"Max Read Register Count".to_string()).clone(),
                text_map.get("max_rbc").unwrap_or(&"Max Read Bit Count".to_string()).clone(),
                text_map.get("max_wrc").unwrap_or(&"Max Write Register Count".to_string()).clone(),
                text_map.get("max_wbc").unwrap_or(&"Max Write Bit Count".to_string()).clone(),
                text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()).clone(),
                text_map.get("timeout_delay_ms").unwrap_or(&"Timeout;Delay(Optional)(ms)".to_string()).clone(),
                text_map.get("status_point_id").unwrap_or(&"Status Point ID".to_string()).clone(),
                text_map.get("coil_holding_code").unwrap_or(&"Coil/Holding Code".to_string()).clone(),
            ];
            // Transport-level cells for the same rows (only the first three are used).
            let title_tp = vec![
                format!(
                    "{},{},",
                    text_map.get("conn_num").unwrap_or(&"Connection Count".to_string()),
                    self.connections.len()
                ),
                format!(
                    "{},{},",
                    text_map.get("server_ip").unwrap_or(&"Server IP".to_string()),
                    self.tcp_server.0
                ),
                format!(
                    "{},{},",
                    text_map.get("server_port").unwrap_or(&"Server Port".to_string()),
                    self.tcp_server.1
                ),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
            ];
            let conn_len = if self.connections.is_empty() {
                0
            } else {
                self.connections[0].mb_data_configure.len()
            };
            // Emit the 11 header rows, packing register rows alongside them.
            for cnt in 0..11 {
                result += &title_tp[cnt];
                if conn_len > cnt {
                    let r = &self.connections[0].mb_data_configure[cnt];
                    let content_conn = self.connections[0].create_csv_row(cnt);
                    result += &format!(
                        ",{},{},{},{},{},{},{},{},{},",
                        title_conn[cnt],
                        content_conn,
                        cnt + 1,
                        r.register_type,
                        r.from,
                        r.data_type,
                        r.should_new_request.to_string().to_uppercase(),
                        r.polling_period_in_milli,
                        r.point_id
                    );
                } else {
                    let content_conn = self.connections[0].create_csv_row(cnt);
                    result += &format!(",{},{},,,,,,,,", title_conn[cnt], content_conn);
                }
                result += "\n";
            }
            // Remaining register rows beyond the 11 header rows.
            for row in 11..conn_len {
                result += ",,,";
                // NOTE(review): `conn_len > row` always holds inside this loop
                // (row ranges over 11..conn_len), so the else branch is dead.
                if conn_len > row {
                    let r = &self.connections[0].mb_data_configure[row];
                    result += &format!(
                        ",,{},{},{},{},{},{},{},",
                        row + 1,
                        r.register_type,
                        r.from,
                        r.data_type,
                        r.should_new_request.to_string().to_uppercase(),
                        r.polling_period_in_milli,
                        r.point_id
                    );
                } else {
                    result += ",,,,,,,,,,,";
                }
                result += "\n";
            }
            result
        } else {
            // Multi-connection layout: connection blocks side by side.
            let len_conn = self.connections.len();
            // Row 1: transport name plus one title group per connection.
            let mut result = format!("{},{},,",
                text_map.get("tp_name").unwrap_or(&"Transport Name".to_string()),
                get_csv_str(&self.name));
            for i in 0..len_conn {
                result += &format!(
                    "{},{},{},{},{},{},{},{},{}",
                    text_map.get("conn_name").unwrap_or(&"Connection Name".to_string()),
                    get_csv_str(&self.connections[i].name),
                    text_map.get("index").unwrap_or(&"Index".to_string()),
                    text_map.get("register_type").unwrap_or(&"Register Type".to_string()),
                    text_map.get("start_addr").unwrap_or(&"Start Address".to_string()),
                    text_map.get("data_type").unwrap_or(&"Data Type".to_string()),
                    text_map.get("new_request_flag").unwrap_or(&"New Request".to_string()),
                    text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()),
                    text_map.get("tp_point_id").unwrap_or(&"Point ID".to_string()),
                );
                if i != len_conn - 1 {
                    result += ",,";
                }
            }
            result += "\n";
            // Rows 2-12: connection header labels (one per create_csv_row index).
            // NOTE(review): the "register_period_name" fallback here has a
            // stray leading space (" Polling Period(ms)") unlike the
            // single-connection branch — confirm whether intended.
            let title_conn = vec![
                text_map.get("point_number").unwrap_or(&"Point Count".to_string()).clone(),
                "Slave ID".to_string(),
                text_map.get("protocol").unwrap_or(&"Protocol".to_string()).clone(),
                text_map.get("max_rrc").unwrap_or(&"Max Read Register Count".to_string()).clone(),
                text_map.get("max_rbc").unwrap_or(&"Max Read Bit Count".to_string()).clone(),
                text_map.get("max_wrc").unwrap_or(&"Max Write Register Count".to_string()).clone(),
                text_map.get("max_wbc").unwrap_or(&"Max Write Bit Count".to_string()).clone(),
                text_map.get("register_period_name").unwrap_or(&" Polling Period(ms)".to_string()).clone(),
                text_map.get("timeout_delay_ms").unwrap_or(&"Timeout;Delay(Optional)(ms)".to_string()).clone(),
                text_map.get("status_point_id").unwrap_or(&"Status Point ID".to_string()).clone(),
                text_map.get("coil_holding_code").unwrap_or(&"Coil/Holding Code".to_string()).clone(),
            ];
            // Transport-level cells for the same rows (only the first three are used).
            let title_tp = vec![
                format!(
                    "{},{},",
                    text_map.get("conn_num").unwrap_or(&"Connection Count".to_string()),
                    self.connections.len()
                ),
                format!(
                    "{},{},",
                    text_map.get("server_ip").unwrap_or(&"Server IP".to_string()),
                    self.tcp_server.0
                ),
                format!(
                    "{},{},",
                    text_map.get("server_port").unwrap_or(&"Server Port".to_string()),
                    self.tcp_server.1
                ),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
                ",,".to_string(),
            ];
            // Emit the 11 header rows, one column group per connection.
            for cnt in 0..11 {
                result += &title_tp[cnt];
                for i in 0..len_conn {
                    if self.connections[i].mb_data_configure.len() > cnt {
                        let r = &self.connections[i].mb_data_configure[cnt];
                        let content_conn = self.connections[i].create_csv_row(cnt);
                        result += &format!(
                            ",{},{},{},{},{},{},{},{},{}",
                            title_conn[cnt],
                            content_conn,
                            cnt + 1,
                            r.register_type,
                            r.from,
                            r.data_type,
                            r.should_new_request.to_string().to_uppercase(),
                            r.polling_period_in_milli,
                            r.point_id
                        );
                        if i != len_conn - 1 {
                            result += ",";
                        }
                    } else {
                        let content_conn = self.connections[i].create_csv_row(cnt);
                        result += &format!(",{},{},,,,,,,,", title_conn[cnt], content_conn);
                    }
                }
                result += "\n";
            }
            // Remaining register rows: pad shorter connections with empty cells.
            let mut max_data_len = if self.connections.is_empty() {
                0
            } else {
                self.connections[0].mb_data_configure.len()
            };
            for c in &self.connections {
                if c.mb_data_configure.len() > max_data_len {
                    max_data_len = c.mb_data_configure.len();
                }
            }
            for row in 11..max_data_len {
                result += ",,";
                for i in 0..len_conn {
                    if self.connections[i].mb_data_configure.len() > row {
                        let r = &self.connections[i].mb_data_configure[row];
                        result += &format!(
                            ",,{},{},{},{},{},{},{}",
                            row + 1,
                            r.register_type,
                            r.from,
                            r.data_type,
                            r.should_new_request.to_string().to_uppercase(),
                            r.polling_period_in_milli,
                            r.point_id
                        );
                        if i != len_conn - 1 {
                            result += ",";
                        }
                    } else {
                        result += ",,,,,,,,,,";
                        if i != len_conn - 1 {
                            result += ",";
                        }
                    }
                }
                result += "\n";
            }
            result
        }
    }
}
impl RegisterData {
    /// Parses one register row from a CSV record, reading six cells starting
    /// at `first_col`. `row`/`first_col` seed the `(row, column)` coordinates
    /// reported on any failure.
    fn parse_register_data(
        record: &StringRecord,
        row: usize,
        first_col: usize,
    ) -> Result<Self, (usize, usize)> {
        // Register table.
        let rc = (row, first_col);
        let register_type =
            RegisterType::from_str(csv_str(record, rc.1).ok_or(rc)?).map_err(|_| rc)?;
        // Start address.
        let rc = (row, first_col + 1);
        let from: u16 = csv_u16(record, rc.1).ok_or(rc)?;
        // Data type, checked against the register table: bit tables
        // (coils/discrete) must hold Binary data, word tables (input/holding)
        // must not.
        let rc = (row, first_col + 2);
        let data_type = DataType::from_str(csv_str(record, rc.1).ok_or(rc)?).map_err(|_| rc)?;
        let is_binary = matches!(data_type, DataType::Binary);
        let is_bit_table = matches!(register_type, RegisterType::COILS | RegisterType::DISCRETE);
        if is_binary != is_bit_table {
            return Err(rc);
        }
        // Whether this point must start a read request of its own; anything
        // other than "TRUE" (case-insensitive) counts as false.
        let rc = (row, first_col + 3);
        let should_new_request = csv_str(record, rc.1).ok_or(rc)?.to_uppercase() == "TRUE";
        // Polling period in milliseconds.
        let rc = (row, first_col + 4);
        let polling_period_in_milli = csv_u64(record, rc.1).ok_or(rc)?;
        // Measurement point id this register maps to.
        let rc = (row, first_col + 5);
        let point_id = csv_u64(record, rc.1).ok_or(rc)?;
        Ok(RegisterData {
            register_type,
            from,
            data_type,
            should_new_request,
            polling_period_in_milli,
            point_id,
        })
    }
}
impl ModbusTcpServerTp {
    /// Load a Modbus TCP server transport definition from `path`.
    /// `.xlsx`/`.xls` files are first converted to CSV (only the first
    /// sheet is used); anything else is read as raw CSV bytes.
    /// Errors carry the (row, column) of the offending cell; (0, 0)
    /// means an I/O or conversion failure.
    pub fn from_file(path: &str) -> Result<Self, (usize, usize)> {
        let content = std::fs::read(path).map_err(|_| (0, 0))?;
        // let content = if env::IS_ENCRYPT {
        //     decrypt_vec(content.as_slice())
        // } else {
        //     content
        // };
        let csv_bytes = if path.ends_with(".xlsx") || path.ends_with(".xls") {
            let r = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
            if r.is_empty() {
                return Err((0, 0));
            }
            r[0].clone()
        } else {
            content
        };
        Self::from_csv_bytes(csv_bytes.as_slice())
    }
    /// Load a transport definition from a CSV file (no Excel handling).
    pub fn from_csv(path: &str) -> Result<ModbusTcpServerTp, (usize, usize)> {
        let content = std::fs::read(path).map_err(|_| (0, 0))?;
        // let content = if env::IS_ENCRYPT {
        //     let plain_t = decrypt_vec(content.as_slice());
        //     plain_t
        // } else {
        //     content
        // };
        ModbusTcpServerTp::from_csv_bytes(content.as_slice())
    }
    /// Parse a transport definition from raw CSV bytes (transcoded to
    /// UTF-8 first).
    ///
    /// Layout: row 0 = name, row 1 = connection count, row 2 = server
    /// port; then one 10-column group per connection starting at
    /// column 3, parsed via `ModbusTcpClientTp::from_csv_records`.
    pub fn from_csv_bytes(content: &[u8]) -> Result<ModbusTcpServerTp, (usize, usize)> {
        let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
        let content = content_new.as_slice();
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        let rc = (0usize, 1);
        let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (1usize, 1);
        let conn_num: usize =
            csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (2usize, 1);
        let tcp_server_port: u16 =
            csv_u16(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let mut connections: Vec<(String, MbConnection)> = Vec::with_capacity(conn_num);
        for i in 0..conn_num {
            // Each column group is parsed with the TCP-client parser; its
            // "tcp_server" tuple here holds the expected client ip/port.
            let mut tp = ModbusTcpClientTp::from_csv_records(content, i * 10 + 3)?;
            let (client_ip, client_port) = tp.tcp_server;
            if client_ip != "+" {
                // not the wildcard: must be a valid IPv4 address
                let rc = (2usize, i * 10 + 4);
                client_ip.parse::<std::net::Ipv4Addr>().map_err(|_| rc)?;
            }
            // Clients that share the same ip/port/name must have identical
            // register configuration.
            for (key, conn) in &connections {
                // connection name and transport name are the same here
                if *key == format!("{}/{}/{}", client_ip, client_port, conn.name) {
                    // the address configuration must match exactly
                    if *conn.mb_data_configure != tp.connections[0].mb_data_configure {
                        return Err((0, i * 10 + 3));
                    }
                }
            }
            let port_str = client_port.to_string();
            let port = if client_port == UNKNOWN_TCP_PORT {
                "+"
            } else {
                port_str.as_str()
            };
            // point_id doubles as a replica count to simplify configuring
            // many identical clients; point ids 1-100 are reserved and may
            // not be used as real status points.
            if tp.connections[0].point_id > 1
                && tp.connections[0].point_id <= DEFAULT_TCP_CLIENT_LIMIT as u64 {
                let count = tp.connections[0].point_id;
                for i in 1..count {
                    let key = format!("{}/{}/{}@{}", client_ip, port, tp.name, i);
                    let mut connection = tp.connections[0].clone();
                    connection.point_id = UNKNOWN_POINT_ID; // replicas share one config; no status point id needed
                    connections.push((key, connection));
                }
                let key = format!("{}/{}/{}@{}", client_ip, port, tp.name, count);
                tp.connections[0].point_id = UNKNOWN_POINT_ID;
                connections.push((key, tp.connections.pop().unwrap()))
            } else {
                let key: String = format!("{}/{}/{}", client_ip, port, tp.name);
                connections.push((key, tp.connections.pop().unwrap()))
            }
        }
        Ok(ModbusTcpServerTp {
            id: 0,
            name,
            tcp_server_port,
            connections,
        })
    }
    /// Collect every measurement point id referenced by this transport:
    /// each connection's status point (when set) plus all register points.
    /// Duplicates are removed via the intermediate `HashSet`.
    pub fn get_point_ids(&self) -> Vec<u64> {
        let mut size = 0;
        for (_, conn) in &self.connections {
            size += conn.mb_data_configure.len()
        }
        size += self.connections.len();
        let mut r = HashSet::with_capacity(size);
        for (_, conn) in &self.connections {
            if conn.point_id != UNKNOWN_POINT_ID {
                r.insert(conn.point_id);
            }
            for rd in &conn.mb_data_configure {
                r.insert(rd.point_id);
            }
        }
        r.into_iter().collect()
    }
    /// Render this transport back into the CSV layout understood by
    /// `from_csv_bytes`. Wildcard-client connections (key prefix "+/")
    /// that were expanded at parse time are collapsed back into a single
    /// exported column, with the replica count written in the Point ID row.
    pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
        // Count exported columns: "+/" connections on the same port
        // collapse into one column; unkown_ip_map records replicas per port.
        let mut len_conn = 0;
        let mut unkown_ip_map: HashMap<u32, usize> = HashMap::new();
        for (s, _) in &self.connections {
            if s.starts_with("+/") {
                let info: Vec<&str> = s.split('/').collect();
                if info.len() != 3 {
                    continue;
                }
                let port = if info[1] == "+" {
                    UNKNOWN_TCP_PORT
                } else {
                    info[1].parse::<u32>().unwrap_or(UNKNOWN_TCP_PORT)
                };
                if let Some(multi_count) = unkown_ip_map.get_mut(&port) {
                    *multi_count += 1;
                    continue;
                } else {
                    unkown_ip_map.insert(port, 1);
                    len_conn += 1;
                }
            } else {
                len_conn += 1;
            }
        }
        // First row: transport name followed by one header group per column.
        let mut result = format!("{},{},,",
            text_map.get("tp_name").unwrap_or(&"Transport Name".to_string()),
            get_csv_str(&self.name));
        let mut i = 0;
        let mut multi_found = false;
        for (s, conn) in &self.connections {
            result += &format!(
                "{},{},{},{},{},{},{},{},{}",
                text_map.get("conn_name").unwrap_or(&"Connection Name".to_string()),
                get_csv_str(&conn.name),
                text_map.get("index").unwrap_or(&"Index".to_string()),
                text_map.get("register_type").unwrap_or(&"Register Type".to_string()),
                text_map.get("start_addr").unwrap_or(&"Start Address".to_string()),
                text_map.get("data_type").unwrap_or(&"Data Type".to_string()),
                text_map.get("new_request_flag").unwrap_or(&"New Request".to_string()),
                text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()),
                text_map.get("tp_point_id").unwrap_or(&"Point ID".to_string()),
            );
            if i != len_conn - 1 {
                result += ",,";
            } else {
                break;
            }
            // NOTE(review): a duplicate "+/" connection is skipped only AFTER
            // its header group and separator were appended above — confirm
            // replicas are expected to repeat the header in this row.
            if s.starts_with("+/") {
                if multi_found {
                    continue;
                } else {
                    multi_found = true;
                    i += 1;
                }
            } else {
                i += 1;
            }
        }
        result += "\n";
        // Rows 2 through 13: fixed per-connection attributes on the left
        // of each column, register definitions 1..=12 on the right.
        let title_conn = vec![
            text_map.get("point_number").unwrap_or(&"Point Count".to_string()).clone(),
            text_map.get("client_ip").unwrap_or(&"Client IP".to_string()).clone(),
            text_map.get("client_port").unwrap_or(&"Client Port".to_string()).clone(),
            "Slave ID".to_string(),
            text_map.get("protocol_type").unwrap_or(&"Protocol Type".to_string()).clone(),
            text_map.get("max_rrc").unwrap_or(&"Max Read Register Count".to_string()).clone(),
            text_map.get("max_rbc").unwrap_or(&"Max Read Bit Count".to_string()).clone(),
            text_map.get("max_wrc").unwrap_or(&"Max Write Register Count".to_string()).clone(),
            text_map.get("max_wbc").unwrap_or(&"Max Write Bit Count".to_string()).clone(),
            text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()).clone(),
            text_map.get("timeout_ms").unwrap_or(&"Timeout(ms)".to_string()).clone(),
            text_map.get("tp_point_id").unwrap_or(&"Point ID".to_string()).clone(),
        ];
        let title_tp = vec![
            format!("{},{},", text_map.get("conn_num").unwrap_or(&"Connection Count".to_string()), len_conn),
            format!(
                "{},{},",
                text_map.get("server_port").unwrap_or(&"Server Port".to_string()),
                self.tcp_server_port
            ),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
        ];
        for cnt in 0..12 {
            result += &title_tp[cnt];
            let mut i = 0;
            let mut multi_found = HashSet::with_capacity(unkown_ip_map.len());
            for conn in &self.connections {
                // Collapse duplicate wildcard connections (same port) so
                // each exported column appears once.
                if conn.0.starts_with("+/") {
                    let info: Vec<&str> = conn.0.split('/').collect();
                    if info.len() != 3 {
                        continue;
                    }
                    let port = if info[1] == "+" {
                        UNKNOWN_TCP_PORT
                    } else {
                        info[1].parse::<u32>().unwrap_or(UNKNOWN_TCP_PORT)
                    };
                    if multi_found.contains(&port) {
                        continue;
                    } else {
                        multi_found.insert(port);
                        i += 1;
                    }
                } else {
                    i += 1;
                }
                if conn.1.mb_data_configure.len() > cnt {
                    let r = &conn.1.mb_data_configure[cnt];
                    // Row index 11 (Point ID) of a wildcard connection
                    // carries the replica count instead of the attribute.
                    let content_conn = if cnt == 11 && conn.0.starts_with("+/") {
                        let info: Vec<&str> = conn.0.split('/').collect();
                        if info.len() != 3 {
                            continue;
                        }
                        let port = if info[1] == "+" {
                            UNKNOWN_TCP_PORT
                        } else {
                            info[1].parse::<u32>().unwrap_or(UNKNOWN_TCP_PORT)
                        };
                        unkown_ip_map.get(&port).unwrap_or(&0).to_string()
                    } else {
                        Self::get_mbd_conn_csv(conn, cnt)
                    };
                    result += &format!(
                        ",{},{},{},{},{},{},{},{},{}",
                        title_conn[cnt],
                        content_conn,
                        cnt + 1,
                        r.register_type,
                        r.from,
                        r.data_type,
                        r.should_new_request.to_string().to_uppercase(),
                        r.polling_period_in_milli,
                        r.point_id
                    );
                } else {
                    // No register definition for this row: pad the register
                    // columns with empty cells.
                    let content_conn = if cnt == 11 && conn.0.starts_with("+/") {
                        let info: Vec<&str> = conn.0.split('/').collect();
                        if info.len() != 3 {
                            continue;
                        }
                        let port = if info[1] == "+" {
                            UNKNOWN_TCP_PORT
                        } else {
                            info[1].parse::<u32>().unwrap_or(UNKNOWN_TCP_PORT)
                        };
                        unkown_ip_map.get(&port).unwrap_or(&0).to_string()
                    } else {
                        Self::get_mbd_conn_csv(conn, cnt)
                    };
                    result += &format!(",{},{},,,,,,,", title_conn[cnt], content_conn);
                }
                if i != len_conn {
                    result += ",";
                } else {
                    break;
                }
            }
            result += "\n";
        }
        // Remaining register rows beyond the 12 fixed title rows.
        let mut max_data_len = if self.connections.is_empty() {
            0
        } else {
            self.connections[0].1.mb_data_configure.len()
        };
        for c in &self.connections {
            if c.1.mb_data_configure.len() > max_data_len {
                max_data_len = c.1.mb_data_configure.len();
            }
        }
        if max_data_len < 12 {
            result += ",,,,,,,,,,,";
        }
        for row in 12..max_data_len {
            result += ",,,,,";
            let mut i = 0;
            let mut multi_found = HashSet::with_capacity(unkown_ip_map.len());
            for (s, conn) in &self.connections {
                if s.starts_with("+/") {
                    let info: Vec<&str> = s.split('/').collect();
                    if info.len() != 3 {
                        continue;
                    }
                    let port = if info[1] == "+" {
                        UNKNOWN_TCP_PORT
                    } else {
                        info[1].parse::<u32>().unwrap_or(UNKNOWN_TCP_PORT)
                    };
                    if multi_found.contains(&port) {
                        continue;
                    } else {
                        multi_found.insert(port);
                        i += 1;
                    }
                } else {
                    i += 1;
                }
                if conn.mb_data_configure.len() > row {
                    let r = &conn.mb_data_configure[row];
                    result += &format!(
                        "{},{},{},{},{},{},{}",
                        row + 1,
                        r.register_type,
                        r.from,
                        r.data_type,
                        r.should_new_request.to_string().to_uppercase(),
                        r.polling_period_in_milli,
                        r.point_id
                    );
                } else {
                    result += ",,,,,,,,,";
                }
                if i != len_conn {
                    result += ",";
                } else {
                    break;
                }
            }
            result += "\n";
        }
        result
    }
    /// Value for the fixed header rows (index 0-11) of one exported
    /// connection column: point count, client ip, client port, slave id,
    /// protocol type, the four request-size limits, polling period,
    /// timeout and status point id. The connection key ("ip/port/name")
    /// supplies ip and port; a malformed key yields "".
    fn get_mbd_conn_csv(conn: &(String, MbConnection), index: usize) -> String {
        return match index {
            0 => conn.1.mb_data_configure.len().to_string(),
            1 => {
                let info: Vec<&str> = conn.0.split('/').collect();
                if info.len() != 3 {
                    return "".to_string();
                }
                info[0].to_string()
            }
            2 => {
                let info: Vec<&str> = conn.0.split('/').collect();
                if info.len() != 3 {
                    return "".to_string();
                }
                if info[1] == "+" {
                    UNKNOWN_TCP_PORT.to_string()
                } else {
                    info[1].to_string()
                }
            }
            3 => conn.1.slave_id.to_string(),
            4 => conn.1.protocol_type.to_string(),
            5 => conn.1.max_read_register_count.to_string(),
            6 => conn.1.max_read_bit_count.to_string(),
            7 => conn.1.max_write_register_count.to_string(),
            8 => conn.1.max_write_bit_count.to_string(),
            9 => conn.1.default_polling_period_in_milli.to_string(),
            10 => conn.1.timeout_in_milli.to_string(),
            11 => conn.1.point_id.to_string(),
            _ => "".to_string(),
        };
    }
}
impl ModbusRtuClientTp {
    /// Load a Modbus RTU client transport from `path`; `.xlsx`/`.xls`
    /// files are converted to CSV first (only the first sheet is used).
    /// Errors carry (row, column); (0, 0) means I/O or conversion failure.
    pub fn from_file(path: &str) -> Result<Self, (usize, usize)> {
        let content = std::fs::read(path).map_err(|_| (0, 0))?;
        // let content = if env::IS_ENCRYPT {
        //     decrypt_vec(content.as_slice())
        // } else {
        //     content
        // };
        let csv_bytes = if path.ends_with(".xlsx") || path.ends_with(".xls") {
            let r = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
            if r.is_empty() {
                return Err((0, 0));
            }
            r[0].clone()
        } else {
            content
        };
        Self::from_csv_bytes(csv_bytes.as_slice())
    }
    /// Load a transport definition from a CSV file (no Excel handling).
    pub fn from_csv(path: &str) -> Result<ModbusRtuClientTp, (usize, usize)> {
        let content = std::fs::read(path).map_err(|_| (0, 0))?;
        // let content = if env::IS_ENCRYPT {
        //     let plain_t = decrypt_vec(content.as_slice());
        //     plain_t
        // } else {
        //     content
        // };
        ModbusRtuClientTp::from_csv_bytes(content.as_slice())
    }
    /// Parse a transport from raw CSV bytes (transcoded to UTF-8 first).
    ///
    /// Layout: row 0 = name, row 1 = connection count, row 2 = baud rate,
    /// row 3 = serial device path, then optional rows for data bits,
    /// stop bits, parity and inter-request delay; connection groups
    /// follow at column `i * 10 + 3`.
    pub fn from_csv_bytes(content: &[u8]) -> Result<ModbusRtuClientTp, (usize, usize)> {
        let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
        let content = content_new.as_slice();
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records();
        let rc = (0usize, 1);
        let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (1usize, 1);
        let conn_num: usize =
            csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (2usize, 1);
        let baud_rate = csv_u32(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        let rc = (3usize, 1);
        let file_path = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
        // On Unix, a relative device name is resolved under /dev/.
        #[cfg(target_family = "unix")]
        let file_path = if PathBuf::from(file_path.clone()).is_relative() {
            "/dev/".to_string() + file_path.as_str()
        } else {
            file_path
        };
        // The next three rows are optional.
        // Row 5: data bits.
        let record = records.next();
        let data_bits = if let Some(Ok(tmp)) = record {
            if let Some(v) = csv_u8(&tmp, 1) {
                v
            } else {
                8 // default is 8
            }
        } else {
            8
        };
        // Row 6: stop bits.
        let record = records.next();
        let stop_bits = if let Some(Ok(tmp)) = record {
            if let Some(v) = csv_u8(&tmp, 1) {
                v
            } else {
                1 // default is 1
            }
        } else {
            1
        };
        // Row 7: parity.
        let record = records.next();
        let parity = if let Some(Ok(tmp)) = record {
            if let Some(v) = csv_str(&tmp, 1) {
                create_parity(v)
            } else {
                SerialParity::None
            }
        } else {
            SerialParity::None
        };
        // Row 8: inter-request delay. Default is the 3.5-character Modbus
        // RTU silent interval: each character is 1 start + 8 data +
        // 1 parity (or 0) + 1 stop bit, i.e. ~38.5 bits -> 38500/baud ms.
        let mut delay_between_requests = (38500. / (baud_rate as f64)).ceil() as u64;
        if delay_between_requests == 0 {
            delay_between_requests = DEFAULT_DELAY_BETWEEN_REQUESTS;
        }
        let record = records.next();
        if let Some(Ok(tmp)) = record {
            if let Some(v) = csv_u64(&tmp, 1) {
                if v > 0 {
                    delay_between_requests = v;
                }
            }
        }
        // Connection groups are parsed with the TCP-client parser.
        let mut connections: Vec<MbConnection> = Vec::with_capacity(conn_num);
        for i in 0..conn_num {
            let mut tp = ModbusTcpClientTp::from_csv_records(content, i * 10 + 3)?;
            connections.push(tp.connections.pop().unwrap());
        }
        let para: SerialPara = SerialPara {
            file_path,
            baud_rate,
            data_bits,
            stop_bits,
            parity,
            delay_between_requests,
        };
        Ok(ModbusRtuClientTp {
            id: 0,
            name,
            para,
            connections,
        })
    }
    /// Render this transport back into the CSV layout understood by
    /// `from_csv_bytes`: a header row, 13 fixed attribute rows and then
    /// one row per remaining register definition.
    pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
        let len_conn = self.connections.len();
        // First row: transport name plus one header group per connection.
        let mut result = format!("{},{},,",
            text_map.get("tp_name").unwrap_or(&"Transport Name".to_string()),
            get_csv_str(&self.name));
        for i in 0..len_conn {
            result += &format!(
                "{},{},{},{},{},{},{},{},{}",
                text_map.get("conn_name").unwrap_or(&"Connection Name".to_string()),
                get_csv_str(&self.connections[i].name),
                text_map.get("index").unwrap_or(&"Index".to_string()),
                text_map.get("register_type").unwrap_or(&"Register Type".to_string()),
                text_map.get("start_addr").unwrap_or(&"Start Address".to_string()),
                text_map.get("data_type").unwrap_or(&"Data Type".to_string()),
                text_map.get("new_request_flag").unwrap_or(&"New Request".to_string()),
                text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()),
                text_map.get("tp_point_id").unwrap_or(&"Point ID".to_string()),
            );
            if i != len_conn - 1 {
                result += ",,";
            }
        }
        result += "\n";
        // Rows 2 through 14: fixed per-connection attributes on the left,
        // register definitions 1..=13 on the right.
        let title_conn = vec![
            text_map.get("point_number").unwrap_or(&"Point Count".to_string()).clone(),
            text_map.get("desc").unwrap_or(&"Description".to_string()).clone(),
            text_map.get("priority").unwrap_or(&"Priority".to_string()).clone(),
            "Slave ID".to_string(),
            text_map.get("protocol_type").unwrap_or(&"Protocol Type".to_string()).clone(),
            text_map.get("max_rrc").unwrap_or(&"Max Read Register Count".to_string()).clone(),
            text_map.get("max_rbc").unwrap_or(&"Max Read Bit Count".to_string()).clone(),
            text_map.get("max_wrc").unwrap_or(&"Max Write Register Count".to_string()).clone(),
            text_map.get("max_wbc").unwrap_or(&"Max Write Bit Count".to_string()).clone(),
            text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()).clone(),
            text_map.get("timeout_ms").unwrap_or(&"Timeout(ms)".to_string()).clone(),
            text_map.get("tp_point_id").unwrap_or(&"Point ID".to_string()).clone(),
            text_map.get("coil_holding_code").unwrap_or(&"Coil/Holding Code".to_string()).clone(),
        ];
        let title_tp = vec![
            format!(
                "{},{},",
                text_map.get("conn_num").unwrap_or(&"Connection Count".to_string()),
                self.connections.len()
            ),
            format!(
                "{},{},",
                text_map.get("baud_rate").unwrap_or(&"Baud Rate".to_string()),
                self.para.baud_rate
            ),
            format!(
                "{},{},",
                text_map.get("file_path").unwrap_or(&"File Path".to_string()),
                self.para.file_path
            ),
            format!(
                "{},{},",
                text_map.get("data_bits_op").unwrap_or(&"Data Bits".to_string()),
                self.para.data_bits
            ),
            format!(
                "{},{},",
                text_map.get("stop_bits_op").unwrap_or(&"Stop Bits".to_string()),
                self.para.stop_bits
            ),
            format!(
                "{},{:?},",
                text_map.get("parity_op").unwrap_or(&"Parity".to_string()),
                self.para.parity
            )
            .to_uppercase(),
            format!(
                "{},{},",
                text_map.get("delay_time_op").unwrap_or(&"Delay Time(ms)".to_string()),
                self.para.delay_between_requests
            ),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
            ",,".to_string(),
        ];
        for cnt in 0..13 {
            result += &title_tp[cnt];
            for i in 0..len_conn {
                if self.connections[i].mb_data_configure.len() > cnt {
                    let r = &self.connections[i].mb_data_configure[cnt];
                    let content_conn = Self::get_rtu_mbc_conn_csv(&self.connections[i], cnt);
                    result += &format!(
                        ",{},{},{},{},{},{},{},{},{}",
                        title_conn[cnt],
                        content_conn,
                        cnt + 1,
                        r.register_type,
                        r.from,
                        r.data_type,
                        r.should_new_request.to_string().to_uppercase(),
                        r.polling_period_in_milli,
                        r.point_id
                    );
                    if i != len_conn - 1 {
                        result += ",";
                    }
                } else {
                    // No register for this row: pad with empty cells.
                    let content_conn = Self::get_rtu_mbc_conn_csv(&self.connections[i], cnt);
                    result += &format!(",{},{},,,,,,,", title_conn[cnt], content_conn);
                    if i != len_conn - 1 {
                        result += ",";
                    }
                }
            }
            result += "\n";
        }
        // Remaining register rows beyond the 13 fixed title rows.
        let mut max_data_len = if self.connections.is_empty() {
            0
        } else {
            self.connections[0].mb_data_configure.len()
        };
        for c in &self.connections {
            if c.mb_data_configure.len() > max_data_len {
                max_data_len = c.mb_data_configure.len();
            }
        }
        if max_data_len < 13 {
            result += ",,,,,,,,,,,";
        }
        for row in 13..max_data_len {
            // The fixed title rows are finished but some connections still
            // have register definitions left.
            result += ",,,,";
            for i in 0..len_conn {
                if self.connections[i].mb_data_configure.len() > row {
                    let r = &self.connections[i].mb_data_configure[row];
                    result += &format!(
                        ",{},{},{},{},{},{},{}",
                        row + 1,
                        r.register_type,
                        r.from,
                        r.data_type,
                        r.should_new_request.to_string().to_uppercase(),
                        r.polling_period_in_milli,
                        r.point_id
                    );
                    if i != len_conn - 1 {
                        result += ",";
                    }
                } else {
                    result += ",,,,,,,,,";
                    if i != len_conn - 1 {
                        result += ",";
                    }
                }
            }
            result += "\n";
        }
        result
    }
    /// Value for the fixed header rows (index 0-12) of one exported RTU
    /// connection. Index 12 combines the optional coil and holding write
    /// function codes as "coil;holding".
    fn get_rtu_mbc_conn_csv(conn: &MbConnection, index: usize) -> String {
        let mut ch_code = ";".to_string();
        if let Some(c) = conn.coil_write_code {
            ch_code = format!("{};", c);
        }
        if let Some(h) = conn.holding_write_code {
            ch_code += h.to_string().as_str();
        }
        match index {
            0 => conn.mb_data_configure.len().to_string(),
            1 => "描述".to_string(),
            2 => "1".to_string(),
            3 => conn.slave_id.to_string(),
            4 => conn.protocol_type.to_string().to_uppercase(),
            5 => conn.max_read_register_count.to_string(),
            6 => conn.max_read_bit_count.to_string(),
            7 => conn.max_write_register_count.to_string(),
            8 => conn.max_write_bit_count.to_string(),
            9 => conn.default_polling_period_in_milli.to_string(),
            10 => conn.timeout_in_milli.to_string(),
            11 => conn.point_id.to_string(),
            12 => ch_code,
            _ => "unknown".to_string(),
        }
    }
    /// Collect every measurement point id referenced by this transport:
    /// all register points plus each connection's status point (when set).
    /// Unlike the TCP server variant, duplicates are NOT removed.
    pub fn get_point_ids(&self) -> Vec<u64> {
        let mut size = 0;
        for conn in &self.connections {
            size += conn.mb_data_configure.len()
        }
        size += self.connections.len();
        let mut r: Vec<u64> = Vec::with_capacity(size);
        for conn in &self.connections {
            for rd in &conn.mb_data_configure {
                r.push(rd.point_id)
            }
            if conn.point_id != UNKNOWN_POINT_ID {
                r.push(conn.point_id);
            }
        }
        r
    }
}
impl ModbusRtuServerTp {
    /// Load a Modbus RTU server transport from `path`; `.xlsx`/`.xls`
    /// files are converted to CSV first (only the first sheet is used).
    /// Errors carry (row, column); (0, 0) means I/O or conversion failure.
    pub fn from_file(path: &str) -> Result<Self, (usize, usize)> {
        let content = std::fs::read(path).map_err(|_| (0, 0))?;
        // let content = if env::IS_ENCRYPT {
        //     decrypt_vec(content.as_slice())
        // } else {
        //     content
        // };
        let csv_bytes = if path.ends_with(".xlsx") || path.ends_with(".xls") {
            let r = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
            if r.is_empty() {
                return Err((0, 0));
            }
            r[0].clone()
        } else {
            content
        };
        Self::from_csv_bytes(csv_bytes.as_slice())
    }
    /// Load a transport definition from a CSV file (no Excel handling).
    pub fn from_csv(path: &str) -> Result<ModbusRtuServerTp, (usize, usize)> {
        let content = std::fs::read(path).map_err(|_| (0, 0))?;
        // let content = if env::IS_ENCRYPT {
        //     let plain_t = decrypt_vec(content.as_slice());
        //     plain_t
        // } else {
        //     content
        // };
        ModbusRtuServerTp::from_csv_bytes(content.as_slice())
    }
    /// Parse a transport from raw CSV bytes (transcoded to UTF-8 first).
    ///
    /// The single connection is parsed with the TCP-client parser, whose
    /// "tcp_server" tuple is reused here as (serial device path, baud
    /// rate). Rows 14-16 optionally supply data bits, stop bits and parity.
    pub fn from_csv_bytes(content: &[u8]) -> Result<ModbusRtuServerTp, (usize, usize)> {
        let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
        let content = content_new.as_slice();
        let mut tp = ModbusTcpClientTp::from_csv_records(content, 0)?;
        // Read the serial-port parameters.
        let mut rdr = csv::ReaderBuilder::new()
            .has_headers(false)
            .from_reader(content);
        let mut records = rdr.records().skip(13);
        // Row 14: data bits.
        let record = records.next();
        let data_bits = if let Some(Ok(tmp)) = record {
            if let Some(v) = csv_u8(&tmp, 1) {
                v
            } else {
                8 // default is 8
            }
        } else {
            8
        };
        // Row 15: stop bits.
        let record = records.next();
        let stop_bits = if let Some(Ok(tmp)) = record {
            if let Some(v) = csv_u8(&tmp, 1) {
                v
            } else {
                1 // default is 1
            }
        } else {
            1
        };
        // Row 16: parity.
        let record = records.next();
        let parity = if let Some(Ok(tmp)) = record {
            if let Some(v) = csv_str(&tmp, 1) {
                create_parity(v)
            } else {
                SerialParity::None
            }
        } else {
            SerialParity::None
        };
        let para: SerialPara = SerialPara {
            file_path: tp.tcp_server.0,
            baud_rate: tp.tcp_server.1,
            data_bits,
            stop_bits,
            parity,
            delay_between_requests: 0,
        };
        Ok(ModbusRtuServerTp {
            id: 0,
            name: tp.name,
            para,
            connection: tp.connections.pop().unwrap(),
        })
    }
    /// Render this transport back into the CSV layout understood by
    /// `from_csv_bytes`: a header row, 15 fixed attribute rows paired with
    /// register definitions, then one row per remaining register.
    pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
        let title = vec![
            text_map.get("point_number").unwrap_or(&"Point Count".to_string()).clone(),
            text_map.get("file_path").unwrap_or(&"File Path".to_string()).clone(),
            text_map.get("baud_rate").unwrap_or(&"Baud Rate".to_string()).clone(),
            "Slave ID".to_string(),
            text_map.get("protocol_type").unwrap_or(&"Protocol Type".to_string()).clone(),
            text_map.get("max_rrc").unwrap_or(&"Max Read Register Count".to_string()).clone(),
            text_map.get("max_rbc").unwrap_or(&"Max Read Bit Count".to_string()).clone(),
            text_map.get("max_wrc").unwrap_or(&"Max Write Register Count".to_string()).clone(),
            text_map.get("max_wbc").unwrap_or(&"Max Write Bit Count".to_string()).clone(),
            text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()).clone(),
            text_map.get("timeout_ms").unwrap_or(&"Timeout(ms)".to_string()).clone(),
            text_map.get("tp_point_id").unwrap_or(&"Point ID".to_string()).clone(),
            text_map.get("data_bits_op").unwrap_or(&"Data Bits".to_string()).clone(),
            text_map.get("stop_bits_op").unwrap_or(&"Stop Bits".to_string()).clone(),
            text_map.get("parity_op").unwrap_or(&"Parity".to_string()).clone(),
        ];
        let content = vec![
            format!("{}", self.connection.mb_data_configure.len()),
            format!("{}", self.para.file_path),
            format!("{}", self.para.baud_rate),
            format!("{}", self.connection.slave_id),
            "RTU".to_string(),
            format!("{}", self.connection.max_read_register_count),
            format!("{}", self.connection.max_read_bit_count),
            format!("{}", self.connection.max_write_register_count),
            format!("{}", self.connection.max_write_bit_count),
            format!("{}", self.connection.default_polling_period_in_milli),
            format!("{}", self.connection.timeout_in_milli),
            format!("{}", self.connection.point_id),
            format!("{}", self.para.data_bits),
            format!("{}", self.para.stop_bits),
            format!("{:?}", self.para.parity),
        ];
        // Header row.
        let mut result = format!(
            "{},{},{},{},{},{},{},{},{}\n",
            text_map.get("conn_name").unwrap_or(&"Connection Name".to_string()),
            get_csv_str(&self.name),
            text_map.get("index").unwrap_or(&"Index".to_string()),
            text_map.get("register_type").unwrap_or(&"Register Type".to_string()),
            text_map.get("start_addr").unwrap_or(&"Start Address".to_string()),
            text_map.get("data_type").unwrap_or(&"Data Type".to_string()),
            text_map.get("new_request_flag").unwrap_or(&"New Request".to_string()),
            text_map.get("register_period_name").unwrap_or(&"Polling Period(ms)".to_string()),
            text_map.get("register_point_id").unwrap_or(&"Point ID".to_string()),
        ).to_string();
        let p = &self.connection.mb_data_configure;
        // 15 fixed rows: attribute on the left, register definition (if
        // any) on the right.
        for i in 0_usize..15_usize {
            if p.len() > i {
                let bool_status = if p[i].should_new_request {
                    "TRUE"
                } else {
                    "FALSE"
                };
                result += &format!(
                    "{},{},{},{},{},{},{},{},{}\n",
                    title[i],
                    content[i],
                    i + 1,
                    p[i].register_type,
                    p[i].from,
                    p[i].data_type,
                    bool_status,
                    p[i].polling_period_in_milli,
                    p[i].point_id
                );
            } else {
                result += &format!("{},{},,,,,,,\n", title[i], content[i]);
            }
        }
        // Remaining register rows beyond the 15 fixed ones.
        if p.len() > 15 {
            let mut index = 15_usize;
            while index < p.len() {
                let bool_status = if p[index].should_new_request {
                    "TRUE"
                } else {
                    "FALSE"
                };
                result += &format!(
                    ",,{},{},{},{},{},{},{}\n",
                    index + 1,
                    p[index].register_type,
                    p[index].from,
                    p[index].data_type,
                    bool_status,
                    p[index].polling_period_in_milli,
                    p[index].point_id
                );
                index += 1;
            }
        }
        result
    }
    /// Collect every measurement point id referenced by this transport:
    /// all register points plus the connection status point (when set).
    pub fn get_point_ids(&self) -> Vec<u64> {
        let size = self.connection.mb_data_configure.len() + 1;
        let mut r: Vec<u64> = Vec::with_capacity(size);
        for rd in &self.connection.mb_data_configure {
            r.push(rd.point_id)
        }
        if self.connection.point_id != UNKNOWN_POINT_ID {
            r.push(self.connection.point_id);
        }
        r
    }
}
/// Number of 16-bit Modbus registers occupied by one value of `d`'s data
/// type: multi-byte types take `byte_count / 2` registers, while types of
/// one byte or fewer map to their raw byte count unchanged.
pub fn get_register_count(d: &RegisterData) -> u16 {
    let byte_count = d.data_type.get_byte_count();
    if byte_count <= 1 {
        byte_count
    } else {
        byte_count / 2
    }
}
\ No newline at end of file
use std::collections::{BTreeMap, HashMap, HashSet};
use serde::{Deserialize, Serialize};
use serde_json::{Map, Number, Value};
use crate::excel::{excel_bytes_to_csv_bytes, transfer_to_utf8};
use crate::{csv_str, csv_string, csv_u16, csv_u64, csv_usize, get_csv_str};
/**
 * @api {MQTT Transport Info} /MqttTransport MqttTransport
 * @apiPrivate
 * @apiGroup A_Object
 * @apiSuccess {u64} id transport id
 * @apiSuccess {String} name transport name
 * @apiSuccess {tuple} mqtt_broker broker ip and port, tuple format (ip:String, port:u16)
 * @apiSuccess {u64} point_id measurement point id holding the transport status
 * @apiSuccess {tuple[]} point_ids points read/written over MQTT, array of tuples (u64, bool)
 * @apiSuccess {String} read_topic topic for reading points
 * @apiSuccess {String} write_topic topic for writing points
 * @apiSuccess {bool} is_json whether payloads are JSON encoded; defaults to false (protobuf)
 * @apiSuccess {bool} is_transfer whether this is a forwarding (transfer) transport
 * @apiSuccess {u16} [keep_alive] keep-alive interval, optional
 * @apiSuccess {String} [user_name] user name, optional
 * @apiSuccess {String} [user_password] password, optional
 */
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Default)]
pub struct MqttTransport {
    /// transport id
    pub id: u64,
    /// transport name
    pub name: String,
    /// broker ip and port
    pub mqtt_broker: (String, u16),
    /// measurement point id holding the transport status
    pub point_id: u64,
    /// points read/written over MQTT: (point id, writable)
    pub point_ids: Vec<(u64, bool)>,
    /// topic for reading points
    pub read_topic: String,
    /// topic for writing points
    pub write_topic: String,
    /// payload encoding: true = JSON, false (default) = protobuf
    pub is_json: bool,
    /// whether this is a forwarding (transfer) transport
    pub is_transfer: bool,
    /// keep-alive interval (unit not visible here — presumably seconds; confirm against the MQTT client)
    pub keep_alive: Option<u16>,
    /// user name, optional
    pub user_name: Option<String>,
    /// password, optional
    pub user_password: Option<String>,
    /// JSON filters: each entry is the key list of one filter object
    pub json_filters: Option<Vec<Vec<String>>>,
    /// data tags for JSON points: key is the JSON string of the filter's
    /// value array, value maps tag -> point index
    pub json_tags: Option<HashMap<String, HashMap<String, usize>>>,
    /// JSON write template per point id
    pub json_write_template: Option<HashMap<u64, String>>,
    /// JSON write tag per point id
    pub json_write_tag: Option<HashMap<u64, String>>,
}
impl MqttTransport {
pub fn from_file(path: &str) -> Result<Self, (usize, usize)> {
let content = std::fs::read(path).map_err(|_| (0, 0))?;
// let content = if env::IS_ENCRYPT {
// decrypt_vec(content.as_slice())
// } else {
// content
// };
let csv_bytes = if path.ends_with(".xlsx") || path.ends_with(".xls") {
let r = excel_bytes_to_csv_bytes(content.as_slice()).unwrap_or_default();
if r.is_empty() {
return Err((0, 0));
}
r[0].clone()
} else {
content
};
Self::from_csv_bytes(csv_bytes.as_slice())
}
pub fn from_csv(path: &str) -> Result<MqttTransport, (usize, usize)> {
let content = std::fs::read(path).map_err(|_| (0, 0))?;
// let content = if env::IS_ENCRYPT {
// let content = decrypt_vec(content.as_slice());
// content
// } else {
// content
// };
MqttTransport::from_csv_bytes(content.as_slice())
}
pub fn from_csv_bytes(content: &[u8]) -> Result<MqttTransport, (usize, usize)> {
let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
let content = content_new.as_slice();
let mut rdr = csv::ReaderBuilder::new()
.has_headers(false)
.from_reader(content);
let mut records = rdr.records();
let rc = (0usize, 1);
let name = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (1usize, 1);
let broker_ip = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (2usize, 1);
let broker_port = csv_u16(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let mqtt_broker = (broker_ip, broker_port);
let rc = (3usize, 1);
let point_num = csv_usize(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (4usize, 1);
let point_id = csv_u64(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (5usize, 1);
let read_topic = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
let rc = (6usize, 1);
let write_topic = csv_string(&records.next().ok_or(rc)?.map_err(|_| rc)?, rc.1).ok_or(rc)?;
// 下面5个是可选的
let rc = (7usize, 1);
let mut user_name = None;
let mut user_password = None;
let mut is_json = false;
let mut is_transfer = false;
let mut keep_alive = None;
if let Some(Ok(line)) = records.next() {
user_name = csv_string(&line, rc.1);
if Some("".to_string()) == user_name {
user_name = None;
}
let rc = (8usize, 1);
if let Some(Ok(line)) = records.next() {
user_password = csv_string(&line, rc.1);
if Some("".to_string()) == user_password {
user_password = None;
}
let record = records.next();
if let Some(Ok(tmp)) = record {
if let Some(v) = csv_string(&tmp, 1) {
if v.to_uppercase() == "TRUE" {
is_json = true;
}
}
}
let record = records.next();
if let Some(Ok(tmp)) = record {
if let Some(v) = csv_string(&tmp, 1) {
if v.to_uppercase() == "TRUE" {
is_transfer = true;
}
}
}
let record = records.next();
if let Some(Ok(tmp)) = record {
if let Some(v) = csv_u16(&tmp, 1) {
keep_alive = Some(v);
}
}
}
}
// 开启读取测点信息
let mut point_ids = Vec::with_capacity(point_num);
// 从新加载
let mut rdr = csv::ReaderBuilder::new()
.has_headers(true)
.from_reader(content);
let mut records = rdr.records();
let mut tmp = HashSet::with_capacity(point_num);
let mut json_filters = Vec::new();
let mut json_tags: HashMap<String, HashMap<String, usize>> = HashMap::new();
let mut write_templates: HashMap<u64, String> = HashMap::new();
let mut write_tags: HashMap<u64, String> = HashMap::new();
let mut is_custom = false;
for i in 0..point_num {
let rc = (i + 1, 3);
let record = records.next().ok_or(rc)?.map_err(|_| rc)?;
if i == 0 {
is_custom = is_json && record.get(6).is_some() && !record.get(6).as_ref().unwrap().is_empty();
}
let id = csv_u64(&record, rc.1).ok_or(rc)?;
// 测点不能重复
if tmp.contains(&id) {
return Err(rc);
}
let rc = (i + 1, 4);
let s = csv_str(&record, rc.1).ok_or(rc)?.to_uppercase();
let is_writable = match s.as_str() {
"FALSE" => false,
"TRUE" => true,
_ => false,
};
point_ids.push((id, is_writable));
tmp.insert(id);
if !is_custom {
continue;
}
// 下面这列是可选的
let rc = (i + 1, 5);
if let Some(s) = csv_str(&record, rc.1) {
let filter = if s.is_empty() {
Value::Object(Map::with_capacity(0))
} else {
serde_json::from_str::<Value>(s).map_err(|_| rc)?
};
let map = split_json(&filter);
let mut k = Vec::with_capacity(map.len() + 1);
let mut keys = Vec::with_capacity(map.len());
for (key, v) in map {
k.push(v);
keys.push(key);
}
let mut is_exist = false;
for i in 0..json_filters.len() {
if json_filters[i] == keys {
is_exist = true;
k.push(Value::Number(Number::from(i)));
break;
}
}
if !is_exist {
k.push(Value::Number(Number::from(json_filters.len())));
json_filters.push(keys);
}
let rc = (i + 1, 6);
let tag = csv_string(&record, rc.1).ok_or(rc)?;
let k_json = Value::Array(k).to_string();
if let Some(m) = json_tags.get_mut(&k_json) {
if m.contains_key(&tag) {
return Err(rc);
}
m.insert(tag, i);
} else {
let mut m = HashMap::new();
m.insert(tag, i);
json_tags.insert(k_json, m);
}
let rc = (i + 1, 7);
if let Some(template) = csv_string(&record, rc.1) {
write_templates.insert(id, template);
let rc = (i + 1, 8);
let tag = csv_string(&record, rc.1).ok_or(rc)?;
write_tags.insert(id, tag);
}
}
}
let json_filters = if !json_filters.is_empty() {
Some(json_filters)
} else {
None
};
let json_tags = if !json_tags.is_empty() {
Some(json_tags)
} else {
None
};
let json_write_template = if !write_templates.is_empty() {
Some(write_templates)
} else {
None
};
let json_write_tag = if !write_tags.is_empty() {
Some(write_tags)
} else {
None
};
Ok(MqttTransport {
id: 0,
name,
mqtt_broker,
point_id,
point_ids,
read_topic,
write_topic,
is_json,
is_transfer,
user_name,
user_password,
keep_alive,
json_filters,
json_tags,
json_write_template,
json_write_tag,
})
}
/// Exports this MQTT transport as a CSV document.
///
/// The output interleaves two tables: the left two columns carry ten fixed
/// configuration rows (broker, topics, credentials, flags) and the remaining
/// columns carry the per-point table (index, point id, writable flag, plus
/// optional JSON filter/tag/template columns when JSON mode is configured).
/// `text_map` supplies localized headers; English defaults fill missing keys.
pub fn export_csv(&self, text_map: &HashMap<String, String>) -> String {
// Localized labels for the ten fixed configuration rows.
let title = vec![
text_map.get("broker_ip").unwrap_or(&"Broker Ip".to_string()).clone(),
text_map.get("broker_port").unwrap_or(&"Broker Port".to_string()).clone(),
text_map.get("point_number").unwrap_or(&"Point Count".to_string()).clone(),
text_map.get("status_point").unwrap_or(&"Status Point".to_string()).clone(),
text_map.get("mqtt_r_topic").unwrap_or(&"Read Topic".to_string()).clone(),
text_map.get("mqtt_w_topic").unwrap_or(&"Write Topic".to_string()).clone(),
text_map.get("user_name").unwrap_or(&"User Name".to_string()).clone(),
text_map.get("user_password").unwrap_or(&"User Password".to_string()).clone(),
text_map.get("json_format").unwrap_or(&"JSON Format".to_string()).clone(),
text_map.get("transfer").unwrap_or(&"Transfer".to_string()).clone(),
];
// Values matching `title` row by row; optional fields appended below.
let mut content = vec![
format!("{}", self.mqtt_broker.0),
format!("{}", self.mqtt_broker.1),
format!("{}", self.point_ids.len()),
format!("{}", self.point_id),
format!("{}", self.read_topic),
format!("{}", self.write_topic),
];
// Optional credentials export as empty cells when absent.
if let Some(user_name) = &self.user_name {
content.push(get_csv_str(user_name.as_str()));
} else {
content.push("".to_string());
};
if let Some(user_password) = &self.user_password {
content.push(get_csv_str(user_password.as_str()));
} else {
content.push("".to_string());
};
content.push(self.is_json.to_string().to_uppercase());
content.push(self.is_transfer.to_string().to_uppercase());
// Header row: transport name plus the per-point table headers.
let mut result = format!(
"{},{},{},{},{}",
text_map.get("tp_name").unwrap_or(&"Transport Name".to_string()),
get_csv_str(&self.name),
text_map.get("index").unwrap_or(&"Index".to_string()),
text_map.get("status_point").unwrap_or(&"Status Point".to_string()).clone(),
text_map.get("is_writable").unwrap_or(&"Is Writable".to_string()).clone(),
).to_string();
// JSON mode adds filter/tag header columns, and write-template/write-tag
// columns when write templates are configured.
if self.json_filters.is_some() {
let s = format!(
",{},{}",
text_map.get("json_read_filter").unwrap_or(&"Json Filter".to_string()).clone(),
text_map.get("json_read_tag").unwrap_or(&"Json Tag".to_string()).clone(),
);
result += &s;
if self.json_write_template.is_some() {
let s = format!(
",{},{}\n",
// NOTE(review): key "json_wirte_template" is misspelled; kept as-is
// because localization files presumably use the same key — verify.
text_map.get("json_wirte_template").unwrap_or(&"Json Write Template".to_string()).clone(),
text_map.get("json_write_tag").unwrap_or(&"Json Write Tag".to_string()).clone(),
);
result += &s;
} else {
result.push('\n');
}
} else {
result.push('\n');
}
// Precompute the "filter,tag" cell text for every point position so the
// row loops below can splice it in by index.
let mut pos_to_str = HashMap::new();
if let Some(map) = &self.json_tags {
for (k, v) in map {
let mut values = vec![];
if let Ok(k_json) = serde_json::from_str::<Value>(k.as_str()) {
if let Value::Array(obj) = k_json {
values = obj;
}
}
if values.is_empty() {
continue;
}
// The last array element is the filter-pattern index appended at
// import time; the preceding elements are the filter values.
let index: usize;
if let Value::Number(n) = &values[values.len() - 1] {
if let Some(i) = n.as_u64() {
index = i as usize;
} else {
continue;
}
} else {
continue;
}
if let Some(filters) = &self.json_filters {
// Rebuild the original filter JSON from keys + values.
let value = merge_json(&filters[index], &values[0..values.len() - 1]);
for (tag, i) in v {
let json_str = value.to_string();
pos_to_str.insert(*i, format!("{},{}", get_csv_str(&json_str), tag));
}
}
}
}
let p = self.point_ids.clone();
// Rows 1-10 carry the fixed settings columns and the point columns side
// by side; missing points leave the right-hand cells empty.
for i in 0_usize..10_usize {
if p.len() > i {
let w_status = if p[i].1 { "TRUE" } else { "FALSE" };
result += &format!(
"{},{},{},{},{}",
title[i],
content[i],
i + 1,
p[i].0,
w_status
);
if let Some(s) = pos_to_str.get(&i) {
result.push(',');
result += &s.as_str();
} else if self.json_filters.is_some() {
// No filter/tag for this point: pad the JSON columns.
result += ",,";
if let Some(templates) = &self.json_write_template {
result += ",";
if let Some(s) = templates.get(&p[i].0) {
result += &get_csv_str(s.as_str());
} else {
result += ",";
}
if let Some(write_tags) = &self.json_write_tag {
if let Some(s) = write_tags.get(&p[i].0) {
result += &get_csv_str(s.as_str());
} else {
result += ",";
}
} else {
result += ",";
}
}
}
result.push('\n');
} else {
// Fewer than ten points: emit the settings row with empty point cells.
result += &format!("{},{},,,", title[i], content[i]);
if self.json_filters.is_some() {
result += ",,";
if self.json_write_template.is_some() {
result += ",,";
}
}
result.push('\n');
}
}
// Points beyond the tenth occupy rows of their own, with the settings
// columns left blank.
if p.len() > 10 {
let mut index = 10_usize;
while index < p.len() {
let w_status = if p[index].1 { "TRUE" } else { "FALSE" };
result += &format!(" ,,{},{},{}", index + 1, p[index].0, w_status);
if let Some(s) = pos_to_str.get(&index) {
result.push(',');
result += &s.as_str();
} else if self.json_filters.is_some() {
result += ",,";
if let Some(templates) = &self.json_write_template {
result += ",";
if let Some(s) = templates.get(&p[index].0) {
result += &get_csv_str(s.as_str());
} else {
result += ",";
}
if let Some(write_tags) = &self.json_write_tag {
if let Some(s) = write_tags.get(&p[index].0) {
result += &get_csv_str(s.as_str());
} else {
result += ",";
}
} else {
result += ",";
}
}
}
result.push('\n');
index += 1;
}
}
result
}
/// Returns the ids of all configured points, in definition order.
pub fn get_point_ids(&self) -> Vec<u64> {
    self.point_ids.iter().map(|(id, _)| *id).collect()
}
}
/// Flattens a JSON value into a map from slash-separated paths to leaf values.
/// For example `{"a":{"b":1}}` becomes `{"a/b": 1}`; a non-object input maps
/// the empty path to the value itself.
pub fn split_json(value: &Value) -> BTreeMap<String, Value> {
    // Depth-first walk accumulating the path built so far.
    fn walk(v: &Value, prefix: String, out: &mut BTreeMap<String, Value>) {
        match v {
            Value::Object(obj) => {
                for (k, child) in obj {
                    let key = if prefix.is_empty() {
                        k.clone()
                    } else {
                        format!("{}/{}", prefix, k)
                    };
                    walk(child, key, out);
                }
            }
            leaf => {
                out.insert(prefix, leaf.clone());
            }
        }
    }
    let mut result = BTreeMap::new();
    walk(value, String::new(), &mut result);
    result
}
/// Inverse of `split_json`: rebuilds a nested JSON object from slash-separated
/// paths (`tags`) and their leaf values (`values`, indexed in lockstep).
/// Panics if `values` is shorter than `tags`, matching the indexed access of
/// the original implementation.
pub fn merge_json(tags: &[String], values: &[Value]) -> Value {
    let mut root = Value::Object(Map::new());
    for i in 0..tags.len() {
        let parts: Vec<&str> = tags[i].split('/').collect();
        // Walk/create intermediate objects down to the leaf's parent.
        let mut node = &mut root;
        for (j, part) in parts.iter().enumerate() {
            if j + 1 == parts.len() {
                node[*part] = values[i].clone();
            } else {
                // Missing keys index as Null on a JSON object, so Null means
                // "not present yet" and we materialize an empty object.
                if node[*part] == Value::Null {
                    node[*part] = Value::Object(Map::new());
                }
                node = node.get_mut(*part).unwrap();
            }
        }
    }
    root
}
\ No newline at end of file
......@@ -2,7 +2,11 @@ use std::{fmt, str::FromStr};
use std::fmt::{Display, Formatter};
use serde::{Deserialize, Serialize};
use crate::DataUnitError;
/// Errors raised while parsing a data-unit string.
#[derive(Debug, Clone, PartialEq)]
pub enum DataUnitError {
// The input did not name a known data unit; carries the raw input string.
UnknownDataUnit(String),
}
/**
* @api {枚举_采集数据类型} /DataType DataType
......@@ -133,6 +137,8 @@ impl FromStr for DataType {
"EightByteIntSigned" => DataType::EightByteIntSigned,
"EightByteIntUnsignedSwapped" => DataType::EightByteIntUnsignedSwapped,
"EightByteIntSignedSwapped" => DataType::EightByteIntSignedSwapped,
"EightByteIntUnsignedSwappedSwapped" => DataType::EightByteIntUnsignedSwappedSwapped,
"EightByteIntSignedSwappedSwapped" => DataType::EightByteIntSignedSwappedSwapped,
"EightByteFloat" => DataType::EightByteFloat,
"EightByteFloatSwapped" => DataType::EightByteFloatSwapped,
"EightByteMod10kSwapped" => DataType::EightByteMod10kSwapped,
......@@ -692,7 +698,7 @@ impl Display for DataUnit {
#[cfg(test)]
mod tests {
use crate::DataUnit;
use crate::prop::DataUnit;
#[test]
fn test() {
......
File mode changed from 100755 to 100644
// Topic prefix for messages flowing INTO the gateway (from users).
static EIG_IN: &str = "GwIn";
// Topic prefix for messages flowing OUT of the gateway (to users).
static EIG_OUT: &str = "GwOut";
// --------------------- from users to gateway -------------------
/// Topic for issuing set-point (control) commands; payload is PbSetPoints.
pub fn set_points(bee_id: &str) -> String {
    format!("{}/C/{}", EIG_IN, bee_id)
}
/// Topic for AOE scheduling commands; payload is PbAoeOperation.
pub fn aoe_control(bee_id: &str) -> String {
    format!("{}/Aoe/{}", EIG_IN, bee_id)
}
/// Topic for a reset command; carries no payload.
pub fn reset(bee_id: &str) -> String {
    format!("{}/Reset/{}", EIG_IN, bee_id)
}
/// Topic for a recover command; carries no payload.
pub fn recover(bee_id: &str) -> String {
    format!("{}/Recover/{}", EIG_IN, bee_id)
}
/// Topic for reloading the AOE definition file; payload is PbFile.
pub fn reload_aoe_file(bee_id: &str) -> String {
    format!("{}/AoeFile/{}", EIG_IN, bee_id)
}
/// Topic for reloading the transport (channel) file; payload is PbFile.
pub fn reload_tp_file(bee_id: &str) -> String {
    format!("{}/TpFile/{}", EIG_IN, bee_id)
}
/// Topic for reloading the measurement-point file; payload is PbFile.
pub fn reload_point_file(bee_id: &str) -> String {
    format!("{}/PtFile/{}", EIG_IN, bee_id)
}
/// Topic for reloading the configuration file; payload is PbFile.
pub fn reload_config_file(bee_id: &str) -> String {
    format!("{}/conf/{}", EIG_IN, bee_id)
}
/// Topic for reloading the SVG file; payload is PbFile.
pub fn reload_svg_file(bee_id: &str) -> String {
    format!("{}/SvgFile/{}", EIG_IN, bee_id)
}
/// Topic for requesting all current data; carries no payload.
pub fn call_all(bee_id: &str) -> String {
    format!("{}/AM/{}", EIG_IN, bee_id)
}
/// Topic for a gateway ping request; carries no payload.
pub fn gw_ping_req() -> String {
    format!("{}/PING/REQ", EIG_IN)
}
// --------------------- from gateway to users ------------------------
/// Topic for changed-measurement uploads; payload is PbPointValues.
pub fn measure_changed(bee_id: &str) -> String {
    format!("{}/SM_/{}", EIG_OUT, bee_id)
}
/// Topic answering a call-all request with every current measurement;
/// payload is PbPointValues.
pub fn call_alled(bee_id: &str) -> String {
    format!("{}/AM_/{}", EIG_OUT, bee_id)
}
/// Topic for the gateway's summary of its transport, point and SVG files;
/// payload is PbEigProfile.
pub fn gw_peeked(bee_id: &str) -> String {
    format!("{}/GP_/{}", EIG_OUT, bee_id)
}
/// Topic for the gateway ping response; payload is pbEigPing.
pub fn gw_ping_res() -> String {
    format!("{}/PING/RES", EIG_OUT)
}
/// Topic carrying a file stored on the gateway; payload is PbFile.
pub fn gw_file_res(file_url: &str) -> String {
    format!("{}/FR_/{}", EIG_OUT, file_url)
}
/// Topic for gateway alarms; payload is PbEigAlarms.
pub fn gw_alarmed(bee_id: &str) -> String {
    format!("{}/ALARM_/{}", EIG_OUT, bee_id)
}
/// Topic for gateway log output addressed by file URL.
pub fn gw_loged(file_url: &str) -> String {
    format!("{}/LOG_/{}", EIG_OUT, file_url)
}
/// Topic for set-point command results; payload is PbSetPointResults.
pub fn set_points_result(bee_id: &str) -> String {
    format!("{}/C_/{}", EIG_OUT, bee_id)
}
/// Topic for AOE execution results; payload is PbAoeResult.
pub fn aoe_executed(bee_id: &str) -> String {
    format!("{}/AH_/{}", EIG_OUT, bee_id)
}
/// Topic used by a standby/backup instance for the given gateway id.
// Consistency fix: use the captured-identifier format! style employed by
// every other topic builder in this file instead of a positional "{}".
pub fn standby_topic(bee_id: &str) -> String {
    format!("standby/{bee_id}")
}
\ No newline at end of file
use std::collections::HashMap;
use serde::{Serialize, Deserialize};
use crate::PbEigPingRes;
use crate::proto::eig::PbEigPingRes;
/**
* @api {Eig配置对象} /EigConfig EigConfig
......
......@@ -3,7 +3,7 @@ use std::collections::HashMap;
use ndarray::{Array, array, Array2, Ix2};
use num_complex::{Complex64, ComplexFloat};
use eig_domain::DataUnit;
use eig_domain::prop::DataUnit;
use mems::model::dev::MeasPhase;
pub fn get_pf_nlp_constraints(
......
......@@ -3,7 +3,7 @@ use std::collections::HashMap;
use csv::StringRecordsIter;
use ndarray::Array2;
use eig_domain::DataUnit;
use eig_domain::prop::DataUnit;
const MAT_SIZE: usize = 18;
......
use std::collections::HashMap;
use csv::StringRecordsIter;
use eig_domain::DataUnit;
use eig_domain::prop::DataUnit;
use mems::model::dev::MeasPhase;
pub fn read_shunt_measures(records: &mut StringRecordsIter<&[u8]>)
......
......@@ -6,7 +6,7 @@ use csv::StringRecordsIter;
use log::{info, warn};
use ndarray::{Array2, ArrayBase, Ix2, OwnedRepr};
use eig_domain::PropValue;
use eig_domain::prop::PropValue;
use mems::model::{get_csv_str, get_df_from_in_plugin, get_island_from_plugin_input, PluginInput, PluginOutput};
use mems::model::dev::PsRsrType;
......
......@@ -5,7 +5,7 @@ use bytes::{Buf, BufMut, BytesMut};
use ds_common::{DEV_TOPO_DF_NAME, DYN_TOPO_DF_NAME, POINT_DF_NAME, STATIC_TOPO_DF_NAME, TERMINAL_DF_NAME};
use ds_common::static_topo::{read_point_terminal, read_static_topo, read_terminal_cn_dev};
use eig_domain::DataUnit;
use eig_domain::prop::DataUnit;
use mems::model::{get_df_from_in_plugin, get_meas_from_plugin_input, ModelType, PluginInput, PluginOutput};
static mut OUTPUT: Vec<u8> = vec![];
......
......@@ -10,7 +10,7 @@ use ds_common::{DEV_TOPO_DF_NAME, POINT_DF_NAME, SHUNT_MEAS_DF_NAME, TERMINAL_DF
use ds_common::dyn_topo::read_dev_topo;
use ds_common::static_topo::{read_point_terminal, read_terminal_cn_dev};
use ds_common::tn_input::read_shunt_measures;
use eig_domain::{DataUnit, MeasureValue};
use eig_domain::{prop::DataUnit, MeasureValue};
use mems::model::{get_df_from_in_plugin, get_meas_from_plugin_input, PluginInput, PluginOutput};
use mems::model::dev::{MeasPhase, PsRsrType};
......
use std::cmp::PartialOrd;
use std::collections::HashMap;
use std::fmt;
use std::fs::File;
use std::hash::Hash;
use std::io::Write;
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use eig_domain::{csv_f64, csv_str, csv_string, csv_u64, csv_u8, DataUnit, Measurement, MeasureValue, prop::*};
use eig_domain::excel::transfer_to_utf8;
use eig_domain::prop::*;
/**
* @api {枚举_电力设备类型} /PsRsrType PsRsrType
......@@ -488,505 +485,6 @@ pub struct Island {
pub cns: Vec<CN>,
}
/// Writes `content` to `path`, creating or truncating the file.
/// Panics if the file cannot be created or written (same as the original,
/// which unwrapped every I/O step).
pub fn write_to_file(path: &str, content: &[u8]) {
    // fs::write = create + write_all; flushing a std File is a no-op anyway.
    std::fs::write(path, content).unwrap();
}
/// Loads property definitions from the CSV file at `path`.
/// Returns `Err((row, col))` identifying the offending cell on failure;
/// `(0, 0)` indicates the file could not be read at all.
pub fn prop_def_from_csv(path: &str) -> Result<Vec<PropDefine>, (usize, usize)> {
    let raw = std::fs::read(path).map_err(|_| (0, 0))?;
    prop_def_from_csv_bytes(&raw, true)
}
/// Parses property definitions from raw CSV bytes (any supported encoding).
/// `has_headers` skips the first row and offsets reported row numbers.
/// Returns `Err((row, col))` pointing at the cell that failed to parse.
pub fn prop_def_from_csv_bytes(
    content: &[u8],
    has_headers: bool,
) -> Result<Vec<PropDefine>, (usize, usize)> {
    // Normalize the byte stream to UTF-8 before handing it to the CSV reader.
    let utf8 = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
    let mut reader = csv::ReaderBuilder::new()
        .has_headers(has_headers)
        .from_reader(utf8.as_slice());
    let mut row: usize = if has_headers { 1 } else { 0 };
    let mut props: Vec<PropDefine> = Vec::new();
    let mut records = reader.records();
    // Column 0 holds the display index; data starts at column 1.
    // A malformed record terminates the loop, matching the original.
    while let Some(Ok(record)) = records.next() {
        let id = csv_u64(&record, 1).ok_or((row, 1))?;
        let name = csv_string(&record, 2).ok_or((row, 2))?;
        let desc = csv_string(&record, 3).ok_or((row, 3))?;
        let type_str = csv_str(&record, 4).ok_or((row, 4))?;
        // Unrecognized type strings silently fall back to the default.
        let data_type = PropType::from_str(type_str).unwrap_or_default();
        let unit_str = csv_str(&record, 5).ok_or((row, 5))?;
        let data_unit = DataUnit::from_str(unit_str).map_err(|_| (row, 5))?;
        props.push(PropDefine { id, name, desc, data_type, data_unit });
        row += 1;
    }
    props.shrink_to_fit();
    Ok(props)
}
/// Loads device (resource) definitions from the CSV file at `path`.
/// Returns `Err((row, col))` identifying the offending cell on failure;
/// `(0, 0)` indicates the file could not be read at all.
pub fn dev_def_from_csv(path: &str) -> Result<Vec<RsrDefine>, (usize, usize)> {
    let raw = std::fs::read(path).map_err(|_| (0, 0))?;
    dev_def_from_csv_bytes(&raw, true)
}
/// Parses device (resource) definitions from raw CSV bytes.
/// Fixed columns: index, id, type, name, description, terminal count; then a
/// variable number of property-group triples (name, description, ";"-joined
/// property-definition ids) until an empty name or the end of the record.
/// Returns `Err((row, col))` pointing at the cell that failed to parse.
pub fn dev_def_from_csv_bytes(
content: &[u8],
has_headers: bool,
) -> Result<Vec<RsrDefine>, (usize, usize)> {
// Normalize encoding to UTF-8 before parsing.
let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
let content = content_new.as_slice();
let mut rdr = csv::ReaderBuilder::new()
.has_headers(has_headers)
.from_reader(content);
let start_row = if has_headers { 1 } else { 0 };
let mut records = rdr.records();
// Column 0 is a display index; data begins at column `offset`.
let offset: usize = 1;
let mut row: usize = start_row;
let mut defines = vec![];
// A malformed record silently ends the loop (original behavior).
while let Some(Ok(record)) = records.next() {
let rc = (row, offset);
let id = csv_u64(&record, rc.1).ok_or(rc)?;
let rc = (row, offset + 1);
let dev_type_str = csv_string(&record, rc.1).ok_or(rc)?;
let rsr_type = PsRsrType::from(dev_type_str);
let rc = (row, offset + 2);
let name = csv_string(&record, rc.1).ok_or(rc)?;
let rc = (row, offset + 3);
let desc = csv_string(&record, rc.1).ok_or(rc)?;
let rc = (row, offset + 4);
let terminal_num = csv_u8(&record, rc.1).ok_or(rc)?;
// Property-group triples start at column offset+5.
let mut col = 5;
let mut prop_groups = Vec::new();
loop {
// Stop once the current column is past the end of the record.
if record.len() <= offset + col {
break;
}
let rc = (row, offset + col);
let name = csv_string(&record, rc.1).ok_or(rc)?;
// An empty group name terminates the group list.
if name.is_empty() {
break;
}
let rc = (row, offset + col + 1);
let desc = csv_string(&record, rc.1).ok_or(rc)?;
let rc = (row, offset + col + 2);
let prop_def_ids = csv_str(&record, rc.1).ok_or(rc)?;
// Property-definition ids are ";"-separated; any bad id aborts
// with this cell's coordinates.
let r: Vec<Result<u64, (usize, usize)>> = if !prop_def_ids.is_empty() {
prop_def_ids.split(';')
.map(|s| s.parse::<u64>().map_err(|_| rc))
.collect()
} else {
vec![]
};
let mut prop_defines = Vec::with_capacity(r.len());
for id in r {
prop_defines.push(id?);
}
prop_groups.push(PropGroupDefine { name, desc, prop_defines });
col += 3;
}
prop_groups.shrink_to_fit();
defines.push(RsrDefine { id, rsr_type, name, desc, terminal_num, prop_groups });
row += 1;
}
defines.shrink_to_fit();
Ok(defines)
}
/// Loads concrete devices and their property groups from the CSV file at
/// `path`, resolving them against the supplied device and property defines.
/// Returns `Err((row, col))` identifying the offending cell on failure;
/// `(0, 0)` indicates the file could not be read at all.
pub fn dev_from_csv(
    path: &str,
    defines: &HashMap<u64, RsrDefine>,
    prop_defines: &HashMap<u64, PropDefine>,
) -> Result<(HashMap<u64, NetworkRsr>, HashMap<u64, RsrPropGroup>), (usize, usize)> {
    let raw = std::fs::read(path).map_err(|_| (0, 0))?;
    dev_from_csv_bytes(&raw, true, defines, prop_defines)
}
/// Parses concrete devices and their property groups from raw CSV bytes.
/// A device may span multiple rows — one per property group; only the first
/// row for a device id carries the name/desc/terminal/container columns.
/// Returns the devices keyed by resource id and the property groups keyed by
/// group id, or `Err((row, col))` pointing at the cell that failed to parse.
pub fn dev_from_csv_bytes(
content: &[u8],
has_headers: bool,
defines: &HashMap<u64, RsrDefine>,
prop_defines: &HashMap<u64, PropDefine>,
) -> Result<(HashMap<u64, NetworkRsr>, HashMap<u64, RsrPropGroup>), (usize, usize)> {
// Normalize encoding to UTF-8 before parsing.
let content_new = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
let content = content_new.as_slice();
let mut rdr = csv::ReaderBuilder::new()
.has_headers(has_headers)
.from_reader(content);
let start_row = if has_headers { 1 } else { 0 };
let mut records = rdr.records();
// Column 0 is a display index; data begins at column `offset`.
let offset: usize = 1;
let mut row: usize = start_row;
let mut devs: HashMap<u64, NetworkRsr> = HashMap::new();
let mut prop_groups: HashMap<u64, RsrPropGroup> = HashMap::new();
// A malformed record silently ends the loop (original behavior).
while let Some(Ok(record)) = records.next() {
let rc = (row, offset);
let rsr_id = csv_u64(&record, rc.1).ok_or(rc)?;
let rc = (row, offset + 1);
let dev_define_id = csv_u64(&record, rc.1).ok_or(rc)?;
// The device define must already be known; otherwise report this cell.
let define = defines.get(&dev_define_id).ok_or(rc)?;
let rc = (row, offset + 6);
let prop_group_name = csv_str(&record, rc.1).ok_or(rc)?;
let mut prop_group_id = None;
if !prop_group_name.is_empty() {
// Find the matching group definition on the device define by name.
for prop_def in define.prop_groups.iter() {
if prop_group_name == prop_def.name {
let rc = (row, offset + 7);
let group_id = csv_u64(&record, rc.1).ok_or(rc)?;
// Property values follow as (prop-define-id, value) pairs.
let mut col = offset + 8;
let mut props = Vec::with_capacity(prop_def.prop_defines.len());
let mut defines = Vec::with_capacity(prop_def.prop_defines.len());
for _ in 0..prop_def.prop_defines.len() {
let rc = (row, col);
// Missing or unknown ids are skipped rather than erroring.
if let Some(prop_def_id) = csv_u64(&record, rc.1) {
let rc = (row, col + 1);
if let Some(prop_def) = prop_defines.get(&prop_def_id) {
if let Some(s) = csv_str(&record, rc.1) {
let t = prop_def.data_type;
let prop = PropValue::from_str(t, s).ok_or(rc)?;
props.push(prop);
defines.push(prop_def_id);
}
}
}
col += 2;
}
let group = RsrPropGroup {
id: group_id,
name: prop_group_name.to_string(),
rsr_id,
defines,
props,
};
prop_groups.insert(group_id, group);
prop_group_id = Some(group_id);
break;
}
}
}
if let Some(dev) = devs.get_mut(&rsr_id) {
// Continuation row for an existing device: just attach the group.
if let Some(group_id) = &prop_group_id {
dev.prop_group_ids.push(*group_id);
}
} else {
// First row for this device: read the descriptive columns.
let rc = (row, offset + 2);
let name = csv_string(&record, rc.1).ok_or(rc)?;
let rc = (row, offset + 3);
let desc = csv_string(&record, rc.1).ok_or(rc)?;
let rc = (row, offset + 4);
let terminal_id_str = csv_str(&record, rc.1).ok_or(rc)?;
// Terminal ids are ";"-separated; any bad id aborts with this cell.
let r: Vec<Result<u64, (usize, usize)>> = if terminal_id_str.is_empty() {
vec![]
} else {
terminal_id_str.split(';')
.map(|s| s.parse::<u64>().map_err(|_| rc))
.collect()
};
let mut dev_terminals = Vec::with_capacity(r.len());
for tid in r {
let terminal_id = tid?;
dev_terminals.push(Terminal {
device: rsr_id,
id: terminal_id,
});
}
let rc = (row, offset + 5);
let container_id = csv_string(&record, rc.1).ok_or(rc)?;
// An empty container cell means a top-level (root) resource.
let container_id = if container_id.is_empty() {
None
} else {
Some(container_id.parse::<u64>().map_err(|_| rc)?)
};
let prop_groups = if let Some(group_id) = &prop_group_id {
vec![*group_id]
} else {
vec![]
};
let rsr = NetworkRsr {
id: rsr_id,
define_id: dev_define_id,
name,
desc,
container_id,
terminals: dev_terminals,
prop_group_ids: prop_groups,
};
devs.insert(rsr_id, rsr);
}
row += 1;
}
Ok((devs, prop_groups))
}
/// Loads measurement definitions from the CSV file at `path`.
/// Returns `Err((row, col))` identifying the offending cell on failure;
/// `(0, 0)` indicates the file could not be read at all.
pub fn meas_def_from_csv(
    path: &str,
) -> Result<Vec<MeasureDef>, (usize, usize)> {
    let raw = std::fs::read(path).map_err(|_| (0, 0))?;
    meas_def_from_csv_bytes(&raw, true)
}
/// Parses measurement definitions from raw CSV bytes.
/// Columns: index, id, point id, device id, terminal id, optional phase.
/// Returns `Err((row, col))` pointing at the cell that failed to parse.
pub fn meas_def_from_csv_bytes(
    content: &[u8],
    has_headers: bool,
) -> Result<Vec<MeasureDef>, (usize, usize)> {
    // Normalize the byte stream to UTF-8 before handing it to the CSV reader.
    let utf8 = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
    let mut reader = csv::ReaderBuilder::new()
        .has_headers(has_headers)
        .from_reader(utf8.as_slice());
    let mut row: usize = if has_headers { 1 } else { 0 };
    let mut defs: Vec<MeasureDef> = Vec::new();
    let mut records = reader.records();
    // Column 0 holds the display index; data starts at column 1.
    // A malformed record terminates the loop, matching the original.
    while let Some(Ok(record)) = records.next() {
        let id = csv_u64(&record, 1).ok_or((row, 1))?;
        let point_id = csv_u64(&record, 2).ok_or((row, 2))?;
        let dev_id = csv_u64(&record, 3).ok_or((row, 3))?;
        let terminal_id = csv_u64(&record, 4).ok_or((row, 4))?;
        // A missing phase column defaults to Unknown.
        let phase = match csv_str(&record, 5) {
            Some(s) => MeasPhase::from(s),
            None => MeasPhase::Unknown,
        };
        defs.push(MeasureDef { id, point_id, terminal_id, dev_id, phase });
        row += 1;
    }
    Ok(defs)
}
/// Loads measurement values from the CSV file at `path`.
/// Returns `Err((row, col))` identifying the offending cell on failure;
/// `(0, 0)` indicates the file could not be read at all.
pub fn measures_from_csv(
    path: &str,
) -> Result<Vec<MeasureValue>, (usize, usize)> {
    let raw = std::fs::read(path).map_err(|_| (0, 0))?;
    measures_from_csv_bytes(&raw, true)
}
/// Parses measurement values from raw CSV bytes.
/// Columns: index, point id, analog value. Every value is stored as a
/// non-discrete, non-transformed measurement with a zero timestamp; the
/// discrete field mirrors the analog value truncated to an integer.
/// Returns `Err((row, col))` pointing at the cell that failed to parse.
pub fn measures_from_csv_bytes(
    content: &[u8],
    has_headers: bool,
) -> Result<Vec<MeasureValue>, (usize, usize)> {
    // Normalize the byte stream to UTF-8 before handing it to the CSV reader.
    let utf8 = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
    let mut reader = csv::ReaderBuilder::new()
        .has_headers(has_headers)
        .from_reader(utf8.as_slice());
    let mut row: usize = if has_headers { 1 } else { 0 };
    let mut values: Vec<MeasureValue> = Vec::new();
    let mut records = reader.records();
    // Column 0 holds the display index; data starts at column 1.
    while let Some(Ok(record)) = records.next() {
        let point_id = csv_u64(&record, 1).ok_or((row, 1))?;
        let value = csv_f64(&record, 2).ok_or((row, 2))?;
        values.push(MeasureValue {
            point_id,
            is_discrete: false,
            timestamp: 0,
            analog_value: value,
            discrete_value: value as i64,
            is_transformed: false,
            transformed_analog: 0.0,
            transformed_discrete: 0,
        });
        row += 1;
    }
    Ok(values)
}
/// Loads connectivity nodes (CNs) from the CSV file at `path`.
/// Returns `Err((row, col))` identifying the offending cell on failure;
/// `(0, 0)` indicates the file could not be read at all.
pub fn cns_from_csv(
    path: &str,
) -> Result<Vec<CN>, (usize, usize)> {
    let raw = std::fs::read(path).map_err(|_| (0, 0))?;
    cns_from_csv_bytes(&raw, true)
}
/// Parses connectivity nodes from raw CSV bytes. Each row is one
/// (CN id, terminal id) pair; rows sharing a CN id are merged into a single
/// `CN` whose terminal list accumulates in row order.
/// Returns `Err((row, col))` pointing at the cell that failed to parse.
/// Note: the returned order follows `HashMap::into_values` and is unspecified.
pub fn cns_from_csv_bytes(
    content: &[u8],
    has_headers: bool,
) -> Result<Vec<CN>, (usize, usize)> {
    // Normalize the byte stream to UTF-8 before handing it to the CSV reader.
    let utf8 = transfer_to_utf8(content.to_vec()).map_err(|_| (0, 0))?;
    let mut reader = csv::ReaderBuilder::new()
        .has_headers(has_headers)
        .from_reader(utf8.as_slice());
    let mut row: usize = if has_headers { 1 } else { 0 };
    let mut by_id: HashMap<u64, CN> = HashMap::new();
    let mut records = reader.records();
    // Unlike the other parsers in this file, data starts at column 0 here.
    while let Some(Ok(record)) = records.next() {
        let cn_id = csv_u64(&record, 0).ok_or((row, 0))?;
        let terminal = csv_u64(&record, 1).ok_or((row, 1))?;
        by_id
            .entry(cn_id)
            .or_insert(CN { id: cn_id, terminals: vec![] })
            .terminals
            .push(terminal);
        row += 1;
    }
    Ok(by_id.into_values().collect())
}
impl CN {
    /// Builds the two-column CSV header for CN export, localized via
    /// `text_map` with English fallbacks.
    pub fn get_csv_header(text_map: &HashMap<String, String>) -> String {
        let default_cn = "CN".to_string();
        let default_terminal = "Terminal".to_string();
        let cn = text_map.get("CN").unwrap_or(&default_cn);
        let terminal = text_map.get("terminal").unwrap_or(&default_terminal);
        format!("{},{}", cn, terminal)
    }
    /// Serializes this CN as CSV rows, one "\n<id>,<terminal>" fragment per
    /// terminal (each row starts with a newline, matching the header above).
    pub fn to_csv_str(&self) -> String {
        self.terminals
            .iter()
            .map(|t| format!("\n{},{}", self.id, t))
            .collect()
    }
}
impl RsrDefine {
/// Creates an empty resource instance from this definition, with capacity
/// reserved for its terminals and property groups.
pub fn create_rsr(&self) -> NetworkRsr {
NetworkRsr {
id: 0,
define_id: self.id,
name: "".to_string(),
desc: "".to_string(),
container_id: None,
terminals: Vec::with_capacity(self.terminal_num as usize),
prop_group_ids: Vec::with_capacity(self.prop_groups.len()),
}
}
/// Serializes this definition as one CSV row. `max_prop_group_len` is the
/// widest group count across all definitions; shorter definitions are
/// padded with empty triples so every row has the same column count.
pub fn to_csv_str(&self, max_prop_group_len: usize) -> String {
let mut prop_group_result = "".to_string();
for group_index in 0..max_prop_group_len {
if self.prop_groups.len() > group_index {
let prop_group = &self.prop_groups[group_index];
// Property-definition ids are joined with ";" into a single cell.
let prop_defines_result = &prop_group.prop_defines
.iter()
.map(|c| c.to_string())
.collect::<Vec<_>>()
.join(";");
prop_group_result += &format!(",{},{},{}", prop_group.name.clone(), prop_group.desc.clone(), prop_defines_result);
} else {
// Pad missing groups with an empty (name, desc, ids) triple.
prop_group_result += ",,,";
}
}
format!("{},{},{},{},{}{}", self.id, self.rsr_type, self.name,
self.desc, self.terminal_num, prop_group_result)
}
/// Builds the CSV header matching `to_csv_str`, localized via `text_map`
/// with English fallbacks; emits one (name, desc, property) header triple
/// per group up to `max_prop_group_len`.
pub fn get_csv_header(text_map: &HashMap<String, String>, max_prop_group_len: usize) -> String {
// Build the repeated property-group header columns.
let mut group_header = "".to_string();
for i in 0..max_prop_group_len {
let group_header_name = &format!("{}{}_{}",
text_map.get("my_devpropgroupdefine")
.unwrap_or(&"Property Group".to_string()),
i + 1,
text_map.get("name").unwrap_or(&"Name".to_string()),
);
let group_header_desc = &format!("{}{}_{}",
text_map.get("my_devpropgroupdefine")
.unwrap_or(&"Property Group".to_string()),
i + 1,
text_map.get("desc").unwrap_or(&"Description".to_string()),
);
let group_header_prop = &format!("{}{}_{}",
text_map.get("my_devpropgroupdefine")
.unwrap_or(&"Property Group".to_string()),
i + 1,
text_map.get("dev_property")
.unwrap_or(&"Property".to_string()),
);
group_header += &format!(",{},{},{}",
group_header_name,
group_header_desc,
group_header_prop,
);
}
format!(
"{},{},{},{},{},{}{}",
text_map.get("index").unwrap_or(&"Index".to_string()),
text_map.get("devdefine_id").unwrap_or(&"ID".to_string()),
text_map.get("rsr_type").unwrap_or(&"Resource Type".to_string()),
text_map.get("devdefine_name").unwrap_or(&"Name".to_string()),
text_map.get("devdefine_desc").unwrap_or(&"Description".to_string()),
text_map.get("devdefine_terminal_num").unwrap_or(&"Terminal Number".to_string()),
group_header
)
}
}
impl PropDefine {
/// Builds the CSV header matching `to_csv_str`, localized via `text_map`
/// with English fallbacks.
pub fn get_csv_header(text_map: &HashMap<String, String>) -> String {
format!(
// NOTE(review): the format string has a stray space before the last
// placeholder ("{}, {}"), producing a header cell with a leading
// space — likely a typo, but kept because importers may match on it.
"{},{},{},{},{}, {}",
text_map.get("index").unwrap_or(&"Index".to_string()),
text_map.get("devpropdefine_id").unwrap_or(&"ID".to_string()),
text_map.get("devpropdefine_name").unwrap_or(&"Name".to_string()),
text_map.get("devpropdefine_desc").unwrap_or(&"Description".to_string()),
text_map.get("devpropdefine_datatype").unwrap_or(&"Data Type".to_string()),
text_map.get("devpropdefine_dataunit").unwrap_or(&"Data Unit".to_string()),
)
}
/// Serializes this property definition as one CSV row (no leading index).
pub fn to_csv_str(&self) -> String {
format!("{},{},{},{},{}", self.id, self.name, self.desc, self.data_type, self.data_unit)
}
}
impl NetworkRsr {
// 获取设备类型
......@@ -1016,7 +514,7 @@ impl NetworkRsr {
}
pub fn get_prop_value2(&self, prop_name: &str, prop_groups: &HashMap<u64, RsrPropGroup>,
prop_defs: &HashMap<u64, &PropDefine>) -> PropValue {
prop_defs: &HashMap<u64, &PropDefine>) -> PropValue {
for prop_group_id in &self.prop_group_ids {
if let Some(rpg) = prop_groups.get(prop_group_id) {
for i in 0..rpg.defines.len() {
......@@ -1030,286 +528,4 @@ impl NetworkRsr {
}
PropValue::Unknown
}
/// Looks up a property value on this resource by the property definition's
/// description text; returns `PropValue::Unknown` when no match is found.
pub fn get_prop_value_by_desc(&self, prop_desc: &str, prop_groups: &HashMap<u64, RsrPropGroup>,
                              prop_defs: &HashMap<u64, PropDefine>) -> PropValue {
    for group_id in &self.prop_group_ids {
        let group = match prop_groups.get(group_id) {
            Some(g) => g,
            None => continue,
        };
        // `defines` and `props` are parallel vectors: slot i of one
        // corresponds to slot i of the other.
        for (slot, def_id) in group.defines.iter().enumerate() {
            if let Some(def) = prop_defs.get(def_id) {
                if def.desc == prop_desc {
                    return group.props[slot].clone();
                }
            }
        }
    }
    PropValue::Unknown
}
/// Builds the CSV header for device export, localized via `text_map` with
/// English fallbacks. After the fixed device columns come the group name/id
/// columns and `max_prop_len` repeated (property id, property value) pairs.
pub fn get_csv_header(text_map: &HashMap<String, String>, max_prop_len: usize) -> String {
let mut group_header = format!(",{},{}",
text_map.get("devpropgroupdefine_name").unwrap_or(&"Name".to_string()),
text_map.get("devpropgroupdefine_id").unwrap_or(&"ID".to_string()),
);
// One (id, value) header pair per possible property slot.
for _ in 0..max_prop_len {
group_header += &format!(",{},{}",
text_map.get("dev_property_id").unwrap_or(&"Property ID".to_string()),
text_map.get("dev_property").unwrap_or(&"Property Value".to_string())
);
}
format!(
"{},{},{},{},{},{},{}{}",
text_map.get("index").unwrap_or(&"Index".to_string()),
text_map.get("dev_id").unwrap_or(&"ID".to_string()),
text_map.get("dev_define_id").unwrap_or(&"Device Define Id".to_string()),
text_map.get("dev_name").unwrap_or(&"Name".to_string()),
text_map.get("dev_desc").unwrap_or(&"Description".to_string()),
text_map.get("dev_terminal").unwrap_or(&"Terminal".to_string()),
text_map.get("dev_container").unwrap_or(&"Container".to_string()),
group_header
)
}
/// Serializes this device as CSV. A device with property groups produces
/// one row per group (the `index` cell appears only on the first row); a
/// device without groups produces a single row padded with empty property
/// columns. `max_prop_len` fixes the number of (id, value) pairs per row.
pub fn to_csv_str(&self, index: usize, max_prop_len: usize,
prop_group_map: &HashMap<u64, RsrPropGroup>) -> String {
// Terminal ids are joined with ";" into a single cell.
let dev_terminals = &self.terminals
.iter()
.map(|c| c.id.to_string())
.collect::<Vec<_>>()
.join(";");
let container_id = if let Some(container_id) = &self.container_id {
container_id.to_string()
} else {
"".to_string()
};
if !self.prop_group_ids.is_empty() {
let mut result = "".to_string();
// With property groups: emit one row per group.
for (group_index, prop_group_id) in self.prop_group_ids.iter().enumerate() {
if let Some(prop_group) = prop_group_map.get(prop_group_id) {
let mut prop_group_result = format!(",{},{}", prop_group.name, prop_group.id);
for prop_index in 0..max_prop_len {
if prop_group.props.len() > prop_index {
// `defines` and `props` are parallel vectors.
let prop_id = &prop_group.defines[prop_index];
let prop = &prop_group.props[prop_index];
prop_group_result += &format!(",{}", prop_id);
prop_group_result += &format!(",{}", prop);
} else {
// Pad unused property slots with empty cells.
prop_group_result += ",,";
}
}
// Only the first row of a device carries its display index.
let sn = if group_index == 0 { index.to_string() } else { "".to_string() };
result += &format!("{},{},{},{},{},{},{}{}", sn, self.id, self.define_id, self.name,
self.desc, dev_terminals, container_id, prop_group_result);
if group_index != self.prop_group_ids.len() - 1 {
result += "\n";
}
}
}
result
} else {
// Without property groups: pad the group and property columns empty.
let mut prop_group_result = ",,".to_string();
for _ in 0..max_prop_len {
prop_group_result += ",,";
}
format!("{},{},{},{},{},{},{}{}", index, self.id, self.define_id, self.name,
self.desc, dev_terminals, container_id, prop_group_result)
}
}
impl Island {
/// Builds a path string ("/parent(id)/child(id)") for every resource,
/// keyed by resource id, by repeatedly resolving children whose parent
/// path is already known.
///
/// Fix: the original looped until every resource was resolved, so a
/// `container_id` referencing a missing resource — or a containment cycle —
/// made the loop spin forever. We now stop as soon as a full pass resolves
/// nothing new; unresolvable resources are simply absent from the result.
pub fn create_dev_tree(&self) -> HashMap<u64, String> {
    let mut result = HashMap::new();
    let ids: Vec<u64> = self.resources.keys().copied().collect();
    let mut resolved = vec![false; ids.len()];
    loop {
        let mut progressed = false;
        for i in 0..ids.len() {
            if resolved[i] {
                continue;
            }
            let id = ids[i];
            let rsr = self.resources.get(&id).unwrap();
            match &rsr.container_id {
                Some(parent_id) => {
                    // Child is resolvable only once its parent's path exists.
                    if let Some(parent_path) = result.get(parent_id) {
                        let path = format!("{}/{}({})", parent_path, rsr.name, id);
                        result.insert(id, path);
                        resolved[i] = true;
                        progressed = true;
                    }
                }
                None => {
                    // No container: this is a root node.
                    result.insert(id, format!("/{}({})", rsr.name, id));
                    resolved[i] = true;
                    progressed = true;
                }
            }
        }
        // Done when everything resolved, or stuck (dangling parent / cycle).
        if result.len() == self.resources.len() || !progressed {
            break;
        }
    }
    result
}
/// Extends the device tree with one entry per measurement, keyed by point
/// id; each point's path is its owning device's path plus the point id.
/// Measurements whose device has no path are skipped.
pub fn create_dev_tree_with_measure(&self) -> HashMap<u64, String> {
    let mut tree = self.create_dev_tree();
    for (rsr_id, defs) in self.measures.iter() {
        for def in defs {
            // Look the device path up per measurement, as the original did.
            let path = match tree.get(rsr_id) {
                Some(dev_path) => format!("{}/{}", dev_path, def.point_id),
                None => continue,
            };
            tree.insert(def.point_id, path);
        }
    }
    tree
}
pub fn create_measure_tree(&self, point_names: &HashMap<u64, String>) -> HashMap<u64, String> {
let mut result = self.create_dev_tree();
for (rsr_id, meas_defs) in self.measures.iter() {
for meas_def in meas_defs {
let point_id = meas_def.point_id;
let point_name = if let Some(name) = point_names.get(&point_id) {
format!("{}({})", name, point_id)
} else {
format!("Not found({})", point_id)
};
let path = format!("{}/{}", result.get(rsr_id).unwrap(), point_name);
result.insert(point_id, path);
}
}
result
}
pub fn create_measure_tree2(&self, points: &HashMap<u64, Measurement>) -> HashMap<u64, String> {
let mut result = self.create_dev_tree();
for (rsr_id, meas_defs) in self.measures.iter() {
for meas_def in meas_defs {
let point_id = meas_def.point_id;
let point_name = if let Some(p) = points.get(&point_id) {
format!("{}({})", p.point_name, point_id)
} else {
format!("Not found({})", point_id)
};
let path = format!("{}/{}", result.get(rsr_id).unwrap(), point_name);
result.insert(point_id, path);
}
}
result
}
pub fn to_dev_csv_str(&self, text_map: &HashMap<String, String>) -> String {
// 先循环一遍,记录最大的属性数量
let mut max_prop_len = 0;
for dev in self.resources.values() {
for prop_group_id in &dev.prop_group_ids {
if let Some(prop_group) = self.prop_groups.get(prop_group_id) {
if max_prop_len < prop_group.props.len() {
max_prop_len = prop_group.props.len();
}
}
}
}
// 组建表头
let mut result = NetworkRsr::get_csv_header(text_map, max_prop_len);
if !self.resources.is_empty() {
result.push('\n');
}
// 组建表体
let mut index = 0;
for dev in self.resources.values() {
index += 1;
result += &dev.to_csv_str(index, max_prop_len, &self.prop_groups);
if index < self.resources.len() {
result.push('\n');
}
}
result
}
pub fn to_meas_csv_str(&self, text_map: &HashMap<String, String>) -> String {
// 组建表头
let mut result = MeasureDef::get_csv_header(text_map);
if !self.measures.is_empty() {
result.push('\n');
}
// 组建表体
let mut count = 0;
for defs in self.measures.values() {
count += defs.len();
}
let mut index = 0;
for defs in self.measures.values() {
for def in defs {
index += 1;
result.push_str(&index.to_string());
result.push(',');
result += &def.to_csv_str();
if index < count {
result.push('\n');
}
}
}
result
}
pub fn to_cns_csv_str(&self, text_map: &HashMap<String, String>) -> String {
// 组建表头
let mut result = CN::get_csv_header(text_map);
// 组建表体
for cn in &self.cns {
if !cn.terminals.is_empty() {
result += &cn.to_csv_str();
}
}
result
}
}
#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use crate::model::dev::{dev_def_from_csv, dev_from_csv, meas_def_from_csv, prop_def_from_csv, PsRsrType};
    /// Enum discriminants are part of the persisted format; pin them here.
    #[test]
    fn test_type_to_u16() {
        assert_eq!(PsRsrType::Switch as u16, 1);
        assert_eq!(PsRsrType::Feeder as u16, 16);
        assert_eq!(PsRsrType::Company as u16, 10000);
        assert_eq!(PsRsrType::UserDefine1 as u16, 30001);
        assert_eq!(PsRsrType::Unknown as u16, 65535);
    }
    /// End-to-end parse of the Ningbo sample CSV fixtures.
    #[test]
    fn test_ningbo_parse() {
        let dev_defs = dev_def_from_csv("tests/ningbo/dev_def.csv");
        assert!(dev_defs.is_ok());
        // Index device definitions by id for the device parser.
        let defines: HashMap<_, _> = dev_defs
            .unwrap()
            .iter()
            .map(|def| (def.id, def.clone()))
            .collect();
        let prop_defs = prop_def_from_csv("tests/ningbo/dev_prop_def.csv");
        assert!(prop_defs.is_ok());
        // Index property definitions by id as well.
        let prop_defines: HashMap<_, _> = prop_defs
            .unwrap()
            .iter()
            .map(|def| (def.id, def.clone()))
            .collect();
        let parsed = dev_from_csv("tests/ningbo/devices.csv", &defines, &prop_defines);
        // Print the failing cell location before asserting, to ease debugging.
        if let Err((row, col)) = &parsed {
            println!("row={}, col={}", row, col);
        }
        assert!(parsed.is_ok());
        assert!(meas_def_from_csv("tests/ningbo/measures.csv").is_ok());
    }
    /// Smoke-test the Display impl of a resource type variant.
    #[test]
    fn test_to_string() {
        let display = PsRsrType::ACline.to_string();
        println!("{}", display);
    }
}
\ No newline at end of file
......@@ -3,7 +3,7 @@ use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use eig_aoe::aoe::AoeModel;
use eig_domain::{DataUnit, Measurement, MeasureValue};
use eig_domain::{prop::DataUnit, Measurement, MeasureValue};
use crate::model::dev::{Island, PropDefine, RsrDefine};
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论