# Commit 8242e769 by dongshufeng
#
# remove all rust codes
#
# parent d2683ead
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "addr2line"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
dependencies = [
"gimli",
]
[[package]]
name = "adler2"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "allocator-api2"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
[[package]]
name = "android-tzdata"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
[[package]]
name = "android_system_properties"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
dependencies = [
"libc",
]
[[package]]
name = "anyhow"
version = "1.0.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
[[package]]
name = "anymap2"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c"
[[package]]
name = "approx"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6"
dependencies = [
"num-traits",
]
[[package]]
name = "arbitrary"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1"
dependencies = [
"derive_arbitrary",
]
[[package]]
name = "arrow-schema"
version = "56.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfa93af9ff2bb80de539e6eb2c1c8764abd0f4b73ffb0d7c82bf1f9868785e66"
dependencies = [
"serde",
]
[[package]]
name = "atoi_simd"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a49e05797ca52e312a0c658938b7d00693ef037799ef7187678f212d7684cf"
dependencies = [
"debug_unsafe",
]
[[package]]
name = "autocfg"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "backtrace"
version = "0.3.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002"
dependencies = [
"addr2line",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
"windows-targets 0.52.6",
]
[[package]]
name = "bincode"
version = "1.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
dependencies = [
"serde",
]
[[package]]
name = "bitflags"
version = "2.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
[[package]]
name = "boolinator"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfa8873f51c92e232f9bac4065cddef41b714152812bfc5f7672ba16d6ef8cd9"
[[package]]
name = "bumpalo"
version = "3.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
[[package]]
name = "bytemuck"
version = "1.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677"
[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
[[package]]
name = "calamine"
version = "0.30.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9aeb09f84576a634da713630e11e431a744b91f1f8114c2ff0760189783a8a1"
dependencies = [
"atoi_simd",
"byteorder",
"codepage",
"encoding_rs",
"fast-float2",
"log",
"quick-xml",
"serde",
"zip",
]
[[package]]
name = "cc"
version = "1.2.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "590f9024a68a8c40351881787f1934dc11afd69090f5edb6831464694d836ea3"
dependencies = [
"find-msvc-tools",
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
[[package]]
name = "chrono"
version = "0.4.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
dependencies = [
"android-tzdata",
"iana-time-zone",
"js-sys",
"num-traits",
"wasm-bindgen",
"windows-link",
]
[[package]]
name = "chrono-tz"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6139a8597ed92cf816dfb33f5dd6cf0bb93a6adc938f11039f371bc5bcd26c3"
dependencies = [
"chrono",
"phf",
]
[[package]]
name = "ciborium"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]
[[package]]
name = "ciborium-io"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
[[package]]
name = "ciborium-ll"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
"half 2.6.0",
]
[[package]]
name = "codepage"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48f68d061bc2828ae826206326e61251aca94c1e4a5305cf52d9138639c918b4"
dependencies = [
"encoding_rs",
]
[[package]]
name = "console_error_panic_hook"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc"
dependencies = [
"cfg-if",
"wasm-bindgen",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "crc32fast"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511"
dependencies = [
"cfg-if",
]
[[package]]
name = "crunchy"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"
[[package]]
name = "csv"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf"
dependencies = [
"csv-core",
"itoa",
"ryu",
"serde",
]
[[package]]
name = "csv-core"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d02f3b0da4c6504f86e9cd789d8dbafab48c2321be74e9987593de5a894d93d"
dependencies = [
"memchr",
]
[[package]]
name = "debug_unsafe"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85d3cef41d236720ed453e102153a53e4cc3d2fde848c0078a50cf249e8e3e5b"
[[package]]
name = "derive_arbitrary"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "derive_more"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678"
dependencies = [
"derive_more-impl",
]
[[package]]
name = "derive_more-impl"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"unicode-xid",
]
[[package]]
name = "ds-3phase-pf"
version = "0.1.0"
dependencies = [
"arrow-schema",
"bytes",
"ciborium",
"csv",
"ds-common",
"eig-domain",
"mems",
"nalgebra",
"ndarray",
"num-complex",
"serde_json",
]
[[package]]
name = "ds-common"
version = "0.1.0"
dependencies = [
"csv",
"eig-domain",
"mems",
"serde_json",
]
[[package]]
name = "ds-dev-ohm-cal"
version = "0.1.0"
dependencies = [
"arrow-schema",
"bytes",
"ciborium",
"csv",
"eig-domain",
"log",
"mems",
"ndarray",
"serde_json",
]
[[package]]
name = "ds-dyn-topo"
version = "0.1.0"
dependencies = [
"arrow-schema",
"bytes",
"ciborium",
"csv",
"ds-common",
"eig-domain",
"log",
"mems",
]
[[package]]
name = "ds-guizhou"
version = "0.1.0"
dependencies = [
"arrow-schema",
"bytes",
"chrono",
"chrono-tz",
"ciborium",
"csv",
"log",
"mems",
]
[[package]]
name = "ds-static-topo"
version = "0.1.0"
dependencies = [
"arrow-schema",
"bytes",
"ciborium",
"log",
"mems",
"petgraph",
]
[[package]]
name = "ds-tn-input"
version = "0.1.0"
dependencies = [
"arrow-schema",
"bytes",
"ciborium",
"csv",
"ds-common",
"eig-domain",
"log",
"mems",
]
[[package]]
name = "eig-aoe"
version = "0.1.0"
dependencies = [
"eig-domain",
"eig-expr",
"log",
"petgraph",
"serde",
]
[[package]]
name = "eig-domain"
version = "0.1.0"
dependencies = [
"calamine",
"csv",
"eig-expr",
"encoding_rs",
"protobuf",
"protobuf-codegen",
"serde",
]
[[package]]
name = "eig-expr"
version = "0.1.0"
dependencies = [
"fnv",
"ndarray",
"nom",
"num-complex",
"num-traits",
"serde",
]
[[package]]
name = "either"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
[[package]]
name = "encoding_rs"
version = "0.8.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3"
dependencies = [
"cfg-if",
]
[[package]]
name = "equivalent"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
[[package]]
name = "errno"
version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
dependencies = [
"libc",
"windows-sys 0.60.2",
]
[[package]]
name = "fast-float2"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8eb564c5c7423d25c886fb561d1e4ee69f72354d16918afa32c08811f6b6a55"
[[package]]
name = "fastrand"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "find-msvc-tools"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e178e4fba8a2726903f6ba98a6d221e76f9c12c650d5dc0e6afdc50677b49650"
[[package]]
name = "fixedbitset"
version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99"
[[package]]
name = "flate2"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d"
dependencies = [
"crc32fast",
"libz-rs-sys",
"miniz_oxide",
]
[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "foldhash"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
[[package]]
name = "form_urlencoded"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf"
dependencies = [
"percent-encoding",
]
[[package]]
name = "futures"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
name = "futures-core"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "futures-io"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
[[package]]
name = "futures-macro"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "futures-sink"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
[[package]]
name = "futures-task"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
[[package]]
name = "futures-util"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task",
"memchr",
"pin-project-lite",
"pin-utils",
"slab",
]
[[package]]
name = "getrandom"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
dependencies = [
"cfg-if",
"js-sys",
"libc",
"wasi 0.11.1+wasi-snapshot-preview1",
"wasm-bindgen",
]
[[package]]
name = "getrandom"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi 0.14.3+wasi-0.2.4",
]
[[package]]
name = "gimli"
version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "glam"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "333928d5eb103c5d4050533cec0384302db6be8ef7d3cebd30ec6a35350353da"
[[package]]
name = "glam"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3abb554f8ee44336b72d522e0a7fe86a29e09f839a36022fa869a7dfe941a54b"
[[package]]
name = "glam"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4126c0479ccf7e8664c36a2d719f5f2c140fbb4f9090008098d2c291fa5b3f16"
[[package]]
name = "glam"
version = "0.17.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e01732b97afd8508eee3333a541b9f7610f454bb818669e66e90f5f57c93a776"
[[package]]
name = "glam"
version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "525a3e490ba77b8e326fb67d4b44b4bd2f920f44d4cc73ccec50adc68e3bee34"
[[package]]
name = "glam"
version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b8509e6791516e81c1a630d0bd7fbac36d2fa8712a9da8662e716b52d5051ca"
[[package]]
name = "glam"
version = "0.20.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f43e957e744be03f5801a55472f593d43fabdebf25a4585db250f04d86b1675f"
[[package]]
name = "glam"
version = "0.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "518faa5064866338b013ff9b2350dc318e14cc4fcd6cb8206d7e7c9886c98815"
[[package]]
name = "glam"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12f597d56c1bd55a811a1be189459e8fad2bbc272616375602443bdfb37fa774"
[[package]]
name = "glam"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e4afd9ad95555081e109fe1d21f2a30c691b5f0919c67dfa690a2e1eb6bd51c"
[[package]]
name = "glam"
version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5418c17512bdf42730f9032c74e1ae39afc408745ebb2acf72fbc4691c17945"
[[package]]
name = "glam"
version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "151665d9be52f9bb40fc7966565d39666f2d1e69233571b71b87791c7e0528b3"
[[package]]
name = "glam"
version = "0.27.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e05e7e6723e3455f4818c7b26e855439f7546cf617ef669d1adedb8669e5cb9"
[[package]]
name = "glam"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "779ae4bf7e8421cf91c0b3b64e7e8b40b862fba4d393f59150042de7c4965a94"
[[package]]
name = "glam"
version = "0.29.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8babf46d4c1c9d92deac9f7be466f76dfc4482b6452fc5024b5e8daf6ffeb3ee"
[[package]]
name = "glam"
version = "0.30.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2d1aab06663bdce00d6ca5e5ed586ec8d18033a771906c993a1e3755b368d85"
[[package]]
name = "gloo"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28999cda5ef6916ffd33fb4a7b87e1de633c47c0dc6d97905fee1cdaa142b94d"
dependencies = [
"gloo-console 0.2.3",
"gloo-dialogs 0.1.1",
"gloo-events 0.1.2",
"gloo-file 0.2.3",
"gloo-history 0.1.5",
"gloo-net 0.3.1",
"gloo-render 0.1.1",
"gloo-storage 0.2.2",
"gloo-timers 0.2.6",
"gloo-utils 0.1.7",
"gloo-worker 0.2.1",
]
[[package]]
name = "gloo"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd35526c28cc55c1db77aed6296de58677dbab863b118483a27845631d870249"
dependencies = [
"gloo-console 0.3.0",
"gloo-dialogs 0.2.0",
"gloo-events 0.2.0",
"gloo-file 0.3.0",
"gloo-history 0.2.2",
"gloo-net 0.4.0",
"gloo-render 0.2.0",
"gloo-storage 0.3.0",
"gloo-timers 0.3.0",
"gloo-utils 0.2.0",
"gloo-worker 0.4.0",
]
[[package]]
name = "gloo-console"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82b7ce3c05debe147233596904981848862b068862e9ec3e34be446077190d3f"
dependencies = [
"gloo-utils 0.1.7",
"js-sys",
"serde",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-console"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a17868f56b4a24f677b17c8cb69958385102fa879418052d60b50bc1727e261"
dependencies = [
"gloo-utils 0.2.0",
"js-sys",
"serde",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-dialogs"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67062364ac72d27f08445a46cab428188e2e224ec9e37efdba48ae8c289002e6"
dependencies = [
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-dialogs"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf4748e10122b01435750ff530095b1217cf6546173459448b83913ebe7815df"
dependencies = [
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-events"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68b107f8abed8105e4182de63845afcc7b69c098b7852a813ea7462a320992fc"
dependencies = [
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-events"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27c26fb45f7c385ba980f5fa87ac677e363949e065a083722697ef1b2cc91e41"
dependencies = [
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-file"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8d5564e570a38b43d78bdc063374a0c3098c4f0d64005b12f9bbe87e869b6d7"
dependencies = [
"gloo-events 0.1.2",
"js-sys",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-file"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97563d71863fb2824b2e974e754a81d19c4a7ec47b09ced8a0e6656b6d54bd1f"
dependencies = [
"gloo-events 0.2.0",
"js-sys",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-history"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85725d90bf0ed47063b3930ef28e863658a7905989e9929a8708aab74a1d5e7f"
dependencies = [
"gloo-events 0.1.2",
"gloo-utils 0.1.7",
"serde",
"serde-wasm-bindgen 0.5.0",
"serde_urlencoded",
"thiserror",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-history"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "903f432be5ba34427eac5e16048ef65604a82061fe93789f2212afc73d8617d6"
dependencies = [
"getrandom 0.2.16",
"gloo-events 0.2.0",
"gloo-utils 0.2.0",
"serde",
"serde-wasm-bindgen 0.6.5",
"serde_urlencoded",
"thiserror",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-net"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a66b4e3c7d9ed8d315fd6b97c8b1f74a7c6ecbbc2320e65ae7ed38b7068cc620"
dependencies = [
"futures-channel",
"futures-core",
"futures-sink",
"gloo-utils 0.1.7",
"http",
"js-sys",
"pin-project",
"serde",
"serde_json",
"thiserror",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
]
[[package]]
name = "gloo-net"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ac9e8288ae2c632fa9f8657ac70bfe38a1530f345282d7ba66a1f70b72b7dc4"
dependencies = [
"futures-channel",
"futures-core",
"futures-sink",
"gloo-utils 0.2.0",
"http",
"js-sys",
"pin-project",
"serde",
"serde_json",
"thiserror",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
]
[[package]]
name = "gloo-render"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fd9306aef67cfd4449823aadcd14e3958e0800aa2183955a309112a84ec7764"
dependencies = [
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-render"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56008b6744713a8e8d98ac3dcb7d06543d5662358c9c805b4ce2167ad4649833"
dependencies = [
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-storage"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d6ab60bf5dbfd6f0ed1f7843da31b41010515c745735c970e821945ca91e480"
dependencies = [
"gloo-utils 0.1.7",
"js-sys",
"serde",
"serde_json",
"thiserror",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-storage"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbc8031e8c92758af912f9bc08fbbadd3c6f3cfcbf6b64cdf3d6a81f0139277a"
dependencies = [
"gloo-utils 0.2.0",
"js-sys",
"serde",
"serde_json",
"thiserror",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-timers"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "gloo-timers"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "gloo-utils"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "037fcb07216cb3a30f7292bd0176b050b7b9a052ba830ef7d5d65f6dc64ba58e"
dependencies = [
"js-sys",
"serde",
"serde_json",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-utils"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b5555354113b18c547c1d3a98fbf7fb32a9ff4f6fa112ce823a21641a0ba3aa"
dependencies = [
"js-sys",
"serde",
"serde_json",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-worker"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09110b5555bcafe508cee0fb94308af9aac7a85f980d3c88b270d117c6c6911d"
dependencies = [
"anymap2",
"bincode",
"gloo-console 0.2.3",
"gloo-utils 0.1.7",
"js-sys",
"serde",
"slab",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "gloo-worker"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13471584da78061a28306d1359dd0178d8d6fc1c7c80e5e35d27260346e0516a"
dependencies = [
"anymap2",
"bincode",
"gloo-console 0.2.3",
"gloo-utils 0.1.7",
"js-sys",
"serde",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
]
[[package]]
name = "gloo-worker"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76495d3dd87de51da268fa3a593da118ab43eb7f8809e17eb38d3319b424e400"
dependencies = [
"bincode",
"futures",
"gloo-utils 0.2.0",
"gloo-worker-macros",
"js-sys",
"pinned",
"serde",
"thiserror",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
]
[[package]]
name = "gloo-worker-macros"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "956caa58d4857bc9941749d55e4bd3000032d8212762586fa5705632967140e7"
dependencies = [
"proc-macro-crate",
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "half"
version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403"
[[package]]
name = "half"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
dependencies = [
"cfg-if",
"crunchy",
]
[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
[[package]]
name = "hashbrown"
version = "0.15.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
dependencies = [
"allocator-api2",
"equivalent",
"foldhash",
]
[[package]]
name = "hermit-abi"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
[[package]]
name = "home"
version = "0.5.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf"
dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "http"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1"
dependencies = [
"bytes",
"fnv",
"itoa",
]
[[package]]
name = "iana-time-zone"
version = "0.1.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8"
dependencies = [
"android_system_properties",
"core-foundation-sys",
"iana-time-zone-haiku",
"js-sys",
"log",
"wasm-bindgen",
"windows-core",
]
[[package]]
name = "iana-time-zone-haiku"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
dependencies = [
"cc",
]
[[package]]
name = "iesplan"
version = "0.1.0"
dependencies = [
"csv",
"eig-domain",
"gloo-timers 0.3.0",
"js-sys",
"serde",
"serde_cbor",
"wasm-bindgen",
"web-sys",
"yew 0.21.0",
"yew-bulma",
]
[[package]]
name = "implicit-clone"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92d6dc8dce3dddd0bc92f5741980eb67c04cb6967d90cb94482741b254ad7555"
dependencies = [
"indexmap 1.9.3",
]
[[package]]
name = "implicit-clone"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8a9aa791c7b5a71b636b7a68207fdebf171ddfc593d9c8506ec4cbc527b6a84"
dependencies = [
"implicit-clone-derive",
"indexmap 2.11.0",
]
[[package]]
name = "implicit-clone-derive"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "699c1b6d335e63d0ba5c1e1c7f647371ce989c3bcbe1f7ed2b85fa56e3bd1a21"
dependencies = [
"quote",
"syn 2.0.106",
]
[[package]]
name = "indexmap"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
dependencies = [
"autocfg",
"hashbrown 0.12.3",
]
[[package]]
name = "indexmap"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9"
dependencies = [
"equivalent",
"hashbrown 0.15.5",
]
[[package]]
name = "io-uring"
version = "0.7.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b"
dependencies = [
"bitflags",
"cfg-if",
"libc",
]
[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "libc"
version = "0.2.175"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
[[package]]
name = "libz-rs-sys"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "840db8cf39d9ec4dd794376f38acc40d0fc65eec2a8f484f7fd375b84602becd"
dependencies = [
"zlib-rs",
]
[[package]]
name = "linux-raw-sys"
version = "0.4.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
[[package]]
name = "linux-raw-sys"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
[[package]]
name = "matrixmultiply"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a06de3016e9fae57a36fd14dba131fccf49f74b40b7fbdb472f96e361ec71a08"
dependencies = [
"autocfg",
"rawpointer",
]
[[package]]
name = "memchr"
version = "2.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
[[package]]
name = "mems"
version = "0.1.0"
dependencies = [
"arrow-schema",
"ciborium",
"eig-domain",
"serde",
]
[[package]]
name = "miniz_oxide"
version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
dependencies = [
"adler2",
]
[[package]]
name = "mio"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c"
dependencies = [
"libc",
"wasi 0.11.1+wasi-snapshot-preview1",
"windows-sys 0.59.0",
]
[[package]]
name = "nalgebra"
version = "0.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cd59afb6639828b33677758314a4a1a745c15c02bc597095b851c8fd915cf49"
dependencies = [
"approx",
"glam 0.14.0",
"glam 0.15.2",
"glam 0.16.0",
"glam 0.17.3",
"glam 0.18.0",
"glam 0.19.0",
"glam 0.20.5",
"glam 0.21.3",
"glam 0.22.0",
"glam 0.23.0",
"glam 0.24.2",
"glam 0.25.0",
"glam 0.27.0",
"glam 0.28.0",
"glam 0.29.3",
"glam 0.30.5",
"matrixmultiply",
"nalgebra-macros",
"num-complex",
"num-rational",
"num-traits",
"simba",
"typenum",
]
[[package]]
name = "nalgebra-macros"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "973e7178a678cfd059ccec50887658d482ce16b0aa9da3888ddeab5cd5eb4889"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "ndarray"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "882ed72dce9365842bf196bdeedf5055305f11fc8c03dee7bb0194a6cad34841"
dependencies = [
"matrixmultiply",
"num-complex",
"num-integer",
"num-traits",
"portable-atomic",
"portable-atomic-util",
"rawpointer",
]
[[package]]
name = "nom"
version = "8.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405"
dependencies = [
"memchr",
]
[[package]]
name = "num-bigint"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9"
dependencies = [
"num-integer",
"num-traits",
]
[[package]]
name = "num-complex"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495"
dependencies = [
"num-traits",
]
[[package]]
name = "num-integer"
version = "0.1.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
dependencies = [
"num-traits",
]
[[package]]
name = "num-rational"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824"
dependencies = [
"num-bigint",
"num-integer",
"num-traits",
]
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "num_cpus"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "object"
version = "0.36.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "paste"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "percent-encoding"
version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "petgraph"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54acf3a685220b533e437e264e4d932cfbdc4cc7ec0cd232ed73c08d03b8a7ca"
dependencies = [
"fixedbitset",
"hashbrown 0.15.5",
"indexmap 2.11.0",
"serde",
"serde_derive",
]
[[package]]
name = "phf"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7"
dependencies = [
"phf_shared",
]
[[package]]
name = "phf_shared"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981"
dependencies = [
"siphasher",
]
[[package]]
name = "pin-project"
version = "1.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
version = "1.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "pin-project-lite"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pinned"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a829027bd95e54cfe13e3e258a1ae7b645960553fb82b75ff852c29688ee595b"
dependencies = [
"futures",
"rustversion",
"thiserror",
]
[[package]]
name = "portable-atomic"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483"
[[package]]
name = "portable-atomic-util"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507"
dependencies = [
"portable-atomic",
]
[[package]]
name = "prettyplease"
version = "0.1.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86"
dependencies = [
"proc-macro2",
"syn 1.0.109",
]
[[package]]
name = "prettyplease"
version = "0.2.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
dependencies = [
"proc-macro2",
"syn 2.0.106",
]
[[package]]
name = "proc-macro-crate"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919"
dependencies = [
"once_cell",
"toml_edit",
]
[[package]]
name = "proc-macro-error"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
dependencies = [
"proc-macro-error-attr",
"proc-macro2",
"quote",
"syn 1.0.109",
"version_check",
]
[[package]]
name = "proc-macro-error-attr"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
dependencies = [
"proc-macro2",
"quote",
"version_check",
]
[[package]]
name = "proc-macro2"
version = "1.0.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de"
dependencies = [
"unicode-ident",
]
[[package]]
name = "prokio"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03b55e106e5791fa5a13abd13c85d6127312e8e09098059ca2bc9b03ca4cf488"
dependencies = [
"futures",
"gloo 0.8.1",
"num_cpus",
"once_cell",
"pin-project",
"pinned",
"tokio",
"tokio-stream",
"wasm-bindgen-futures",
]
[[package]]
name = "protobuf"
version = "3.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4"
dependencies = [
"bytes",
"once_cell",
"protobuf-support",
"thiserror",
]
[[package]]
name = "protobuf-codegen"
version = "3.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d3976825c0014bbd2f3b34f0001876604fe87e0c86cd8fa54251530f1544ace"
dependencies = [
"anyhow",
"once_cell",
"protobuf",
"protobuf-parse",
"regex",
"tempfile",
"thiserror",
]
[[package]]
name = "protobuf-parse"
version = "3.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4aeaa1f2460f1d348eeaeed86aea999ce98c1bded6f089ff8514c9d9dbdc973"
dependencies = [
"anyhow",
"indexmap 2.11.0",
"log",
"protobuf",
"protobuf-support",
"tempfile",
"thiserror",
"which",
]
[[package]]
name = "protobuf-support"
version = "3.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6"
dependencies = [
"thiserror",
]
[[package]]
name = "quick-xml"
version = "0.37.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "331e97a1af0bf59823e6eadffe373d7b27f485be8748f71471c662c1f269b7fb"
dependencies = [
"encoding_rs",
"memchr",
]
[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "rawpointer"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
[[package]]
name = "regex"
version = "1.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001"
[[package]]
name = "rustc-demangle"
version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
[[package]]
name = "rustix"
version = "0.38.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154"
dependencies = [
"bitflags",
"errno",
"libc",
"linux-raw-sys 0.4.15",
"windows-sys 0.59.0",
]
[[package]]
name = "rustix"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
dependencies = [
"bitflags",
"errno",
"libc",
"linux-raw-sys 0.9.4",
"windows-sys 0.60.2",
]
[[package]]
name = "rustversion"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]]
name = "safe_arch"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323"
dependencies = [
"bytemuck",
]
[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde-wasm-bindgen"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3b143e2833c57ab9ad3ea280d21fd34e285a42837aeb0ee301f4f41890fa00e"
dependencies = [
"js-sys",
"serde",
"wasm-bindgen",
]
[[package]]
name = "serde-wasm-bindgen"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8302e169f0eddcc139c70f139d19d6467353af16f9fce27e8c30158036a1e16b"
dependencies = [
"js-sys",
"serde",
"wasm-bindgen",
]
[[package]]
name = "serde_cbor"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
dependencies = [
"half 1.8.3",
"serde",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "serde_json"
version = "1.0.143"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "serde_urlencoded"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
dependencies = [
"form_urlencoded",
"itoa",
"ryu",
"serde",
]
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "simba"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3a386a501cd104797982c15ae17aafe8b9261315b5d07e3ec803f2ea26be0fa"
dependencies = [
"approx",
"num-complex",
"num-traits",
"paste",
"wide",
]
[[package]]
name = "simd-adler32"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
[[package]]
name = "siphasher"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
[[package]]
name = "slab"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tempfile"
version = "3.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e"
dependencies = [
"fastrand",
"getrandom 0.3.3",
"once_cell",
"rustix 1.0.8",
"windows-sys 0.60.2",
]
[[package]]
name = "thiserror"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "tokio"
version = "1.47.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
dependencies = [
"backtrace",
"io-uring",
"libc",
"mio",
"pin-project-lite",
"slab",
]
[[package]]
name = "tokio-stream"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
dependencies = [
"futures-core",
"pin-project-lite",
"tokio",
]
[[package]]
name = "toml_datetime"
version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
[[package]]
name = "toml_edit"
version = "0.19.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [
"indexmap 2.11.0",
"toml_datetime",
"winnow",
]
[[package]]
name = "tracing"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "tracing-core"
version = "0.1.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678"
dependencies = [
"once_cell",
]
[[package]]
name = "typenum"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "unicode-xid"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
[[package]]
name = "version_check"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "wasi"
version = "0.11.1+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "wasi"
version = "0.14.3+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a51ae83037bdd272a9e28ce236db8c07016dd0d50c27038b3f407533c030c95"
dependencies = [
"wit-bindgen",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
"syn 2.0.106",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-futures"
version = "0.4.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61"
dependencies = [
"cfg-if",
"js-sys",
"once_cell",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
"unicode-ident",
]
[[package]]
name = "web-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "which"
version = "4.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7"
dependencies = [
"either",
"home",
"once_cell",
"rustix 0.38.44",
]
[[package]]
name = "wide"
version = "0.7.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ce5da8ecb62bcd8ec8b7ea19f69a51275e91299be594ea5cc6ef7819e16cd03"
dependencies = [
"bytemuck",
"safe_arch",
]
[[package]]
name = "windows-core"
version = "0.61.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3"
dependencies = [
"windows-implement",
"windows-interface",
"windows-link",
"windows-result",
"windows-strings",
]
[[package]]
name = "windows-implement"
version = "0.60.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "windows-interface"
version = "0.59.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "windows-link"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
[[package]]
name = "windows-result"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-sys"
version = "0.60.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
dependencies = [
"windows-targets 0.53.3",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm 0.52.6",
"windows_aarch64_msvc 0.52.6",
"windows_i686_gnu 0.52.6",
"windows_i686_gnullvm 0.52.6",
"windows_i686_msvc 0.52.6",
"windows_x86_64_gnu 0.52.6",
"windows_x86_64_gnullvm 0.52.6",
"windows_x86_64_msvc 0.52.6",
]
[[package]]
name = "windows-targets"
version = "0.53.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
dependencies = [
"windows-link",
"windows_aarch64_gnullvm 0.53.0",
"windows_aarch64_msvc 0.53.0",
"windows_i686_gnu 0.53.0",
"windows_i686_gnullvm 0.53.0",
"windows_i686_msvc 0.53.0",
"windows_x86_64_gnu 0.53.0",
"windows_x86_64_gnullvm 0.53.0",
"windows_x86_64_msvc 0.53.0",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_aarch64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_i686_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "windows_x86_64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
[[package]]
name = "winnow"
version = "0.5.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876"
dependencies = [
"memchr",
]
[[package]]
name = "wit-bindgen"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "052283831dbae3d879dc7f51f3d92703a316ca49f91540417d38591826127814"
[[package]]
name = "yew"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5dbecfe44343b70cc2932c3eb445425969ae21754a8ab3a0966981c1cf7af1cc"
dependencies = [
"console_error_panic_hook",
"futures",
"gloo 0.8.1",
"implicit-clone 0.3.10",
"indexmap 1.9.3",
"js-sys",
"prokio",
"rustversion",
"serde",
"slab",
"thiserror",
"tokio",
"tracing",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
"yew-macro 0.20.0",
]
[[package]]
name = "yew"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f1a03f255c70c7aa3e9c62e15292f142ede0564123543c1cc0c7a4f31660cac"
dependencies = [
"console_error_panic_hook",
"futures",
"gloo 0.10.0",
"implicit-clone 0.4.9",
"indexmap 2.11.0",
"js-sys",
"prokio",
"rustversion",
"serde",
"slab",
"thiserror",
"tokio",
"tracing",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
"yew-macro 0.21.0",
]
[[package]]
name = "yew-agent"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b06f7c5ed97fff22816bb00d3d82ebc0fc1119d7bbb9e07e62c0d2853f51920a"
dependencies = [
"gloo-worker 0.1.2",
"yew 0.20.0",
]
[[package]]
name = "yew-bulma"
version = "0.1.0"
source = "git+https://github.com/shufengdong/yew-bulma.git#439ef35ca2f8229b01a72dff93c276a3339bfccc"
dependencies = [
"bytes",
"chrono",
"derive_more",
"gloo-utils 0.2.0",
"js-sys",
"log",
"petgraph",
"serde",
"serde_cbor",
"serde_json",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
"yew 0.21.0",
"yew-agent",
]
[[package]]
name = "yew-macro"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b64c253c1d401f1ea868ca9988db63958cfa15a69f739101f338d6f05eea8301"
dependencies = [
"boolinator",
"once_cell",
"prettyplease 0.1.25",
"proc-macro-error",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "yew-macro"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02fd8ca5166d69e59f796500a2ce432ff751edecbbb308ca59fd3fe4d0343de2"
dependencies = [
"boolinator",
"once_cell",
"prettyplease 0.2.37",
"proc-macro-error",
"proc-macro2",
"quote",
"syn 2.0.106",
]
[[package]]
name = "zip"
version = "4.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c034aa6c54f654df20e7dc3713bc51705c12f280748fb6d7f40f87c696623e34"
dependencies = [
"arbitrary",
"crc32fast",
"flate2",
"indexmap 2.11.0",
"memchr",
"zopfli",
]
[[package]]
name = "zlib-rs"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f06ae92f42f5e5c42443fd094f245eb656abf56dd7cce9b8b263236565e00f2"
[[package]]
name = "zopfli"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edfc5ee405f504cd4984ecc6f14d02d55cfda60fa4b689434ef4102aae150cd7"
dependencies = [
"bumpalo",
"crc32fast",
"log",
"simd-adler32",
]
[workspace]
resolver = "2"
members = [
"eig-expr",
"eig-domain",
"eig-aoe",
"mems",
"mems/examples/ds-powerflow/ds-common",
"mems/examples/ds-powerflow/ds-static-topo",
"mems/examples/ds-powerflow/ds-dev-ohm-cal",
"mems/examples/ds-powerflow/ds-dyn-topo",
"mems/examples/ds-powerflow/ds-pf-input",
"mems/examples/ds-powerflow/ds-3phase-pf",
"mems/examples/ds-guizhou",
"mems/examples/iesplan",
]
[workspace.package]
edition = "2021"
rust-version = "1.80.0" # MSRV
[package]
name = "eig-aoe"
version = "0.1.0"
authors = ["dongshufeng <dongshufeng@zju.edu.cn>"]
edition.workspace = true
rust-version.workspace = true
[dependencies]
log = "^0.4"
serde = { version = "1", features = ["derive"] }
petgraph = "0.8"
# this repo
eig-domain = { path = "../eig-domain"}
eig-expr = { path = "../eig-expr"}
\ No newline at end of file
// flowing should as same as in sparrowzz
use std::fmt;
use std::fmt::Display;
use std::time::Duration;
use serde::{Deserialize, Serialize};
use eig_expr::{Expr, MyF};
use crate::MeasureBuf;
/**
* @api {时间对象} /Duration Duration
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} secs 秒
* @apiSuccess {u32} nanos 纳秒
*/
/**
* @api {枚举_启动方式} /TriggerType TriggerType
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {Object} SimpleRepeat 简单固定周期触发,{"SimpleRepeat": Duration}
* @apiSuccess {Object} TimeDrive cron expression,{"TimeDrive": String}
* @apiSuccess {String} EventDrive 事件驱动,AOE开始节点条件满足即触发
* @apiSuccess {Object} EventRepeatMix 事件驱动 && Simple drive,{"EventRepeatMix": Duration}
* @apiSuccess {Object} EventTimeMix 事件驱动 && Time drive,{"EventTimeMix": String}
*/
/// How an AOE run is started.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub enum TriggerType {
    // Fires on a simple fixed period
    SimpleRepeat(Duration),
    // Fires on a cron-expression schedule
    TimeDrive(String),
    // Event driven: fires as soon as the AOE start-node condition holds
    EventDrive,
    // Event driven combined with a fixed-period trigger
    EventRepeatMix(Duration),
    // Event driven combined with a cron-expression trigger
    EventTimeMix(String),
}
impl Display for TriggerType {
    /// Renders the variant through its derived `Debug` representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
/**
* @api {枚举_失败模式} /FailureMode FailureMode
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} Default 如果存在指向该节点的动作运行成功(可以理解为有路径到达该事件),则后续动作继续进行
* @apiSuccess {String} Ignore 忽略,不影响其他action
* @apiSuccess {String} StopAll 停止整个aoe
* @apiSuccess {String} StopFailed 只停止受影响的节点
*/
/// What to do when an action on an AOE edge fails.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub enum FailureMode {
    // If any action pointing at the target node succeeded (i.e. some path
    // reaches the event), downstream actions still proceed
    Default,
    // Ignore the failure; other actions are unaffected
    Ignore,
    // Stop the whole AOE
    StopAll,
    // Stop only the affected nodes
    StopFailed,
}
impl Display for FailureMode {
    /// Renders the variant through its derived `Debug` representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
/**
* @api {枚举_节点类型} /NodeType NodeType
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} ConditionNode 带表达式的节点,表达式结果>0说明事件发生,进入后续事件
* @apiSuccess {String} SwitchNode 带表达式的节点,表达式结果>0进入第一条支路,否则进入第二条支路
* @apiSuccess {String} SwitchOfActionResult 不带表达式的节点,前序Action运行成功进入第一条支路,否则进入第二条支路
*/
/// Kind of an AOE event node.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub enum NodeType {
    // Node with an expression: a result > 0 means the event occurred and
    // downstream events are entered
    ConditionNode,
    // Node with an expression: result > 0 takes the first branch,
    // otherwise the second branch
    SwitchNode,
    // Node without an expression: the first branch is taken when the
    // preceding action succeeded, otherwise the second branch
    SwitchOfActionResult,
}
impl Display for NodeType {
    /// Renders the variant through its derived `Debug` representation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
/**
* @api {EventNode} /EventNode EventNode
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 节点id
* @apiSuccess {u64} aoe_id AOE_id
* @apiSuccess {String} name 节点名
* @apiSuccess {NodeType} node_type 节点类型
* @apiSuccess {Expr} expr 表达式
* @apiSuccess {u64} timeout 事件还未发生的等待超时时间
*/
/// A node (event) in the AOE graph.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct EventNode {
    // Node id
    pub id: u64,
    // Id of the AOE this node belongs to
    pub aoe_id: u64,
    // Node name
    pub name: String,
    // Kind of node (condition / switch / switch-on-action-result)
    pub node_type: NodeType,
    // Expression evaluated for this node
    pub expr: Expr,
    /// Timeout to wait for the event to occur (units not specified here —
    /// TODO confirm against the executor)
    pub timeout: u64,
}
/**
* @api {ActionEdge} /ActionEdge ActionEdge
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} aoe_id AOE_id
* @apiSuccess {String} name 节点名
* @apiSuccess {u64} source_node 源节点
* @apiSuccess {u64} target_node 目标节点
* @apiSuccess {FailureMode} failure_mode action失败时的处理方式
* @apiSuccess {EigAction} action 动作定义
*/
/// An edge (action) in the AOE graph, connecting two event nodes.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct ActionEdge {
    // Id of the AOE this edge belongs to
    pub aoe_id: u64,
    // Edge name
    pub name: String,
    // Source node id
    pub source_node: u64,
    // Target node id
    pub target_node: u64,
    /// How to proceed when the action fails
    pub failure_mode: FailureMode,
    // The action to run on this edge
    pub action: EigAction,
}
/**
* @api {枚举_动作} /EigAction EigAction
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} None 无动作
* @apiSuccess {Object} SetPoints 设点动作,{"SetPoints": SetPoints}
* @apiSuccess {Object} SetPointsWithCheck 设点动作,{"SetPointsWithCheck": SetPoints}
* @apiSuccess {Object} SetPoints2 设点动作,{"SetPoints2": SetPoints2}
* @apiSuccess {Object} SetPointsWithCheck2 设点动作,{"SetPointsWithCheck2": SetPoints2}
* @apiSuccess {Object} Solve 求方程,{"Solve": SparseSolver}
* @apiSuccess {Object} Nlsolve Nlsolve,{"Nlsolve": NewtonSolver}
* @apiSuccess {Object} Milp 混合整数线性规划稀疏表示,{"Milp": SparseMILP}
* @apiSuccess {Object} SimpleMilp 混合整数线性规划稠密表示,{"SimpleMilp": MILP}
* @apiSuccess {Object} Nlp 非整数线性规划,{"Nlp": NLP}
* @apiSuccess {Object} Url 调用webservice获取EigAction并执行,{"Url": String}
*/
/// The action attached to an AOE edge.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub enum EigAction {
    /// No action
    None,
    /// Set-point action
    SetPoints(SetPoints),
    /// Set-point action (checked variant, per the name — semantics live in the executor)
    SetPointsWithCheck(SetPoints),
    /// Set-point action, expression-per-id-list form
    SetPoints2(SetPoints2),
    /// Set-point action, expression-per-id-list form (checked variant)
    SetPointsWithCheck2(SetPoints2),
    /// Solve a sparse linear system
    Solve(crate::solvers::SparseSolver),
    /// Solve a nonlinear system (Newton solver)
    Nlsolve(crate::solvers::NewtonSolver),
    /// Mixed-integer linear program, sparse representation
    Milp(crate::solvers::SparseMILP),
    /// Mixed-integer linear program, dense representation
    SimpleMilp(crate::solvers::MILP),
    /// Nonlinear program
    Nlp(crate::solvers::NLP),
    /// Fetch an EigAction from a web service at this URL and execute it
    Url(String),
}
/**
* @api {SetPoints} /SetPoints SetPoints
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {String[]} discrete_id discrete_id
* @apiSuccess {Expr[]} discrete_v discrete_v
* @apiSuccess {String[]} analog_id analog_id
* @apiSuccess {Expr[]} analog_v analog_v
*/
/// Set-point action: parallel id/value-expression lists for discrete and
/// analog points.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct SetPoints {
    // Ids of the discrete points to set
    pub discrete_id: Vec<String>,
    // Value expression per discrete point (parallel to `discrete_id`)
    pub discrete_v: Vec<Expr>,
    // Ids of the analog points to set
    pub analog_id: Vec<String>,
    // Value expression per analog point (parallel to `analog_id`)
    pub analog_v: Vec<Expr>,
}
/**
* @api {PointsToExp} /PointsToExp PointsToExp
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {String[]} ids id列表
* @apiSuccess {Expr} expr 表达式
*/
/// A group of point ids that share one value expression.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct PointsToExp {
    // Ids of the points
    pub ids: Vec<String>,
    // Expression producing the value for every id in `ids`
    pub expr: Expr,
}
/**
* @api {SetPoints2} /SetPoints2 SetPoints2
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {PointsToExp[]} discretes discretes
* @apiSuccess {PointsToExp[]} analogs analogs
*/
/// Set-point action in expression-per-id-list form.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct SetPoints2 {
    // Discrete point groups
    pub discretes: Vec<PointsToExp>,
    // Analog point groups
    pub analogs: Vec<PointsToExp>,
}
/// Either a list of named variable values or a buffer of measurements.
#[derive(Debug, Clone)]
pub enum VarOrMeasures {
    // (variable name, value) pairs
    Vars(Vec<(String, MyF)>),
    // A measurement buffer
    Measures(MeasureBuf),
}
/**
* @api {AoeModel} /AoeModel AoeModel
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id AOE_id
* @apiSuccess {String} name AOE名
* @apiSuccess {EventNode[]} events 节点
* @apiSuccess {ActionEdge[]} actions 边
* @apiSuccess {TriggerType} trigger_type 启动的方式
* @apiSuccess {tuple[]} variables 用户自定义的变量,这些变量不在计算点的范围,tuple格式为(变量名:String, 变量表达式:Expr)
*/
/// An AOE (activity-on-edge) model: events as nodes, actions as edges.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AoeModel {
    /// AOE id
    pub id: u64,
    /// AOE name
    pub name: String,
    /// Nodes (events)
    pub events: Vec<EventNode>,
    /// Edges (actions)
    pub actions: Vec<ActionEdge>,
    /// How the AOE is started
    pub trigger_type: TriggerType,
    /// User-defined variables outside the scope of computing points,
    /// as (variable name, variable expression) pairs
    pub variables: Vec<(String, Expr)>,
}
impl Default for AoeModel {
fn default() -> Self {
Self {
id: 0,
name: String::default(),
events: vec![],
actions: vec![],
trigger_type: TriggerType::EventDrive,
variables: vec![],
}
}
}
impl PartialEq for AoeModel {
    /// Two models are equal when every field matches.
    ///
    /// `EventNode` and `ActionEdge` both derive `PartialEq`, so
    /// `Vec` equality (which already includes the length check) replaces
    /// the original manual length comparison + index loops.
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
            && self.name == other.name
            && self.trigger_type == other.trigger_type
            && self.variables == other.variables
            && self.events == other.events
            && self.actions == other.actions
    }
}
// above should as same as in sparrowzz
\ No newline at end of file
// flowing should as same as in sparrowzz
pub mod aoe;
pub mod solvers;
use std::collections::{HashMap, HashSet};
use std::collections::hash_map::Iter;
use log::error;
use petgraph::algo;
use petgraph::graphmap::DiGraphMap;
use eig_domain::{Measurement, MeasureValue};
use eig_expr::{Expr, Token};
/// Size of the buffer that holds AOE results.
pub const AOE_RESULT_BUF: usize = 100;
// Variable-name suffixes selecting which aspect of a point a variable
// refers to (see `PointVarType` and `find_points_in_var`).
const ERR_SUFFIX: &str = "_err";
const DT_SUFFIX: &str = "_dt";
const DDT_SUFFIX: &str = "_ddt";
const T_SUFFIX: &str = "_t";
const PUB_T_SUFFIX: &str = "_pub_t";
const PUB_V_SUFFIX: &str = "_pub_v";
/// Which aspect of a measurement point a variable reads: the value, a
/// deviation, or a timestamp.
/// "Deviation" means the difference between the latest measurement and
/// the measurement last published to the network.
#[derive(Debug, Clone)]
pub enum PointVarType {
    /// The point's value
    Value,
    /// Value deviation (current sample vs. most recently published value)
    Error,
    /// The point's derivative
    Gradient,
    /// The point's sampling time
    Time,
    /// Time deviation (current sampling time vs. last publish time)
    TimeErr,
    /// The last publish time
    PubTime,
    /// The most recently published value
    PubValue,
}
/// Buffered measurement state: current values plus per-point history
/// (last update, last handled, alarm status) and an alias index.
#[derive(Debug, Clone, Default)]
pub struct MeasureBuf {
    /// Maps a point's alias to its point id
    pub alias_to_id: HashMap<String, u64>,
    /// Latest measurement value per point id
    pub current_mvs: HashMap<u64, MeasureValue>,
    /// last time updated measurements
    last_mvs: HashMap<u64, MeasureValue>,
    /// Current alarm status per point id
    current_alarm: HashMap<u64, u8>,
    /// Measurement as of the last handling pass, per point id
    last_handled: HashMap<u64, MeasureValue>,
}
/// Dependency graph over computing points, plus per-point metadata.
#[derive(Debug, Clone)]
pub struct ExprGraph {
    /// Graph whose nodes are point ids
    pub graph: DiGraphMap<u64, u8>,
    /// Parsed expression per computing point
    pub exprs: HashMap<u64, Expr>,
    /// Layer number per computing point; points in the same layer do not
    /// affect each other
    pub layer: HashMap<u64, u32>,
    /// Variable names referenced by each computing node
    pub var_names: HashMap<u64, Vec<String>>,
}
/// Detects cycles among computing points.
///
/// Returns the id of one point sitting on a cycle, or `None` when the
/// dependency graph is acyclic.
pub fn check_loop_in_computing_points(
    map: &HashMap<u64, Measurement>,
    alias: &HashMap<String, u64>,
) -> Option<u64> {
    let mut exprs = HashMap::new();
    let mut in_degree: HashMap<u64, u8> = HashMap::with_capacity(map.len());
    let graph = form_graph(map, alias, &mut exprs, &mut in_degree);
    // A failed topological sort pinpoints a node that lies on a cycle.
    match algo::toposort(&graph, None) {
        Ok(_) => None,
        Err(cycle) => {
            error!("!!!There is loop in computing points");
            Some(cycle.node_id())
        }
    }
}
/// 从变量字符串中获得测点变量的类型和测点号,如果没有找到测点,则测点号为0
fn find_points_in_var(
mut var_str: String,
all_alias: &HashMap<String, u64>,
) -> (PointVarType, u64) {
// 首先判断是要获取测点值or测点偏差(和上次发布相比)or测点时间
let var_type = if var_str.ends_with(PUB_T_SUFFIX) {
let len = var_str.len() - PUB_T_SUFFIX.len();
var_str.truncate(len);
PointVarType::PubTime
} else if var_str.ends_with(PUB_V_SUFFIX) {
let len = var_str.len() - PUB_V_SUFFIX.len();
var_str.truncate(len);
PointVarType::PubValue
} else if var_str.ends_with(DDT_SUFFIX) {
let len = var_str.len() - DDT_SUFFIX.len();
var_str.truncate(len);
PointVarType::Gradient
} else if var_str.ends_with(ERR_SUFFIX) {
let len = var_str.len() - ERR_SUFFIX.len();
var_str.truncate(len);
PointVarType::Error
} else if var_str.ends_with(DT_SUFFIX) {
let len = var_str.len() - DT_SUFFIX.len();
var_str.truncate(len);
PointVarType::TimeErr
} else if var_str.ends_with(T_SUFFIX) {
let len = var_str.len() - T_SUFFIX.len();
var_str.truncate(len);
PointVarType::Time
} else {
PointVarType::Value
};
// 获取测点号
let point_id = if let Some(id) = all_alias.get(&var_str) {
*id
} else if var_str.starts_with('_') || var_str.starts_with('$') {
if let Ok(point_id) = var_str.as_str()[1..].parse::<u64>() {
point_id
} else {
0
}
} else {
0
};
(var_type, point_id)
}
/// Collects every point variable referenced by `expr`.
///
/// Note: the result may contain duplicate point ids.
pub fn find_points_in_expr(
    expr: &Expr,
    all_alias: &HashMap<String, u64>,
) -> Vec<(PointVarType, u64, String)> {
    expr.iter()
        .filter_map(|token| {
            let Token::Var(s) = token else { return None };
            let (pv_type, point_id) = find_points_in_var(s.clone(), all_alias);
            // Only keep variables that actually resolved to a point
            (point_id > 0).then(|| (pv_type, point_id, s.clone()))
        })
        .collect()
}
/// Builds the directed dependency graph between measurement points.
///
/// Every computing point becomes a node, and each point referenced by its
/// expression gets an edge `dependency -> computing point`. Also fills
/// `exprs` (parsed expression per computing point) and `in_degree`
/// (number of distinct dependencies per computing point).
pub fn form_graph(
    map: &HashMap<u64, Measurement>,
    alias: &HashMap<String, u64>,
    exprs: &mut HashMap<u64, Expr>,
    in_degree: &mut HashMap<u64, u8>,
) -> DiGraphMap<u64, u8> {
    let mut graph = DiGraphMap::<u64, u8>::with_capacity(map.len(), map.len());
    // First build a graph containing every computing point and all the
    // points its expression refers to
    for (id, m) in map {
        if m.is_computing_point {
            // NOTE(review): panics if a stored expression fails to parse —
            // confirm expressions are validated before reaching here
            let expr: Expr = m.expression.parse().unwrap();
            if !graph.contains_node(*id) {
                graph.add_node(*id);
            }
            for (_, point_id, _) in find_points_in_expr(&expr, alias) {
                // Add the dependency node and the edge
                if !graph.contains_node(point_id) {
                    graph.add_node(point_id);
                }
                // add_edge returns None only when the edge is new, so each
                // distinct dependency is counted once
                if graph.add_edge(point_id, *id, 1).is_none() {
                    if let Some(degree) = in_degree.get_mut(id) {
                        *degree += 1;
                    } else {
                        in_degree.insert(*id, 1);
                    }
                }
            }
            exprs.insert(*id, expr);
        }
    }
    graph
}
impl MeasureBuf {
    /// Builds a buffer from an initial snapshot: `last_mvs` starts as a
    /// copy of `current_mvs`; `last_handled` and alarms start empty.
    pub fn new(
        current_mvs: HashMap<u64, MeasureValue>,
        alias_to_id: HashMap<String, u64>,
    ) -> MeasureBuf {
        let last_handled = HashMap::with_capacity(current_mvs.len());
        let last_mvs = current_mvs.clone();
        MeasureBuf {
            current_mvs,
            last_mvs,
            current_alarm: Default::default(),
            alias_to_id,
            last_handled,
        }
    }
    /// Replaces the tracked points and aliases, clearing all history.
    pub fn initial_point(&mut self, mvs: HashMap<u64, MeasureValue>, alias: HashMap<String, u64>) {
        self.current_mvs = mvs;
        self.alias_to_id = alias;
        self.last_mvs.clear();
        self.last_handled.clear();
        self.current_mvs.shrink_to_fit();
        self.alias_to_id.shrink_to_fit();
    }
    /// Returns a copy of the buffer restricted to the given point ids.
    /// An id of 0 means "everything". Aliases are copied only when
    /// `is_copy_alias` is set.
    pub fn copy_sub(&self, ids: &HashSet<u64>, is_copy_alias: bool) -> MeasureBuf {
        // means all
        if ids.contains(&0) {
            return self.clone();
        }
        let v: Vec<(Option<MeasureValue>, Option<MeasureValue>, Option<MeasureValue>)> = ids.iter().map(|id| {
            let mv = self.current_mvs.get(id).cloned();
            let last_mv = self.last_mvs.get(id).cloned();
            let last_handled = self.last_handled.get(id).cloned();
            (mv, last_mv, last_handled)
        }).collect::<_>();
        let alias_to_id = if is_copy_alias {
            let mut alias_to_id = HashMap::with_capacity(ids.len());
            for (alias, id) in &self.alias_to_id {
                if ids.contains(id) {
                    alias_to_id.insert(alias.clone(), *id);
                }
            }
            alias_to_id.shrink_to_fit();
            alias_to_id
        } else {
            HashMap::with_capacity(0)
        };
        let mut current_mvs = HashMap::with_capacity(ids.len());
        let mut last_mvs = HashMap::with_capacity(ids.len());
        let mut last_handled = HashMap::with_capacity(ids.len());
        // `if let` replaces the original is_some()/unwrap() pairs
        for (current_mv, last_mv, last_handle) in v {
            if let Some(mv) = current_mv {
                current_mvs.insert(mv.point_id, mv);
            }
            if let Some(mv) = last_mv {
                last_mvs.insert(mv.point_id, mv);
            }
            if let Some(mv) = last_handle {
                last_handled.insert(mv.point_id, mv);
            }
        }
        MeasureBuf {
            alias_to_id,
            current_mvs,
            last_mvs,
            last_handled,
            current_alarm: Default::default(),
        }
    }
    /// Whether the buffer tracks the given point.
    pub fn contains_point(&self, point_id: &u64) -> bool {
        self.current_mvs.contains_key(point_id)
    }
    // pub fn update_mv(&mut self, new_m: &MeasureValue) {
    //     let point_id = new_m.point_id;
    //     if self.contains_point(&point_id) {
    //         let cloned_last = self.get_mut(&point_id).clone();
    //         self.update_last_handled(cloned_last.clone());
    //         self.update_last_mv(cloned_last);
    //         self.get_mut(&point_id).update(new_m);
    //     }
    // }
    // pub fn update_mvs(&mut self, v: &[MeasureValue]) {
    //     for m in v {
    //         // update_mv already checks point existence, no need to re-check here
    //         self.update_mv(m);
    //     }
    // }
    /// Merges another buffer into this one. Current values are updated
    /// only for points already tracked here; the history maps are updated
    /// or inserted, again only for tracked points.
    pub fn update_buf(&mut self, buf: &MeasureBuf) {
        for (id, mv) in &buf.current_mvs {
            if let Some(m) = self.current_mvs.get_mut(id) {
                m.update(mv);
            }
        }
        for (id, mv) in &buf.last_mvs {
            if !self.contains_point(id) {
                continue
            }
            if let Some(m) = self.last_mvs.get_mut(id) {
                m.update(mv);
            } else {
                self.last_mvs.insert(*id, mv.clone());
            }
        }
        for (id, mv) in &buf.last_handled {
            if !self.contains_point(id) {
                continue
            }
            if let Some(m) = self.last_handled.get_mut(id) {
                m.update(mv);
            } else {
                self.last_handled.insert(*id, mv.clone());
            }
        }
    }
    /// Mutable access to a tracked point's current value.
    /// Panics when the point is not tracked — check `contains_point` first.
    pub fn get_mut(&mut self, point_id: &u64) -> &mut MeasureValue {
        self.current_mvs.get_mut(point_id).unwrap()
    }
    /// Current value of a point, if tracked.
    pub fn get_mv(&self, point_id: &u64) -> Option<&MeasureValue> {
        self.current_mvs.get(point_id)
    }
    /// Number of tracked points.
    pub fn get_mv_count(&self) -> usize {
        self.current_mvs.len()
    }
    /// Iterator over all current (point id, value) pairs.
    pub fn get_mvs(&self) -> Iter<'_, u64, MeasureValue> {
        self.current_mvs.iter()
    }
    /// Alarm status for a point; 0 when none is recorded.
    pub fn get_alarm_status(&self, point_id: &u64) -> u8 {
        self.current_alarm.get(point_id).copied().unwrap_or(0)
    }
    /// Value as of the last handling pass, if any.
    pub fn get_last_handled(&self, point_id: &u64) -> Option<&MeasureValue> {
        self.last_handled.get(point_id)
    }
    /// Value as of the previous update, if any.
    pub fn get_last_updated(&self, point_id: &u64) -> Option<&MeasureValue> {
        self.last_mvs.get(point_id)
    }
    /// Records `m` as the last-handled value for its point.
    pub fn update_last_handled(&mut self, m: MeasureValue) {
        self.last_handled.insert(m.point_id, m);
    }
    /// Records `m` as the last-updated value for its point.
    pub fn update_last_mv(&mut self, m: MeasureValue) {
        self.last_mvs.insert(m.point_id, m);
    }
    /// Sets the alarm status for a point.
    pub fn update_alarm_status(&mut self, point_id: u64, status: u8) {
        self.current_alarm.insert(point_id, status);
    }
    /// True when the point exists and its discrete value is positive.
    pub fn get_bool_measure(&self, point_id: &u64) -> bool {
        self.get_mv(point_id).map_or(false, |m| m.discrete_value > 0)
    }
}
// above should as same as in sparrowzz
\ No newline at end of file
pub use model::*;
pub use utils::*;
pub mod model;
pub mod utils;
\ No newline at end of file
// above should as same as in sparrowzz
use std::collections::HashMap;
use log::{trace, warn};
use serde::{Deserialize, Serialize};
use eig_expr::Expr;
use eig_expr::shuntingyard::{rpn_to_latex, rpn_to_string};
use eig_expr::Operation;
use eig_expr::Operation::*;
use eig_expr::Token::*;
use crate::find_points_in_expr;
use crate::solvers::utils::*;
/**
* @api {由表达式组成的稀疏矩阵} /SparseMat SparseMat
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {usize} m m
* @apiSuccess {usize} n n
* @apiSuccess {tuple[]} u u,数组,tuple格式为(usize, usize, Expr)
*/
/// Sparse matrix of expressions: `v` holds (row, column, expression)
/// triples for the non-empty entries of an `m`-by-`n` matrix.
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct SparseMat {
    // Number of rows
    pub m: usize,
    // Number of columns
    pub n: usize,
    // Non-empty entries as (row, column, expression)
    pub v: Vec<(usize, usize, Expr)>,
}
/**
* @api {由表达式组成的矩阵} /Mat Mat
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {usize} m m
* @apiSuccess {usize} n n
* @apiSuccess {Expr[]} v v
*/
/// Dense matrix of expressions: `v` stores all `m * n` entries in
/// row-major order.
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct Mat {
    // Number of rows
    pub(crate) m: usize,
    // Number of columns
    pub(crate) n: usize,
    // Entries in row-major order, length m * n
    pub(crate) v: Vec<Expr>,
}
/**
* @api {混合整数线性规划求解器} /MILP MILP
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {String[]} x_name 变量名称
* @apiSuccess {tuple[]} x_lower x_lower,数组,tuple格式为(usize, Expr)
* @apiSuccess {tuple[]} x_upper x_upper,数组,tuple格式为(usize, Expr)
* @apiSuccess {u8[]} binary_int_float 整数变量在x中的位置
* @apiSuccess {Mat} a Ax >=/<= b
* @apiSuccess {Expr[]} b b
* @apiSuccess {Operation[]} constraint_type constraint_type
* @apiSuccess {tuple[]} c min/max c^T*x,数组,tuple格式为(usize, Expr)
* @apiSuccess {bool} min_or_max min: true, max: false
* @apiSuccess {Map} parameters 参数Map,HashMap<String, String>
*/
/// Mixed-integer linear program, dense representation.
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct MILP {
    // Variable names
    pub(crate) x_name: Vec<String>,
    // Lower bounds as (variable index, bound expression)
    pub x_lower: Vec<(usize, Expr)>,
    // Upper bounds as (variable index, bound expression)
    pub x_upper: Vec<(usize, Expr)>,
    // Variable kind per position in x (binary / integer / float)
    pub binary_int_float: Vec<u8>,
    // Constraints: Ax >=/<= b
    pub a: Mat,
    pub b: Vec<Expr>,
    // Relational operator per constraint row
    pub constraint_type: Vec<Operation>,
    // Objective: min/max c^T * x
    pub c: Vec<Expr>,
    // min: true, max: false
    pub min_or_max: bool,
    // Solver parameters
    pub parameters: HashMap<String, String>,
}
/**
* @api {混合整数线性规划求解器,矩阵用稀疏矩阵} /SparseMILP SparseMILP
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {String[]} x_name 变量名称
* @apiSuccess {tuple[]} x_lower x_lower,数组,tuple格式为(usize, Expr)
* @apiSuccess {tuple[]} x_upper x_upper,数组,tuple格式为(usize, Expr)
* @apiSuccess {u8[]} binary_int_float 整数变量在x中的位置
* @apiSuccess {SparseMat} a Ax >=/<= b
* @apiSuccess {Expr[]} b b
* @apiSuccess {Operation[]} constraint_type constraint_type
* @apiSuccess {tuple[]} c min/max c^T*x,数组,tuple格式为(usize, Expr)
* @apiSuccess {bool} min_or_max min: true, max: false
* @apiSuccess {Map} parameters 参数Map,HashMap<String, String>
*/
/// Mixed-integer linear program with a sparse constraint matrix.
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct SparseMILP {
    // Variable names
    pub x_name: Vec<String>,
    // Lower bounds as (variable index, bound expression)
    pub x_lower: Vec<(usize, Expr)>,
    // Upper bounds as (variable index, bound expression)
    pub x_upper: Vec<(usize, Expr)>,
    // Variable kind per position in x (binary / integer / float)
    pub binary_int_float: Vec<u8>,
    // Constraints: Ax >=/<= b
    pub a: SparseMat,
    pub b: Vec<Expr>,
    // Relational operator per constraint row
    pub constraint_type: Vec<Operation>,
    // Objective: min/max c^T * x, sparse as (variable index, coefficient)
    pub c: Vec<(usize, Expr)>,
    // min: true, max: false
    pub min_or_max: bool,
    // Solver parameters
    pub parameters: HashMap<String, String>,
}
/**
* @api {稀疏线性方程组求解器} /SparseSolver SparseSolver
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {SparseMat} a a
* @apiSuccess {Expr[]} b b
* @apiSuccess {String[]} x_name x_name
* @apiSuccess {Expr[]} x_init x_init
* @apiSuccess {Map} parameters 参数Map,HashMap<String, String>
*/
/// Solver for the sparse linear system Ax = b.
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct SparseSolver {
    // Coefficient matrix A
    pub a: SparseMat,
    // Right-hand side b
    pub b: Vec<Expr>,
    // Variable names
    pub x_name: Vec<String>,
    // Initial value expression per variable
    pub x_init: Vec<Expr>,
    // Solver parameters
    pub parameters: HashMap<String, String>,
}
/// Complex-valued variant of [`SparseSolver`] (built via
/// `create_x_name_init_cx`); same field layout.
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct SparseSolverCx {
    // Coefficient matrix A
    pub a: SparseMat,
    // Right-hand side b
    pub b: Vec<Expr>,
    // Variable names
    pub x_name: Vec<String>,
    // Initial value expression per variable
    pub x_init: Vec<Expr>,
    // Solver parameters
    pub parameters: HashMap<String, String>,
}
/// Solver for the dense linear system Ax = b.
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct Solver {
    // Coefficient matrix A
    pub a: Mat,
    // Right-hand side b
    pub b: Vec<Expr>,
    // Variable names
    pub x_name: Vec<String>,
    // Initial value expression per variable
    pub x_init: Vec<Expr>,
    // Solver parameters
    pub parameters: HashMap<String, String>,
}
/**
* @api {NLP} /NLP NLP
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {Expr} obj_expr min obj
* @apiSuccess {String[]} x_name 变量名称
* @apiSuccess {Expr[]} x_lower x_lower
* @apiSuccess {Expr[]} x_upper x_upper
* @apiSuccess {Expr[]} g g(x) >=/<=/== b
* @apiSuccess {Expr[]} g_lower g_lower
* @apiSuccess {Expr[]} g_upper g_upper
* @apiSuccess {bool} min_or_max min: true, max: false
* @apiSuccess {Map} parameters 参数Map,HashMap<String, String>
*/
/// Nonlinear program: optimize `obj_expr` subject to bounds on x and on
/// the constraint functions g(x).
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct NLP {
    // Objective expression
    pub obj_expr: Expr,
    // Variable names
    pub x_name: Vec<String>,
    // Per-variable bounds
    pub x_lower: Vec<Expr>,
    pub x_upper: Vec<Expr>,
    // Constraint functions: g_lower <= g(x) <= g_upper
    pub g: Vec<Expr>,
    pub g_lower: Vec<Expr>,
    pub g_upper: Vec<Expr>,
    // Initial point x0
    pub x_init: Vec<Expr>,
    // min: true, max: false
    pub min_or_max: bool,
    // Solver parameters
    pub parameters: HashMap<String, String>,
}
/**
* @api {非线性方程求解器} /NewtonSolver NewtonSolver
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {Expr[]} f f
* @apiSuccess {String[]} x_name x_name
* @apiSuccess {Expr[]} x_init x_init
* @apiSuccess {Expr[]} x_init_cx x_init_cx
* @apiSuccess {Map} parameters 参数Map,HashMap<String, String>
*/
/// Newton solver for the nonlinear system f(x) = b.
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct NewtonSolver {
    // Equation expressions f
    pub f: Vec<Expr>,
    // Variable names
    pub x_name: Vec<String>,
    // Initial value expression per variable
    pub x_init: Vec<Expr>,
    // Complex initial value expression per variable
    pub x_init_cx: Vec<Expr>,
    // Solver parameters
    pub parameters: HashMap<String, String>,
}
/// Weighted least-squares solver.
#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
pub struct NewtonWls {
    // Equation expressions f
    pub f: Vec<Expr>,
    // Weight expression per equation
    pub weight: Vec<Expr>,
    // Variable names
    pub x_name: Vec<String>,
    // Initial value expression per variable
    pub x_init: Vec<Expr>,
    // Solver parameters
    pub parameters: HashMap<String, String>,
}
impl Solver {
    /// Parses a dense linear system `Ax = b` from its textual form.
    ///
    /// `expr_str` holds one equation per line, followed by one final line
    /// of comma-separated variable declarations. On failure returns the
    /// 1-based index of the offending line (or the total line count for
    /// structural errors).
    #[allow(clippy::should_implement_trait)]
    pub fn from_str(mut expr_str: Vec<&str>) -> Result<Self, usize> {
        // The vector is owned, so mutate it in place — the original
        // `expr_str.clone()` was a redundant copy.
        expr_str.retain(|expr| !expr.trim().is_empty());
        let s_n = expr_str.len();
        if s_n < 2 {
            warn!("!!Insufficient function num for solve Ax=b, content: {:?}",expr_str);
            return Err(s_n);
        }
        // The last line declares the variables
        let x_defines: Vec<String> = expr_str[s_n - 1]
            .split(',')
            .map(|s| s.to_string())
            .collect();
        if s_n != x_defines.len() + 1 {
            warn!("!!Insufficient function num for solve Ax=b, content: {:?}",expr_str);
            return Err(s_n);
        }
        let (x_name, x_init) = create_x_name_init(&x_defines).ok_or(s_n)?;
        let mut b = Vec::with_capacity(x_name.len());
        let mut a = Mat {
            m: x_name.len(),
            n: x_name.len(),
            v: Vec::new(),
        };
        for i in 0..x_name.len() {
            let s = expr_str[i];
            // A single map_err suffices here (the original chained the
            // same map_err twice).
            let constraint_expr = s.parse::<Expr>().map_err(|_| i + 1)?;
            // Split the equation into LHS terms and the RHS constant
            let mut left_right = get_expr_from_fun(constraint_expr.rpn).ok_or(i + 1)?;
            let right = left_right.pop().ok_or(i + 1)?;
            b.push(right);
            let left_all = left_right.pop().ok_or(i + 1)?;
            let left = get_expr_from_fun(left_all.rpn).ok_or(i + 1)?;
            if left.len() != a.n {
                warn!("!!Insufficient expr num in A, content: {:?}", expr_str);
                return Err(i + 1);
            }
            for expr in left {
                a.v.push(expr);
            }
        }
        let parameters = HashMap::new();
        Ok(Solver { a, b, x_name, x_init, parameters })
    }
    /// Like [`Solver::from_str`] but also parses solver parameters.
    /// Errors are (0, line) for the model and (1, line) for parameters.
    pub fn from_str_with_parameters(
        expr_str: Vec<&str>,
        parameters_str: &[&str],
    ) -> Result<Solver, (usize, usize)> {
        match Solver::from_str(expr_str) {
            Ok(mut m) => match read_parameters_from_str(parameters_str) {
                Ok(parameters) => {
                    m.parameters = parameters;
                    Ok(m)
                }
                Err(n) => Err((1, n)),
            },
            Err(n) => Err((0, n)),
        }
    }
    /// Collects the ids of all measurement points referenced by A or b
    /// (each id reported once, in first-seen order).
    pub(crate) fn get_related_points(&self, alias: &HashMap<String, u64>) -> Vec<u64> {
        let mut result = Vec::new();
        for expr in self.a.v.iter().chain(self.b.iter()) {
            for (_, id, _) in find_points_in_expr(expr, alias) {
                if !result.contains(&id) {
                    result.push(id);
                }
            }
        }
        result
    }
}
impl SparseSolver {
    /// Parses a sparse linear system `Ax = b` from its textual form.
    ///
    /// `expr_str` holds one equation per line, followed by one final line
    /// of comma-separated variable declarations. On failure returns the
    /// 1-based index of the offending line (or the total line count for
    /// structural errors).
    #[allow(clippy::should_implement_trait)]
    pub fn from_str(expr_str: &[&str]) -> Result<Self, usize> {
        let mut expr_str = expr_str.to_vec();
        expr_str.retain(|expr| !expr.trim().is_empty());
        let s_n = expr_str.len();
        if s_n < 2 {
            warn!("!!Insufficient function num for solve Ax=b, content: {:?}",expr_str);
            return Err(s_n);
        }
        // The last line declares the variables
        let x_defines: Vec<String> = expr_str
            .pop()
            .unwrap()
            .split(',')
            .map(|s| s.trim().to_string())
            .collect();
        if s_n != x_defines.len() + 1 {
            warn!("!!Insufficient function num for solve Ax=b, content: {:?}",expr_str);
            return Err(s_n);
        }
        let (x_name, x_init) = create_x_name_init(&x_defines).ok_or(s_n)?;
        // Map each variable name to its column index
        let mut x_pos = HashMap::with_capacity(x_name.len());
        for (i, x_name_i) in x_name.iter().enumerate() {
            x_pos.insert(x_name_i.clone(), i);
        }
        let mut b = Vec::with_capacity(x_name.len());
        let mut a = SparseMat {
            m: x_name.len(),
            n: x_name.len(),
            v: Vec::new(),
        };
        for (i, expr_str_i) in expr_str.iter().enumerate() {
            let r = parse_linear_expr_str(expr_str_i, &x_pos).ok_or(i + 1)?;
            // Column 0 carries the equation's constant term; when it is
            // absent, this row's right-hand side is 0.
            if r[0].0 != 0 {
                b.push(Expr::from_vec(vec![Number(0.0)]));
            }
            for (col, mut expr) in r {
                if col == 0 {
                    // Constant term: move it to the RHS with sign flipped
                    if expr.rpn.len() == 1 {
                        // Plain numeric constant — negate it directly
                        if let Number(f) = expr.rpn[0] {
                            expr.rpn[0] = Number(-f);
                            b.push(expr);
                            continue;
                        }
                    }
                    expr.rpn.push(Unary(Minus));
                    b.push(expr);
                } else {
                    // Columns from the parser are 1-based
                    a.v.push((i, col - 1, expr));
                }
            }
        }
        let parameters = HashMap::new();
        Ok(SparseSolver { a, b, x_name, x_init, parameters })
    }
    /// Like [`SparseSolver::from_str`] but also parses solver parameters.
    /// Errors are (0, line) for the model and (1, line) for parameters.
    pub fn from_str_with_parameters(
        expr_str: &[&str],
        parameters_str: &[&str],
    ) -> Result<SparseSolver, (usize, usize)> {
        match SparseSolver::from_str(expr_str) {
            Ok(mut m) => match read_parameters_from_str(parameters_str) {
                Ok(parameters) => {
                    m.parameters = parameters;
                    Ok(m)
                }
                Err(n) => Err((1, n)),
            },
            Err(n) => Err((0, n)),
        }
    }
    /// Collects the ids of all measurement points referenced by A or b
    /// (each id reported once, in first-seen order).
    pub(crate) fn get_related_points(&self, alias: &HashMap<String, u64>) -> Vec<u64> {
        let mut result = Vec::new();
        for (_, _, expr) in &self.a.v {
            for (_, id, _) in find_points_in_expr(expr, alias) {
                if !result.contains(&id) {
                    result.push(id);
                }
            }
        }
        for expr in &self.b {
            for (_, id, _) in find_points_in_expr(expr, alias) {
                if !result.contains(&id) {
                    result.push(id);
                }
            }
        }
        result
    }
    /// Serializes the model back to text: (equations, variable
    /// declarations, solver parameters). Assumes `a.v` is sorted in
    /// row-major order, matching what `from_str` produces.
    pub fn to_str(&self) -> (String, String, HashMap<String, String>) {
        // Equations
        let mut result_fun = "".to_string();
        let mut v_index = 0;
        for i in 0..self.a.m {
            // row i
            let mut plus_flag = 0;
            for j in 0..self.a.n {
                // column j of the coefficient matrix
                if self.a.v[v_index].0 == i && self.a.v[v_index].1 == j {
                    // entry (i, j) is present
                    if let Ok(s) = rpn_to_string(&self.a.v[v_index].2.rpn) {
                        if plus_flag == 1 {
                            result_fun += "+"
                        }
                        if s != *"1" {
                            result_fun += &format!("{}*{}", s, self.x_name[j]);
                        } else {
                            result_fun += &self.x_name[j];
                        }
                        plus_flag = 1;
                    } else {
                        warn!("!!Failed to parse mat, row:{}, line:{}", i, j);
                    }
                    v_index += 1;
                    if v_index == self.a.v.len() {
                        break;
                    }
                }
            }
            if let Ok(s) = rpn_to_string(&self.b[i].rpn) {
                // constant term
                result_fun += &format!("={};", s);
            } else {
                warn!("!!Failed to parse b, row:{}", i);
            }
        }
        // Variable declarations
        let mut result_var = "".to_string();
        for i in 0..self.x_name.len() {
            if i != 0 {
                result_var += ",";
            }
            result_var += &self.x_name[i];
            if let Ok(init) = rpn_to_string(&self.x_init[i].rpn) {
                if !init.is_empty() {
                    result_var += &format!(":{}", init);
                }
            }
        }
        // Solver parameters
        let result_other = self.parameters.clone();
        (result_fun, result_var, result_other)
    }
    /// Renders the model as LaTeX: (equations, variable declarations).
    pub fn to_latex(&self) -> (String, String) {
        // Equations
        let mut result_fun = "".to_string();
        let mut v_index = 0;
        for i in 0..self.a.m {
            // row i
            let mut plus_flag = 0;
            for j in 0..self.a.n {
                // column j of the coefficient matrix
                if self.a.v[v_index].0 == i && self.a.v[v_index].1 == j {
                    // entry (i, j) is present
                    if let Ok(s) = rpn_to_latex(&self.a.v[v_index].2.rpn) {
                        if plus_flag == 1 {
                            result_fun += "+"
                        }
                        if s != *"1" {
                            // "\\times" emits the LaTeX \times command; the
                            // original "\times" emitted a TAB followed by "imes"
                            result_fun += &format!("{}\\times {}", s, self.x_name[j]);
                        } else {
                            result_fun += &self.x_name[j];
                        }
                        plus_flag = 1;
                    } else {
                        warn!("!!Failed to parse mat, row:{}, line:{}", i, j);
                    }
                    v_index += 1;
                    if v_index == self.a.v.len() {
                        break;
                    }
                }
            }
            if let Ok(s) = rpn_to_latex(&self.b[i].rpn) {
                // constant term, terminated by a LaTeX line break
                result_fun += &format!("={}\\\\\n", s);
            } else {
                warn!("!!Failed to parse b, row:{}", i);
            }
        }
        // Variable declarations
        let mut result_var = "".to_string();
        for i in 0..self.x_name.len() {
            if i != 0 {
                result_var += ",";
            }
            result_var += &self.x_name[i];
            if let Ok(init) = rpn_to_latex(&self.x_init[i].rpn) {
                if !init.is_empty() {
                    result_var += &format!(":{}", init);
                }
            }
        }
        result_var += "\\\\\n";
        (result_fun, result_var)
    }
}
impl SparseSolverCx {
    /// Parses a sparse complex linear system `Ax = b` from its textual
    /// form. Same layout and logic as [`SparseSolver::from_str`], except
    /// variable initial values are parsed as complex via
    /// `create_x_name_init_cx`.
    #[allow(clippy::should_implement_trait)]
    pub fn from_str(expr_str: &[&str]) -> Result<Self, usize> {
        let mut expr_str = expr_str.to_vec();
        expr_str.retain(|expr| !expr.trim().is_empty());
        let s_n = expr_str.len();
        if s_n < 2 {
            warn!("!!Insufficient function num for solve Ax=b, content: {:?}",expr_str);
            return Err(s_n);
        }
        // The last line declares the variables
        let x_defines: Vec<String> = expr_str
            .pop()
            .unwrap()
            .split(',')
            .map(|s| s.trim().to_string())
            .collect();
        if s_n != x_defines.len() + 1 {
            warn!("!!Insufficient function num for solve Ax=b, content: {:?}",expr_str);
            return Err(s_n);
        }
        let (x_name, x_init) = create_x_name_init_cx(&x_defines).ok_or(s_n)?;
        // Map each variable name to its column index
        let mut x_pos = HashMap::with_capacity(x_name.len());
        for (i, x_name_i) in x_name.iter().enumerate() {
            x_pos.insert(x_name_i.clone(), i);
        }
        let mut b = Vec::with_capacity(x_name.len());
        let mut a = SparseMat {
            m: x_name.len(),
            n: x_name.len(),
            v: Vec::new(),
        };
        for (i, expr_str_i) in expr_str.iter().enumerate() {
            let r = parse_linear_expr_str(expr_str_i, &x_pos).ok_or(i + 1)?;
            // Column 0 carries the constant term; when absent, RHS is 0
            if r[0].0 != 0 {
                b.push(Expr::from_vec(vec![Number(0.0)]));
            }
            for (col, mut expr) in r {
                if col == 0 {
                    if expr.rpn.len() == 1 {
                        // Plain numeric constant — negate it directly
                        if let Number(f) = expr.rpn[0] {
                            expr.rpn[0] = Number(-f);
                            b.push(expr);
                            continue;
                        }
                    }
                    expr.rpn.push(Unary(Minus));
                    b.push(expr);
                } else {
                    // Columns from the parser are 1-based
                    a.v.push((i, col - 1, expr));
                }
            }
        }
        let parameters = HashMap::new();
        Ok(SparseSolverCx { a, b, x_name, x_init, parameters })
    }
}
impl MILP {
/// Parses a MILP model from its textual form.
///
/// `expr_str` layout: line 0 is the objective (a `min(...)`/`max(...)`
/// call), the middle lines are constraints, and the last line declares
/// the variables. On failure returns the 1-based index of the offending
/// line (or the total line count for structural errors).
#[allow(clippy::should_implement_trait)]
pub fn from_str(expr_str: &[&str]) -> Result<MILP, usize> {
    let mut expr_str = expr_str.to_vec();
    expr_str.retain(|expr| !expr.trim().is_empty());
    let s_n = expr_str.len();
    if s_n <= 2 {
        return Err(s_n);
    }
    // First determine the variable names and their types
    let (x_name, binary_int_float, _, x_upper, x_lower) =
        get_x_info(expr_str.pop().unwrap()).map_err(|_| s_n)?;
    // Parse the objective function
    let obj_exp: Expr = expr_str[0].parse().map_err(|_| 1usize)?;
    let n = obj_exp.len();
    // The top of the RPN must be a min()/max() wrapper function
    let f = &obj_exp.rpn[n - 1];
    let min_or_max: bool;
    match f {
        Func(name, _) => {
            if name.to_uppercase() == "MIN" {
                min_or_max = true;
            } else if name.to_uppercase() == "MAX" {
                min_or_max = false;
            } else {
                warn!("!!No min or max function found in obj expression: {:?}",obj_exp);
                return Err(1);
            }
        }
        _ => {
            warn!("!!Not a function, obj expression: {:?}", obj_exp);
            return Err(1);
        }
    }
    let c: Vec<Expr> = get_expr_from_fun(obj_exp.rpn).ok_or(1usize)?;
    let m = expr_str.len() - 1;
    let n = x_name.len();
    let mut v = Vec::new();
    let mut b = Vec::with_capacity(m);
    let mut constraint_type = Vec::with_capacity(m);
    for (i, expr_str_i) in expr_str.iter().enumerate().skip(1) {
        let s = expr_str_i;
        let constraint_expr = s.parse::<Expr>().map_err(|_| i + 1)?;
        // Each constraint must end with >=, <= or ==
        let token = &constraint_expr.rpn[constraint_expr.len() - 1];
        if let Binary(op) = token {
            if *op == GtOrEqual || *op == LtOrEqual || *op == Equal {
                constraint_type.push(*op)
            } else {
                warn!("!!The {}th constraint is wrong.", i);
                return Err(i + 1);
            }
        } else {
            warn!("!!The {}th constraint is wrong.", i);
            return Err(i + 1);
        }
        // Split into the left-hand coefficient row and right-hand constant
        let mut left_right = get_expr_from_fun(constraint_expr.rpn).ok_or(i + 1)?;
        b.push(left_right.pop().unwrap());
        let left = get_expr_from_fun(left_right.pop().unwrap().rpn).ok_or(i + 1)?;
        for expr in left {
            v.push(expr);
        }
    }
    let a = Mat { m, n, v };
    let parameters = HashMap::new();
    Ok(MILP {
        x_name,
        x_lower,
        x_upper,
        a,
        b,
        c,
        binary_int_float,
        constraint_type,
        min_or_max,
        parameters,
    })
}
/// Like `from_str` but also parses solver parameters. Errors are
/// (0, line) for the model and (1, line) for the parameters.
pub fn from_str_with_parameters(
    expr_str: &[&str],
    parameters_str: &[&str],
) -> Result<MILP, (usize, usize)> {
    let mut model = MILP::from_str(expr_str).map_err(|n| (0, n))?;
    model.parameters = read_parameters_from_str(parameters_str).map_err(|n| (1, n))?;
    Ok(model)
}
/// Collect the ids of all measurement points referenced anywhere in the
/// model (coefficient matrix, right-hand side, then objective),
/// de-duplicated while preserving first-seen order.
pub(crate) fn get_related_points(&self, alias: &HashMap<String, u64>) -> Vec<u64> {
    let mut ids = Vec::new();
    let all_exprs = self.a.v.iter().chain(self.b.iter()).chain(self.c.iter());
    for expr in all_exprs {
        for (_, id, _) in find_points_in_expr(expr, alias) {
            if !ids.contains(&id) {
                ids.push(id);
            }
        }
    }
    ids
}
/// Serialize the model back to text.
///
/// Returns `(objective, constraints, variable declarations, solver
/// parameters)`.
pub fn to_str(&self) -> (String, String, String, HashMap<String, String>) {
    // Objective: "min(" / "max(" followed by comma-separated coefficient exprs.
    let mut result_fun = String::from(if self.min_or_max { "min(" } else { "max(" });
    let mut first = true;
    for (i, expr) in self.c.iter().enumerate() {
        match rpn_to_string(&expr.rpn) {
            Ok(s) => {
                if !first {
                    result_fun += ",";
                }
                result_fun += &s;
                first = false;
            }
            Err(_) => warn!("!!Failed to parse object function, index:{}", i),
        }
    }
    result_fun += ");";
    // Constraints: one "g(coeffs)<op>rhs;" per row of the dense m x n matrix.
    let mut result_cons = String::new();
    for i in 0..self.a.m {
        let mut row = String::new();
        let mut first = true;
        // Columns of row i live at the contiguous slice [i*n, (i+1)*n).
        for j in i * self.a.n..(i + 1) * self.a.n {
            if !first {
                row += ",";
            }
            match rpn_to_string(&self.a.v[j].rpn) {
                Ok(s) => {
                    row += &s;
                    first = false;
                }
                Err(_) => warn!("!!Failed to parse mat"),
            }
        }
        match rpn_to_string(&self.b[i].rpn) {
            Ok(s) => {
                // Right-hand side constant with the relational operator.
                let op = match self.constraint_type[i] {
                    Equal => "==",
                    Unequal => "!=",
                    LessThan => "<",
                    GreatThan => ">",
                    LtOrEqual => "<=",
                    GtOrEqual => ">=",
                    _ => "==",
                };
                result_cons += &format!("g({}){}{};", row, op, s);
            }
            Err(_) => warn!("!!Failed to parse constraint, row:{}", i),
        }
    }
    // Variable declarations: "name:type" joined by commas.
    let mut result_var = String::new();
    for (i, name) in self.x_name.iter().enumerate() {
        if i > 0 {
            result_var += ",";
        }
        result_var += &format!("{}:{}", name, self.binary_int_float[i]);
    }
    // Solver parameters are returned as a copy of the stored map.
    (result_fun, result_cons, result_var, self.parameters.clone())
}
}
impl SparseMILP {
/// Build a `SparseMILP` from its textual form.
///
/// Layout of `expr_str` (blank entries are ignored):
/// - first entry: the objective, `min(...)` or `max(...)` over linear terms;
/// - middle entries: one linear constraint per entry whose top-level
///   operator is `>=`, `<=` or `==`;
/// - last entry: the variable declarations handled by `get_x_info`.
///
/// On error returns the 1-based index of the offending entry (or the total
/// entry count for problems in the variable declaration section).
#[allow(clippy::should_implement_trait)]
pub fn from_str(expr_str: &[&str]) -> Result<SparseMILP, usize> {
    let mut expr_str = expr_str.to_vec();
    expr_str.retain(|expr| !expr.trim().is_empty());
    let s_n = expr_str.len();
    // Need at least objective + one constraint + variable section.
    if s_n <= 2 {
        return Err(s_n);
    }
    // Variable names, types, column positions and bounds from the last entry.
    let (x_name, binary_int_float, x_pos, x_upper, x_lower) =
        get_x_info(expr_str.pop().unwrap()).map_err(|_| s_n)?;
    // Parse the objective expression.
    let mut obj_exp: Expr = expr_str[0].parse().map_err(|_| 1usize)?;
    // Pop the trailing min/max wrapper function off the RPN stream.
    let f = obj_exp.rpn.pop().ok_or(1usize)?;
    let min_or_max: bool;
    match f {
        Func(name, _) => {
            trace!("obj func is {}.", name);
            if name.to_uppercase() == "MIN" {
                min_or_max = true;
            } else if name.to_uppercase() == "MAX" {
                min_or_max = false;
            } else {
                warn!("!!No min or max function found in obj expression: {:?}",obj_exp);
                return Err(1);
            }
        }
        _ => {
            warn!("!!Not a function, obj expression: {:?}", obj_exp);
            return Err(1);
        }
    }
    // Split the objective into per-variable coefficients; an entry with
    // column 0 would be a constant term, which the objective may not have.
    let mut c = split_linear_expr(obj_exp.rpn, &x_pos).ok_or(1usize)?;
    if c[0].0 == 0 {
        return Err(1);
    }
    // Shift to 0-based column indices, consistent with MILP.
    for c_i in c.iter_mut() {
        c_i.0 -= 1;
    }
    let m = expr_str.len() - 1;
    let n = x_name.len();
    let mut v = Vec::new();
    let mut b = Vec::with_capacity(m);
    let mut constraint_type = Vec::with_capacity(m);
    for (i, expr_str_i) in expr_str.iter().enumerate().skip(1) {
        let s = expr_str_i;
        let constraint_expr = s.parse::<Expr>().map_err(|_| i + 1)?;
        // The constraint's top-level (last RPN) operator must be >=, <= or ==.
        let token = &constraint_expr.rpn[constraint_expr.len() - 1];
        if let Binary(op) = token {
            if *op == GtOrEqual || *op == LtOrEqual || *op == Equal {
                constraint_type.push(*op)
            } else {
                warn!("!!The {}th constraint is wrong.", i);
                return Err(i + 1);
            }
        } else {
            warn!("!!The {}th constraint is wrong.", i);
            return Err(i + 1);
        }
        // Split into (left, right) around the relational operator.
        let mut left_right = get_expr_from_fun(constraint_expr.rpn).ok_or(i + 1)?;
        if left_right.len() != 2 {
            return Err(i + 1);
        }
        let right = left_right.pop().unwrap();
        let left = left_right.pop().unwrap();
        let mut right = parse_linear_expr(right.rpn, &x_pos).ok_or(i + 1)?;
        let left = parse_linear_expr(left.rpn, &x_pos).ok_or(i + 1)?;
        // Normalize to (left - right) <op> 0; the merged map lands in `right`.
        // NOTE(review): when the merge fails this row is silently skipped even
        // though `constraint_type` already received an entry, leaving the
        // vectors misaligned — confirm this is intended.
        if merge_expr_map(left, &mut right, Minus) {
            let r = create_linear_expr(right).ok_or(i + 1)?;
            // No constant term present: the right-hand side is 0.
            if r[0].0 != 0 {
                b.push(Expr::from_vec(vec![Number(0.0)]));
            }
            for (col, mut expr) in r {
                if col == 0 {
                    if expr.rpn.len() == 1 {
                        // Constant right-hand side: negate it numerically.
                        if let Number(f) = expr.rpn[0] {
                            expr.rpn[0] = Number(-f);
                            b.push(expr);
                            continue;
                        }
                    }
                    // Symbolic right-hand side: append a unary minus.
                    expr.rpn.push(Unary(Minus));
                    // Validate before storing.
                    if !expr.check_validity() {
                        return Err(i + 1);
                    }
                    b.push(expr);
                } else {
                    // Validate the coefficient before storing.
                    if !expr.check_validity() {
                        return Err(i + 1);
                    }
                    // Stored as 0-based (row, column, coefficient).
                    v.push((i - 1, col - 1, expr));
                }
            }
        }
    }
    let a = SparseMat { m, n, v };
    let parameters = HashMap::new();
    Ok(SparseMILP {
        x_name,
        x_lower,
        x_upper,
        binary_int_float,
        a,
        b,
        constraint_type,
        c,
        min_or_max,
        parameters,
    })
}
/// Parse a sparse model plus its solver parameters from strings.
///
/// On failure returns `(stage, line)`: stage 0 means the model text failed
/// at entry `line`, stage 1 means the parameter text failed at entry `line`.
pub fn from_str_with_parameters(
    expr_str: &[&str],
    parameters_str: &[&str],
) -> Result<SparseMILP, (usize, usize)> {
    let mut model = SparseMILP::from_str(expr_str).map_err(|n| (0, n))?;
    model.parameters = read_parameters_from_str(parameters_str).map_err(|n| (1, n))?;
    Ok(model)
}
/// 分析相关的测点
pub(crate) fn get_related_points(&self, alias: &HashMap<String, u64>) -> Vec<u64> {
let mut result = Vec::new();
for (_, _, expr) in &self.a.v {
for (_, id, _) in find_points_in_expr(expr, alias) {
if !result.contains(&id) {
result.push(id);
}
}
}
for expr in &self.b {
for (_, id, _) in find_points_in_expr(expr, alias) {
if !result.contains(&id) {
result.push(id);
}
}
}
for (_, expr) in &self.c {
for (_, id, _) in find_points_in_expr(expr, alias) {
if !result.contains(&id) {
result.push(id);
}
}
}
result
}
/// Serialize the model back to text.
///
/// Returns `(objective, constraints, variable declarations, solver
/// parameters)`. The sparse entries in `a.v` are assumed to be sorted by
/// (row, column); the serializer walks them with a single cursor.
pub fn to_str(&self) -> (String, String, String, HashMap<String, String>) {
    // Objective section: "min(" / "max(" plus "+"-joined weighted terms.
    let mut result_fun = "".to_string();
    result_fun += if self.min_or_max { "min(" } else { "max(" };
    let mut plus_flag = 0;
    for i in 0..self.c.len() {
        if let Ok(s) = rpn_to_string(&self.c[i].1.rpn) {
            if plus_flag == 1 {
                result_fun += "+";
            }
            // A coefficient that prints as exactly "1" is omitted.
            if s != *"1" {
                result_fun += &format!("{}*{}", s, &self.x_name[self.c[i].0]);
            } else {
                result_fun += &self.x_name[self.c[i].0];
            }
            plus_flag = 1;
        } else {
            warn!("!!Failed to parse object function, index:{}", i);
        }
    }
    result_fun += ");";
    // Constraint section.
    let mut result_cons = "".to_string();
    let mut v_index = 0;
    for i in 0..self.a.m {
        // m rows
        let mut plus_flag = 0;
        for j in 0..self.a.n {
            // n columns of the sparse coefficient matrix.
            // NOTE(review): `a.v[v_index]` is indexed unconditionally — this
            // panics if `a.v` is empty while m > 0; verify callers guarantee
            // at least one entry.
            if self.a.v[v_index].0 == i && self.a.v[v_index].1 == j {
                // Entry (i, j) is present in the sparse matrix.
                if let Ok(s) = rpn_to_string(&self.a.v[v_index].2.rpn) {
                    if plus_flag == 1 {
                        result_cons += "+";
                    }
                    if s != *"1" {
                        result_cons += &format!("{}*{}", s, &self.x_name[j]);
                    } else {
                        result_cons += &self.x_name[j];
                    }
                    plus_flag = 1;
                } else {
                    warn!("!!Failed to parse mat, row:{}, line:{}", i, j);
                }
                v_index += 1;
                if v_index == self.a.v.len() {
                    break;
                }
            }
        }
        if let Ok(s) = rpn_to_string(&self.b[i].rpn) {
            // Right-hand side constant with the relational operator.
            let op = match self.constraint_type[i] {
                Equal => "==",
                Unequal => "!=",
                LessThan => "<",
                GreatThan => ">",
                LtOrEqual => "<=",
                GtOrEqual => ">=",
                _ => "==",
            };
            result_cons += &format!("{}{};", op, s);
        } else {
            warn!("!!Failed to parse constraint, row:{}", i);
        }
        if v_index == self.a.v.len() {
            break;
        }
    }
    // Variable declaration section: "name:type" joined by commas.
    let mut result_var = "".to_string();
    for i in 0..self.x_name.len() {
        if i != 0 {
            result_var += ",";
        }
        result_var += &format!("{}:{}", self.x_name[i], self.binary_int_float[i]);
    }
    // Solver parameter section, returned as a copy of the stored map.
    let result_other = self.parameters.clone();
    (result_fun, result_cons, result_var, result_other)
}
/// Render the model as LaTeX.
///
/// Returns `(objective, constraints, variable declarations)`.
pub fn to_latex(&self) -> (String, String, String) {
    // Objective: "\min " / "\max " followed by "+"-joined weighted terms.
    let mut result_fun = String::from(if self.min_or_max { "\\min " } else { "\\max " });
    let mut plus_flag = 0;
    // Iterate the sparse objective entries directly (mirrors `to_str`); the
    // original indexed `self.c` by variable position, which could panic when
    // the objective has fewer terms than variables.
    for (i, (col, coeff)) in self.c.iter().enumerate() {
        if let Ok(s) = rpn_to_latex(&coeff.rpn) {
            if plus_flag == 1 {
                result_fun += "+";
            }
            if s != *"1" {
                // Bug fix: was "{}\times {}", which is a literal TAB + "imes";
                // the LaTeX command needs an escaped backslash.
                result_fun += &format!("{}\\times {}", s, &self.x_name[*col]);
            } else {
                result_fun += &self.x_name[*col];
            }
            plus_flag = 1;
        } else {
            warn!("!!Failed to parse object function, index:{}", i);
        }
    }
    // Bug fix: no parenthesis is opened in the LaTeX form, so none is closed
    // here (the original emitted a stray ")").
    result_fun += "\\\\\n";
    // Constraint rows: walk the (row, column)-sorted sparse entries.
    let mut result_cons = String::new();
    let mut v_index = 0;
    for i in 0..self.a.m {
        let mut plus_flag = 0;
        for j in 0..self.a.n {
            // Guard the cursor so an empty/short `a.v` cannot panic.
            if v_index < self.a.v.len()
                && self.a.v[v_index].0 == i
                && self.a.v[v_index].1 == j
            {
                // Entry (i, j) is present.
                if let Ok(s) = rpn_to_latex(&self.a.v[v_index].2.rpn) {
                    if plus_flag == 1 {
                        result_cons += "+";
                    }
                    if s != *"1" {
                        result_cons += &format!("{}\\times {}", s, &self.x_name[j]);
                    } else {
                        result_cons += &self.x_name[j];
                    }
                    plus_flag = 1;
                } else {
                    warn!("!!Failed to parse mat, row:{}, line:{}", i, j);
                }
                v_index += 1;
                if v_index == self.a.v.len() {
                    break;
                }
            }
        }
        if let Ok(s) = rpn_to_latex(&self.b[i].rpn) {
            // Right-hand side with the relational operator.
            let op = match self.constraint_type[i] {
                Equal => "=",
                Unequal => "\\ne ",
                LessThan => "<",
                GreatThan => ">",
                LtOrEqual => "\\le ",
                GtOrEqual => "\\ge ",
                _ => "=",
            };
            result_cons += &format!("{}{}\\\\\n", op, s);
        } else {
            warn!("!!Failed to parse constraint, row:{}", i);
        }
        if v_index == self.a.v.len() {
            break;
        }
    }
    // Variable declarations: only binaries (type 1) and integers (type 2)
    // produce a line; continuous variables no longer emit a blank "\\" line.
    let mut result_var = String::new();
    for i in 0..self.x_name.len() {
        let var_type = self.binary_int_float[i];
        if var_type == 1 {
            result_var += &format!("{} = 0 \\quad or \\quad 1", self.x_name[i]);
            result_var += "\\\\\n";
        } else if var_type == 2 {
            result_var += &format!("{} \\in \\mathbb{{z}}", self.x_name[i]);
            result_var += "\\\\\n";
        }
    }
    (result_fun, result_cons, result_var)
}
}
impl NewtonSolver {
/// Parse a system of equations f(x) = 0 together with its variables.
///
/// `expr_str` holds one equation per entry plus a final comma-separated
/// variable list (`name` or `name:init`); the number of equations must
/// equal the number of variables. The error value is the 1-based index of
/// the offending entry (or the total entry count for structural errors).
#[allow(clippy::should_implement_trait)]
pub fn from_str(expr_str: &[&str]) -> Result<NewtonSolver, usize> {
    let mut expr_str = expr_str.to_vec();
    expr_str.retain(|expr| !expr.trim().is_empty());
    let s_n = expr_str.len();
    if s_n < 2 {
        warn!(
            "!!Insufficient function num for solve f(x)=0, content: {:?}",
            expr_str
        );
        return Err(s_n);
    }
    // The trailing entry declares the variables.
    let x_defines: Vec<String> = expr_str
        .pop()
        .unwrap()
        .split(',')
        .map(|s| s.trim().to_string())
        .collect();
    let n = x_defines.len();
    // A Newton system must be square: one equation per variable.
    if n != expr_str.len() {
        warn!("!!function num is not equal to x length, content: {:?}", expr_str);
        return Err(s_n);
    }
    let (x_name, x_init) = create_x_name_init(&x_defines).ok_or(s_n)?;
    // Parse and validate every equation, short-circuiting on the first error.
    let f = expr_str
        .iter()
        .enumerate()
        .map(|(i, raw)| {
            let expr: Expr = raw.parse().map_err(|_| i + 1)?;
            if expr.check_validity() {
                Ok(expr)
            } else {
                Err(i + 1)
            }
        })
        .collect::<Result<Vec<Expr>, usize>>()?;
    Ok(NewtonSolver {
        f,
        x_name,
        x_init,
        x_init_cx: vec![],
        parameters: HashMap::new(),
    })
}
/// Parse a solver definition plus its solver parameters from strings.
///
/// On failure returns `(stage, line)`: stage 0 means the model text failed
/// at entry `line`, stage 1 means the parameter text failed at entry `line`.
pub fn from_str_with_parameters(
    expr_str: &[&str],
    parameters_str: &[&str],
) -> Result<NewtonSolver, (usize, usize)> {
    let mut solver = NewtonSolver::from_str(expr_str).map_err(|n| (0, n))?;
    solver.parameters = read_parameters_from_str(parameters_str).map_err(|n| (1, n))?;
    Ok(solver)
}
/// Collect the ids of measurement points referenced by any equation,
/// de-duplicated while preserving first-seen order.
pub(crate) fn get_related_points(&self, alias: &HashMap<String, u64>) -> Vec<u64> {
    let mut ids = Vec::new();
    for (_, id, _) in self.f.iter().flat_map(|e| find_points_in_expr(e, alias)) {
        if !ids.contains(&id) {
            ids.push(id);
        }
    }
    ids
}
/// Serialize the solver back to text.
///
/// Returns `(equations, variable declarations, solver parameters)`.
pub fn to_str(&self) -> (String, String, HashMap<String, String>) {
    // Equation section: each expression terminated by ';'.
    let mut result_fun = String::new();
    for (i, expr) in self.f.iter().enumerate() {
        match rpn_to_string(&expr.rpn) {
            Ok(s) => result_fun += &format!("{};", s),
            Err(_) => warn!("!!Failed to parse mat, row:{}", i),
        }
    }
    // Variable section: "name" or "name:init", comma separated.
    let mut result_var = String::new();
    for (i, name) in self.x_name.iter().enumerate() {
        if i > 0 {
            result_var += ",";
        }
        result_var += name.as_str();
        if let Ok(init) = rpn_to_string(&self.x_init[i].rpn) {
            if !init.is_empty() {
                result_var += &format!(":{}", init);
            }
        }
    }
    // Solver parameters are returned as a copy of the stored map.
    (result_fun, result_var, self.parameters.clone())
}
/// Render the solver as LaTeX.
///
/// Returns `(equations as "f = 0" lines, variable declarations)`.
pub fn to_latex(&self) -> (String, String) {
    // Equation section.
    let mut result_fun = String::new();
    for (i, expr) in self.f.iter().enumerate() {
        match rpn_to_latex(&expr.rpn) {
            Ok(s) => result_fun += &format!("{} = 0\\\\\n", s),
            Err(_) => warn!("!!Failed to parse mat, row:{}", i),
        }
    }
    // Variable section: "name" or "name:init", comma separated.
    let mut result_var = String::new();
    for (i, name) in self.x_name.iter().enumerate() {
        if i > 0 {
            result_var += ",";
        }
        result_var += name.as_str();
        if let Ok(init) = rpn_to_latex(&self.x_init[i].rpn) {
            if !init.is_empty() {
                result_var += &format!(":{}", init);
            }
        }
    }
    (result_fun, result_var)
}
}
impl NewtonWls {
/// Parse a weighted-least-squares model.
///
/// Each entry of `expr_str` is an equation, optionally written `expr:weight`
/// (a missing weight defaults to 1); the final entry is the comma-separated
/// variable list (`name` or `name:init`). The error value is the 1-based
/// index of the offending entry (or the total entry count for structural
/// errors).
#[allow(clippy::should_implement_trait)]
pub fn from_str(expr_str: Vec<&str>) -> Result<NewtonWls, usize> {
    // `expr_str` is already owned; the original cloned it redundantly.
    let mut expr_str = expr_str;
    expr_str.retain(|expr| !expr.trim().is_empty());
    let sn = expr_str.len();
    if sn < 3 {
        warn!("!!Insufficient function num for solve wls, content: {:?}", expr_str);
        return Err(sn);
    }
    // The trailing entry declares the variables.
    let x_defines: Vec<String> = expr_str
        .pop()
        .unwrap()
        .split(',')
        .map(|s| s.trim().to_string())
        .collect();
    let (x_name, x_init) = create_x_name_init(&x_defines).ok_or(sn)?;
    let mut f = Vec::with_capacity(sn - 1);
    let mut weight = Vec::with_capacity(sn - 1);
    for (i, expr_str_i) in expr_str.iter().enumerate() {
        // Split off the optional ":weight" suffix.
        let f_and_weight: Vec<&str> = expr_str_i.split(':').collect();
        match f_and_weight.len() {
            // No weight given: default to 1.
            1 => weight.push(Expr::from_vec(vec![Number(1.0)])),
            2 => {
                let expr: Expr = f_and_weight[1].parse().map_err(|_| i + 1)?;
                // Validate the weight expression.
                if !expr.check_validity() {
                    return Err(i + 1);
                }
                weight.push(expr);
            }
            // More than one ':' is malformed.
            _ => return Err(i + 1),
        }
        let expr: Expr = f_and_weight[0].parse().map_err(|_| i + 1)?;
        // Validate the equation expression.
        if !expr.check_validity() {
            return Err(i + 1);
        }
        f.push(expr);
    }
    let parameters = HashMap::new();
    Ok(NewtonWls { f, weight, x_name, x_init, parameters })
}
/// Parse a WLS model plus its solver parameters from strings.
///
/// On failure returns `(stage, line)`: stage 0 means the model text failed
/// at entry `line`, stage 1 means the parameter text failed at entry `line`.
pub fn from_str_with_parameters(
    expr_str: Vec<&str>,
    parameters_str: &[&str],
) -> Result<NewtonWls, (usize, usize)> {
    let mut model = NewtonWls::from_str(expr_str).map_err(|n| (0, n))?;
    model.parameters = read_parameters_from_str(parameters_str).map_err(|n| (1, n))?;
    Ok(model)
}
}
impl NLP {
/// Build an `NLP` from its textual form.
///
/// Layout of `expr_str` (blank entries are ignored):
/// - first entry: the objective, optionally wrapped in `min(...)`/`max(...)`
///   (minimization is the default);
/// - middle entries: constraints of the form `expr:[lower/upper]`;
/// - last entry: variable declarations `name:[lower/upper]` or
///   `name:[lower/upper/init]`, comma separated.
///
/// Empty bound fields mean unbounded (`f64::MIN`/`f64::MAX` sentinels).
/// On error returns the 1-based index of the offending entry (or the total
/// entry count for problems in the objective/variable sections).
#[allow(clippy::should_implement_trait)]
pub fn from_str(expr_str: &[&str]) -> Result<NLP, usize> {
    let mut expr_str = expr_str.to_vec();
    expr_str.retain(|expr| !expr.trim().is_empty());
    let n = expr_str.len();
    if n < 2 {
        return Err(n);
    }
    // Variable names, bounds and optional initial values from the last entry.
    let x: Vec<&str> = expr_str.pop().unwrap().split(',').collect();
    let mut x_name = Vec::with_capacity(x.len());
    let mut x_init: Vec<Expr> = Vec::with_capacity(x.len());
    let mut x_lower: Vec<Expr> = Vec::with_capacity(x.len());
    let mut x_upper: Vec<Expr> = Vec::with_capacity(x.len());
    for x_s in x {
        let name_and_limit_and_init: Vec<&str> = x_s.split(':').collect();
        if name_and_limit_and_init.len() != 2 {
            return Err(n);
        }
        x_name.push(name_and_limit_and_init[0].trim().to_string());
        let limits_and_init = name_and_limit_and_init[1].trim();
        // Expect at least "[/]" so stripping the brackets below is safe.
        if limits_and_init.len() < 3 {
            return Err(n);
        }
        let tmp: Vec<&str> = limits_and_init[1..limits_and_init.len() - 1]
            .split('/')
            .collect();
        if (tmp.len() != 2) && (tmp.len() != 3) {
            return Err(n);
        }
        // Bug fix: the original silently skipped unparsable bounds/initial
        // values, leaving these vectors shorter than `x_name` and causing
        // index panics later; malformed entries now fail fast.
        if tmp[0].trim().is_empty() {
            x_lower.push(Expr::from_vec(vec![Number(f64::MIN)]));
        } else {
            x_lower.push(tmp[0].parse().map_err(|_| n)?);
        }
        if tmp[1].trim().is_empty() {
            x_upper.push(Expr::from_vec(vec![Number(f64::MAX)]));
        } else {
            x_upper.push(tmp[1].parse().map_err(|_| n)?);
        }
        if tmp.len() == 3 && !tmp[2].trim().is_empty() {
            x_init.push(tmp[2].parse().map_err(|_| n)?);
        } else {
            x_init.push(Expr::new());
        }
    }
    // Objective: strip an optional trailing min/max wrapper function.
    let mut obj_expr: Expr = expr_str[0].parse().map_err(|_| n)?;
    if !obj_expr.check_validity() {
        return Err(n);
    }
    let f = obj_expr.rpn.pop().ok_or(1usize)?;
    let mut min_or_max = true;
    match &f {
        Func(name, _) => {
            if name.to_uppercase() == "MIN" {
                min_or_max = true;
            } else if name.to_uppercase() == "MAX" {
                min_or_max = false;
            } else {
                // Not a min/max wrapper: restore the popped token.
                obj_expr.rpn.push(f);
            }
        }
        _ => {
            obj_expr.rpn.push(f);
        }
    }
    // Constraints of the form "expr:[lower/upper]".
    let m = expr_str.len() - 1;
    let mut g = Vec::with_capacity(m);
    let mut g_lower = Vec::with_capacity(m);
    let mut g_upper = Vec::with_capacity(m);
    for (i, expr_str_i) in expr_str.iter().enumerate().skip(1) {
        let g_and_limit: Vec<&str> = expr_str_i.split(':').collect();
        if g_and_limit.len() != 2 {
            return Err(i + 1);
        }
        let constraint_expr = g_and_limit[0].trim().parse::<Expr>().map_err(|_| i + 1)?;
        if !constraint_expr.check_validity() {
            return Err(i + 1);
        }
        g.push(constraint_expr);
        // Bug fix: guard the bracket-stripping slice (the original sliced
        // unconditionally and panicked on limits shorter than "[]").
        let limits = g_and_limit[1].trim();
        if limits.len() < 2 {
            return Err(i + 1);
        }
        let tmp: Vec<&str> = limits[1..limits.len() - 1].split('/').collect();
        if tmp.len() != 2 {
            return Err(i + 1);
        }
        // Same fail-fast policy as the variable bounds above.
        if tmp[0].trim().is_empty() {
            g_lower.push(Expr::from_vec(vec![Number(f64::MIN)]));
        } else {
            g_lower.push(tmp[0].parse().map_err(|_| i + 1)?);
        }
        if tmp[1].trim().is_empty() {
            g_upper.push(Expr::from_vec(vec![Number(f64::MAX)]));
        } else {
            g_upper.push(tmp[1].parse().map_err(|_| i + 1)?);
        }
    }
    Ok(NLP {
        x_name,
        x_lower,
        x_upper,
        g,
        g_upper,
        g_lower,
        obj_expr,
        x_init,
        min_or_max,
        parameters: HashMap::new(),
    })
}
/// Parse an NLP model plus its solver parameters from strings.
///
/// On failure returns `(stage, line)`: stage 0 means the model text failed
/// at entry `line`, stage 1 means the parameter text failed at entry `line`.
pub fn from_str_with_parameters(
    expr_str: &[&str],
    parameters_str: &[&str],
) -> Result<NLP, (usize, usize)> {
    let mut model = NLP::from_str(expr_str).map_err(|n| (0, n))?;
    model.parameters = read_parameters_from_str(parameters_str).map_err(|n| (1, n))?;
    Ok(model)
}
/// Collect the ids of measurement points referenced anywhere in the model
/// (objective, variable bounds, constraints, then constraint bounds),
/// de-duplicated while preserving first-seen order.
pub(crate) fn get_related_points(&self, alias: &HashMap<String, u64>) -> Vec<u64> {
    let mut ids = Vec::new();
    let all_exprs = std::iter::once(&self.obj_expr)
        .chain(self.x_lower.iter())
        .chain(self.x_upper.iter())
        .chain(self.g.iter())
        .chain(self.g_lower.iter())
        .chain(self.g_upper.iter());
    for expr in all_exprs {
        for (_, id, _) in find_points_in_expr(expr, alias) {
            if !ids.contains(&id) {
                ids.push(id);
            }
        }
    }
    ids
}
/// Serialize the model back to text.
///
/// Returns `(objective, constraints, variable declarations, solver
/// parameters)`. Bounds equal to the `f64::MIN`/`f64::MAX` sentinels are
/// emitted as empty fields (unbounded).
pub fn to_str(&self) -> (String, String, String, HashMap<String, String>) {
    // Objective section; minimization is the default and needs no wrapper.
    let mut result_fun = "".to_string();
    // NOTE(review): the whole `Expr` is passed here while every other call
    // site passes `.rpn` — confirm `rpn_to_string` accepts both forms.
    if let Ok(s) = rpn_to_string(&self.obj_expr) {
        if self.min_or_max {
            result_fun = format!("{};", s);
        } else {
            result_fun = format!("max({});", s);
        }
    } else {
        warn!("!!Failed to parse object function");
    }
    // Constraint section: one "expr:[lower/upper];" per constraint.
    let mut result_cons = "".to_string();
    for i in 0..self.g.len() {
        if let Ok(s) = rpn_to_string(&self.g[i].rpn) {
            if let Ok(mut l) = rpn_to_string(&self.g_lower[i].rpn) {
                if let Ok(mut u) = rpn_to_string(&self.g_upper[i].rpn) {
                    // Sentinel bounds print as empty fields.
                    if self.g_lower[i].rpn[0] == Number(f64::MIN) {
                        l = "".to_string();
                    }
                    if self.g_upper[i].rpn[0] == Number(f64::MAX) {
                        u = "".to_string();
                    }
                    result_cons += &format!("{}:[{}/{}];", s, l, u);
                }
            }
        }
    }
    // Variable section: "name:[lower/upper]" or "name:[lower/upper/init]".
    let mut result_var = "".to_string();
    for i in 0..self.x_name.len() {
        if let Ok(mut l) = rpn_to_string(&self.x_lower[i].rpn) {
            // Lower bound.
            if let Ok(mut u) = rpn_to_string(&self.x_upper[i].rpn) {
                // Upper bound.
                if self.x_lower[i].rpn[0] == Number(f64::MIN) {
                    l = "".to_string();
                }
                if self.x_upper[i].rpn[0] == Number(f64::MAX) {
                    u = "".to_string();
                }
                // Append "/init" only when an initial value is present.
                let mut init = "".to_string();
                if !self.x_init[i].rpn.is_empty() {
                    if let Ok(v) = rpn_to_string(&self.x_init[i].rpn) {
                        init = "/".to_string() + &v;
                    }
                }
                result_var += &format!("{}:[{}/{}{}]", self.x_name[i], l, u, init);
            }
        }
        if i != self.x_name.len() - 1 {
            result_var += ",";
        }
    }
    // Solver parameter section, returned as a copy of the stored map.
    let result_other = self.parameters.clone();
    (result_fun, result_cons, result_var, result_other)
}
/// Render the model as LaTeX.
///
/// Returns `(objective, constraints, variable declarations)`. Coinciding
/// bounds render as equalities; `f64::MIN`/`f64::MAX` sentinels are omitted.
pub fn to_latex(&self) -> (String, String, String) {
    // Objective section.
    let mut result_fun = "".to_string();
    result_fun += if self.min_or_max { "\\min " } else { "\\max " };
    // NOTE(review): the whole `Expr` is passed here while other call sites
    // pass `.rpn` — confirm `rpn_to_latex` accepts both forms.
    if let Ok(s) = rpn_to_latex(&self.obj_expr) {
        result_fun += &format!("{}\\\\\n", s);
    } else {
        warn!("!!Failed to parse object function");
    }
    // Constraint section: "lower \le expr \le upper" per constraint.
    let mut result_cons = "".to_string();
    for i in 0..self.g.len() {
        if let Ok(s) = rpn_to_latex(&self.g[i].rpn) {
            if let Ok(mut l) = rpn_to_latex(&self.g_lower[i].rpn) {
                l += "\\le ";
                if let Ok(mut u) = rpn_to_latex(&self.g_upper[i].rpn) {
                    if self.g_lower[i].rpn[0] == self.g_upper[i].rpn[0] {
                        // Equal bounds: render as an equality.
                        l = "".to_string();
                        u = "=".to_string() + &u;
                    } else {
                        u = "\\le ".to_string() + &u;
                        // Sentinel bounds are dropped.
                        if self.g_lower[i].rpn[0] == Number(f64::MIN) {
                            l = "".to_string();
                        }
                        if self.g_upper[i].rpn[0] == Number(f64::MAX) {
                            u = "".to_string();
                        }
                    }
                    result_cons += &format!("{}{}{}\\\\\n", l, s, u);
                }
            }
        }
    }
    // Variable section: bounds plus an optional "(init)" value.
    let mut result_var = "".to_string();
    for i in 0..self.x_name.len() {
        if let Ok(mut l) = rpn_to_latex(&self.x_lower[i].rpn) {
            // Lower bound.
            l += "\\le ";
            if let Ok(mut u) = rpn_to_latex(&self.x_upper[i].rpn) {
                // Upper bound.
                if self.x_lower[i].rpn[0] == self.x_upper[i].rpn[0] {
                    l = "".to_string();
                    u = "=".to_string() + &u;
                } else {
                    u = "\\le ".to_string() + &u;
                    if self.x_lower[i].rpn[0] == Number(f64::MIN) {
                        l = "".to_string();
                    }
                    if self.x_upper[i].rpn[0] == Number(f64::MAX) {
                        u = "".to_string();
                    }
                }
                let mut init = "".to_string();
                if !self.x_init[i].rpn.is_empty() {
                    if let Ok(v) = rpn_to_latex(&self.x_init[i].rpn) {
                        init = "(".to_string() + &v + ")";
                    }
                }
                // Fully unbounded variables produce no line.
                if !l.is_empty() || !u.is_empty() {
                    result_var += &format!("{}{}{}{}\\\\\n", l, self.x_name[i], init, u);
                }
            }
        }
    }
    (result_fun, result_cons, result_var)
}
}
impl NewtonSolver {
    /// Reformulate the root-finding problem f(x) = 0 as an NLP with a
    /// constant objective and equality constraints f(x) in [0, 0].
    pub fn to_nlp(&self) -> NLP {
        let zero = || Expr::from_vec(vec![Number(0.)]);
        NLP {
            x_name: self.x_name.clone(),
            x_init: self.x_init.clone(),
            // NOTE(review): the bound vectors are sized by f.len(); this
            // assumes a square system (equation count == variable count) —
            // confirm against `from_str`, which enforces it.
            x_lower: vec![Expr::from_vec(vec![Number(f64::MIN)]); self.f.len()],
            x_upper: vec![Expr::from_vec(vec![Number(f64::MAX)]); self.f.len()],
            g: self.f.clone(),
            g_lower: vec![zero(); self.f.len()],
            g_upper: vec![zero(); self.f.len()],
            obj_expr: zero(),
            min_or_max: true,
            parameters: HashMap::new(),
        }
    }
}
impl SparseSolver {
    /// Reformulate the sparse linear system as a SparseMILP: continuous,
    /// unbounded variables, equality constraints, constant objective.
    pub fn to_sparsemilp(&self) -> SparseMILP {
        let n = self.x_name.len();
        let x_lower: Vec<_> = (0..n)
            .map(|i| (i, Expr::from_vec(vec![Number(f64::MIN)])))
            .collect();
        let x_upper: Vec<_> = (0..n)
            .map(|i| (i, Expr::from_vec(vec![Number(f64::MAX)])))
            .collect();
        SparseMILP {
            x_name: self.x_name.clone(),
            x_lower,
            x_upper,
            // Type 3 marks continuous (float) variables.
            binary_int_float: vec![3_u8; n],
            a: self.a.clone(),
            b: self.b.clone(),
            constraint_type: vec![Operation::Equal; self.b.len()],
            c: vec![(0, Expr::from_vec(vec![Number(0.)]))],
            min_or_max: true,
            parameters: HashMap::new(),
        }
    }
}
// above should be the same as in sparrowzz
// (diff artifact: "\ No newline at end of file" marker left over from file concatenation)
// following should be the same as in sparrowzz
use std::collections::{HashMap, VecDeque};
use log::warn;
use eig_expr::Expr;
use eig_expr::Token::*;
use eig_expr::{Operation, Token};
use eig_expr::{factorial, ContextProvider};
/// Marker trait for expression-context providers that can be shared across
/// threads; blanket-implemented for every `ContextProvider + Sync` type.
pub trait N: ContextProvider + Sync {}
impl<T> N for T where T: ContextProvider + Sync {}
/// Parse a linear expression, optionally written "lhs=rhs", into sorted
/// `(column, coefficient)` pairs; column 0 is the constant term.
/// For "lhs=rhs" the result represents `lhs - rhs`.
/// Returns `None` when parsing fails or the expression is not linear.
pub fn parse_linear_expr_str(
    expr_str: &str,
    x_name_pos: &HashMap<String, usize>,
) -> Option<Vec<(usize, Expr)>> {
    let parts: Vec<&str> = expr_str.split('=').collect();
    match parts.len() {
        2 => {
            let left_map = parse_linear_expr(parts[0].parse::<Expr>().ok()?.rpn, x_name_pos)?;
            let mut right_map =
                parse_linear_expr(parts[1].parse::<Expr>().ok()?.rpn, x_name_pos)?;
            // Fold the sides into lhs - rhs (accumulated in `right_map`).
            if merge_expr_map(left_map, &mut right_map, Operation::Minus) {
                create_linear_expr(right_map)
            } else {
                None
            }
        }
        _ => {
            // No single '=': parse the whole string as one expression
            // (multiple '=' signs fail at the Expr parse step).
            let map = parse_linear_expr(expr_str.parse::<Expr>().ok()?.rpn, x_name_pos)?;
            create_linear_expr(map)
        }
    }
}
pub fn split_linear_expr(
rpn: Vec<Token>,
x_name_pos: &HashMap<String, usize>,
) -> Option<Vec<(usize, Expr)>> {
let final_map = parse_linear_expr(rpn, x_name_pos)?;
create_linear_expr(final_map)
}
/// Materialize a coefficient map (column -> RPN tokens) into a vector of
/// `(column, Expr)` pairs sorted by column: expr0, expr1, ..., exprN.
///
/// Always returns `Some`; the `Option` is kept for interface symmetry with
/// the other linear-expression helpers.
pub fn create_linear_expr(map: HashMap<usize, VecDeque<Token>>) -> Option<Vec<(usize, Expr)>> {
    let mut result: Vec<(usize, Expr)> = map
        .into_iter()
        .map(|(key, value)| (key, Expr::from_vec(Vec::from(value))))
        .collect();
    // Keys are unique map keys, so an unstable key sort is safe and cheaper
    // than the original manual `sort_by` comparator.
    result.sort_unstable_by_key(|(key, _)| *key);
    Some(result)
}
/// Decompose an RPN token stream that is *linear* in the declared variables
/// into a coefficient map: key 0 holds the constant term expr0, key i+1
/// holds the coefficient of x_i, i.e. the stream represents
/// expr0 + expr1 * x1 + ... + exprn * xn.
/// For example `1+2*x1+(3*4)*x2` yields coefficients `[1, 2, 12]`.
/// Returns `None` when the stream is malformed or not linear (variables
/// multiplied together, or appearing inside a function call).
pub fn parse_linear_expr(
    rpn: Vec<Token>,
    x_name_pos: &HashMap<String, usize>,
) -> Option<HashMap<usize, VecDeque<Token>>> {
    // Each stack slot is one coefficient map as described above.
    let mut stack: Vec<HashMap<usize, VecDeque<Token>>> = Vec::with_capacity(16);
    for token in rpn {
        match token {
            Binary(op) => {
                if stack.len() < 2 {
                    return None;
                }
                let mut right = stack.pop().unwrap();
                let left = stack.pop().unwrap();
                // Merge the two maps; the result accumulates into `right`.
                if !merge_expr_map(left, &mut right, op) {
                    return None;
                }
                stack.push(right);
            }
            Unary(op) => {
                if stack.is_empty() {
                    return None;
                }
                let mut x = stack.pop().unwrap();
                match op {
                    Operation::Plus => {} // unary plus: nothing to do
                    Operation::Minus => {
                        // Negate every coefficient; fold plain numbers eagerly.
                        for expr in x.values_mut() {
                            if expr.len() == 1 {
                                if let Number(f) = expr[0] {
                                    expr[0] = Number(-f);
                                    continue;
                                }
                            }
                            expr.push_back(Unary(Operation::Minus));
                        }
                    }
                    Operation::Not => {
                        // Logical not is only defined on the constant term.
                        if x.len() != 1 {
                            return None;
                        }
                        if let Some(expr0) = x.get_mut(&0) {
                            if expr0.len() == 1 {
                                if let Number(f) = expr0[0] {
                                    expr0[0] = Number(if f > 0.0 { 0.0 } else { 1.0 });
                                    stack.push(x);
                                    continue;
                                }
                            }
                            expr0.push_back(Unary(Operation::Not));
                        } else {
                            return None;
                        }
                    }
                    Operation::BitNot => {
                        // Bitwise not is only defined on the constant term.
                        if x.len() != 1 {
                            return None;
                        }
                        if let Some(expr0) = x.get_mut(&0) {
                            if expr0.len() == 1 {
                                if let Number(f) = expr0[0] {
                                    expr0[0] = Number(!(f as i64) as f64);
                                    stack.push(x);
                                    continue;
                                }
                            }
                            expr0.push_back(Unary(Operation::BitNot));
                        } else {
                            return None;
                        }
                    }
                    Operation::Fact => {
                        // Factorial is only defined on the constant term.
                        if x.len() != 1 {
                            return None;
                        }
                        if let Some(expr0) = x.get_mut(&0) {
                            if expr0.len() == 1 {
                                if let Number(f) = expr0[0] {
                                    expr0[0] = Number(factorial(f).ok()?);
                                    stack.push(x);
                                    continue;
                                }
                            }
                            // Bug fix: this append previously sat inside the
                            // single-token branch above, so multi-token
                            // constant expressions never received the Fact
                            // operator — unlike the parallel Not/BitNot arms.
                            expr0.push_back(Unary(Operation::Fact));
                        } else {
                            return None;
                        }
                    }
                    _ => return None,
                };
                stack.push(x);
            }
            Func(_, Some(i)) => {
                if stack.len() < i {
                    return None;
                }
                let mut para = VecDeque::with_capacity(i + 1);
                let mut k = 0;
                for j in stack.len() - i..stack.len() {
                    // Variables may not appear inside a function call: every
                    // argument must consist of its constant term only.
                    if stack[j].len() != 1 {
                        return None;
                    }
                    if let Some(mut expr0) = stack[j].remove(&0) {
                        if expr0.len() == 1 {
                            // Count arguments that are plain numbers.
                            if let Number(_) = expr0[0] {
                                k += 1;
                            }
                        }
                        while !expr0.is_empty() {
                            para.push_back(expr0.pop_front().unwrap());
                        }
                    } else {
                        return None;
                    }
                }
                para.push_back(token.clone());
                // If every argument is a plain number, evaluate the call now.
                let new_expr0: VecDeque<Token> = if k == i {
                    let rpn = Vec::from(para);
                    let result = Expr::from_vec(rpn).eval().ok()?;
                    [Number(result)].into()
                } else {
                    para
                };
                // Replace the consumed arguments with the folded result.
                let nl = stack.len() - i;
                stack.truncate(nl);
                stack.push([(0, new_expr0)].into());
            }
            Number(_) => {
                // A bare number contributes to the constant term expr0.
                let mut c: HashMap<usize, VecDeque<Token>> = HashMap::new();
                c.insert(0, [token].into());
                stack.push(c);
            }
            Var(ref var) => {
                let mut c: HashMap<usize, VecDeque<Token>> = HashMap::new();
                // Declared variables map to key i+1 with coefficient 1;
                // unknown identifiers stay symbolic in the constant term.
                if let Some(i) = x_name_pos.get(var) {
                    c.insert(*i + 1, [Number(1.0)].into());
                } else {
                    c.insert(0, [token].into());
                }
                stack.push(c);
            }
            _ => return None,
        }
    }
    // A well-formed stream reduces to exactly one coefficient map.
    if stack.len() != 1 {
        return None;
    }
    Some(stack.pop().unwrap())
}
/// Split an RPN stream on its final operator: if the last token is a
/// function the result holds one expression per argument; if it is a binary
/// operator the result holds exactly two expressions (left, right).
/// Returns `None` for malformed streams.
pub fn get_expr_from_fun(rpn: Vec<Token>) -> Option<Vec<Expr>> {
    let mut stack = Vec::with_capacity(16);
    // A stack slot is either a single token or an already-grouped sub-RPN.
    enum TokenGroup {
        T(Token),
        G(Vec<Token>),
    }
    let total_token_num = rpn.len();
    let mut index = 0;
    for token in rpn {
        index += 1;
        match &token {
            Var(_) => {
                stack.push(TokenGroup::T(token));
            }
            Number(_) => stack.push(TokenGroup::T(token)),
            Binary(_) => {
                // The final binary operator: the two stack entries become the result.
                if index == total_token_num {
                    return if stack.len() == 2 {
                        let mut result = Vec::with_capacity(stack.len());
                        for g in stack {
                            match g {
                                TokenGroup::T(t) => result.push(Expr::from_vec(vec![t])),
                                TokenGroup::G(v) => result.push(Expr::from_vec(v)),
                            }
                        }
                        Some(result)
                    } else {
                        warn!("!!Illegal expression");
                        None
                    };
                }
                // Bug fix: guard against stack underflow instead of panicking
                // on malformed input (the original unwrapped unconditionally).
                if stack.len() < 2 {
                    warn!("!!Illegal expression");
                    return None;
                }
                let right = stack.pop().unwrap();
                let left = stack.pop().unwrap();
                let mut tokens = Vec::new();
                match left {
                    TokenGroup::T(t) => tokens.push(t),
                    TokenGroup::G(v) => {
                        tokens.extend(v);
                    }
                }
                match right {
                    TokenGroup::T(t) => tokens.push(t),
                    TokenGroup::G(v) => {
                        tokens.extend(v);
                    }
                }
                tokens.push(token);
                stack.push(TokenGroup::G(tokens));
            }
            Unary(_) => {
                // Bug fix: same underflow guard as the binary case.
                if stack.is_empty() {
                    warn!("!!Illegal expression");
                    return None;
                }
                let x = stack.pop().unwrap();
                let mut tokens = Vec::new();
                match x {
                    TokenGroup::T(t) => tokens.push(t),
                    TokenGroup::G(v) => {
                        tokens.extend(v);
                    }
                }
                tokens.push(token);
                stack.push(TokenGroup::G(tokens));
            }
            Func(_, Some(i)) => {
                let len = stack.len();
                if len < *i {
                    warn!(
                        "!!eval: stack does not have enough arguments for function token {:?}",
                        token
                    );
                    return None;
                } else if len == *i && index == total_token_num {
                    // The final function: each stack entry is one argument.
                    let mut result = Vec::with_capacity(len);
                    for g in stack {
                        match g {
                            TokenGroup::T(t) => result.push(Expr::from_vec(vec![t])),
                            TokenGroup::G(v) => result.push(Expr::from_vec(v)),
                        }
                    }
                    return Some(result);
                } else {
                    // Interior function call: fold its arguments into one group.
                    let mut tokens = Vec::with_capacity(*i);
                    for _ in (len - *i)..len {
                        let g = stack.pop().unwrap();
                        match g {
                            TokenGroup::T(t) => tokens.push(t),
                            // Arguments come off the stack in reverse, and each
                            // group's tokens are appended back-to-front; the
                            // final reverse below restores the original order.
                            TokenGroup::G(mut v) => loop {
                                let t = v.pop();
                                if t.is_none() {
                                    break;
                                }
                                tokens.push(t.unwrap());
                            },
                        }
                    }
                    tokens.reverse();
                    tokens.push(token);
                    stack.push(TokenGroup::G(tokens));
                }
            }
            _ => {
                warn!("!!Unrecognized token: {:?}", token);
                return None;
            }
        }
    }
    warn!("!!Illegal expression");
    None
}
/// Merge two coefficient maps, computing `left <op> right` and storing the
/// result in `right` (key 0 is the constant term, key i+1 the coefficient
/// of x_i). Returns `false` when the operation would make the expression
/// non-linear (e.g. multiplying or dividing two maps that both contain
/// variables) or a scalar merge fails.
pub fn merge_expr_map(
    mut left: HashMap<usize, VecDeque<Token>>,
    right: &mut HashMap<usize, VecDeque<Token>>,
    op: Operation,
) -> bool {
    // A map is "constant" when it only carries the constant term expr0.
    let is_left_const = left.len() == 1 && left.contains_key(&0);
    let is_right_const = right.len() == 1 && right.contains_key(&0);
    if is_left_const && is_right_const {
        // const <op> const: delegate directly to the scalar merge.
        let left_expr0 = left.remove(&0).unwrap();
        let right_expr0 = right.get_mut(&0).unwrap();
        return merge_two_linear_expr(left_expr0, right_expr0, op);
    } else if is_left_const && !is_right_const {
        // const <op> linear: only *, + and - keep the result linear.
        if op != Operation::Times && op != Operation::Plus && op != Operation::Minus {
            return false;
        }
        if op == Operation::Times {
            // Distribute the constant over every coefficient of `right`.
            let left_expr0 = left.remove(&0).unwrap();
            for right_expr in right.values_mut() {
                if !merge_two_linear_expr(left_expr0.clone(), right_expr, op) {
                    return false;
                }
            }
            return true;
        }
        // + / - fall through to the additive merge below.
    } else if !is_left_const && is_right_const {
        // linear <op> const: *, /, + and - keep the result linear.
        let right_expr0 = right.remove(&0).unwrap();
        if op != Operation::Times
            && op != Operation::Div
            && op != Operation::Plus
            && op != Operation::Minus
        {
            return false;
        }
        if op == Operation::Times || op == Operation::Div {
            // Apply the constant to every coefficient of `left`, moving the
            // results into the (now emptied) `right` map.
            for (index, left_expr) in left {
                let mut new_expr = right_expr0.clone();
                if !merge_two_linear_expr(left_expr, &mut new_expr, op) {
                    return false;
                }
                right.insert(index, new_expr);
            }
            return true;
        }
        // + / -: restore the constant term, then use the additive merge below.
        right.insert(0, right_expr0);
    } else if op != Operation::Plus && op != Operation::Minus {
        // linear <op> linear is only linear for + and -.
        return false;
    }
    // Additive merge into `right`; for subtraction negate `right` first so
    // the result is left - right.
    if op == Operation::Minus {
        for right_expr in right.values_mut() {
            let left_expr = [Number(-1.0)].into();
            if !merge_two_linear_expr(left_expr, right_expr, Operation::Times) {
                return false;
            }
        }
    }
    // Add matching coefficients; keys present only in `left` move over as-is.
    for (index, left_expr) in left {
        if let Some(right_expr) = right.get_mut(&index) {
            if !merge_two_linear_expr(left_expr, right_expr, Operation::Plus) {
                return false;
            }
        } else {
            right.insert(index, left_expr);
        }
    }
    true
}
/// 合并两个系数方程
pub fn merge_two_linear_expr(
mut left: VecDeque<Token>,
right: &mut VecDeque<Token>,
op: Operation,
) -> bool {
match op {
Operation::Plus => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(l + r);
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::Minus => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(l - r);
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::Times => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(l * r);
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::Div => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(l / r);
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::Rem => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(l % r);
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::Pow => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(l.powf(r));
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::Equal => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(if l == r { 1.0 } else { 0.0 });
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::Unequal => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(if l != r { 1.0 } else { 0.0 });
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::LessThan => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(if l < r { 1.0 } else { 0.0 });
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::GreatThan => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(if l > r { 1.0 } else { 0.0 });
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::LtOrEqual => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(if l <= r { 1.0 } else { 0.0 });
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::GtOrEqual => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(if l >= r { 1.0 } else { 0.0 });
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::And => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(if (l > 0.0) && (r > 0.0) { 1.0 } else { 0.0 });
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::Or => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(if (l > 0.0) || (r > 0.0) { 1.0 } else { 0.0 });
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::BitAnd => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number((l as i64 & r as i64) as f64);
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::BitOr => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number((l as i64 | r as i64) as f64);
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::BitXor => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number((l as i64 ^ r as i64) as f64);
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::BitShl => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(((l as i64) << (r as i64)) as f64);
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::BitShr => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
let t = Number(((l as i64) >> (r as i64)) as f64);
right.clear();
right.push_front(t);
return true;
}
}
}
Operation::BitAt => {
if let Some(Number(l)) = fetch_number_token(&left) {
if let Some(Number(r)) = fetch_number_token(right) {
if !(1.0..=64.0).contains(&r) {
return false;
}
let f = if (l as i64) & 2_i64.pow(r as u32 - 1) != 0 {
1.0
} else {
0.0
};
let t = Number(f);
right.clear();
right.push_front(t);
return true;
}
}
}
_ => return false,
}
while !left.is_empty() {
let t = left.pop_back().unwrap();
right.push_front(t);
}
right.push_back(Binary(op));
true
}
/// If `expr` consists of exactly one `Number` token, return a clone of it;
/// otherwise return `None`.
fn fetch_number_token(expr: &VecDeque<Token>) -> Option<Token> {
    match expr.front() {
        Some(t @ Number(_)) if expr.len() == 1 => Some(t.clone()),
        _ => None,
    }
}
/// Errors produced while parsing decision-variable definition strings.
pub enum XError {
    /// The variable definition string is malformed.
    Wrongx,
}
#[allow(clippy::type_complexity)]
/// Parse a comma-separated list of variable definitions of the form
/// `name:type` or `name:[type/lower/upper]`, where type is
/// 1 = binary, 2 = integer, 3 = float.
///
/// Returns `(names, types, name_to_position, upper_bounds, lower_bounds)`;
/// bounds are `(position, expr)` pairs and are only collected for integer
/// and float variables. Any malformed definition yields `Err(XError::Wrongx)`.
pub fn get_x_info(
    x_expr_str: &str,
) -> Result<(Vec<String>, Vec<u8>, HashMap<String, usize>, Vec<(usize, Expr)>, Vec<(usize, Expr)>), XError> {
    // First determine variable names and types.
    let x: Vec<&str> = x_expr_str.split(',').collect();
    let mut x_name = Vec::with_capacity(x.len());
    let mut binary_int_float: Vec<u8> = Vec::with_capacity(x.len());
    let mut x_name_pos = HashMap::with_capacity(x.len());
    let mut x_upper = Vec::new();
    let mut x_lower = Vec::new();
    for (pos, x_s) in x.into_iter().enumerate() {
        let name_type: Vec<&str> = x_s.split(':').collect();
        if name_type.len() != 2 {
            return Err(XError::Wrongx);
        }
        // Handle the variable name.
        let name = name_type[0].trim().to_string();
        x_name_pos.insert(name.clone(), pos);
        x_name.push(name);
        // Handle the variable type and its bounds.
        let x_settings = name_type[1].trim();
        if x_settings.starts_with('[') {
            if x_settings.len() < 3 {
                return Err(XError::Wrongx);
            }
            // Checked slicing: a multi-byte character at the closing position
            // previously panicked on a non-char-boundary; now it is an error.
            let inner = x_settings
                .get(1..x_settings.len() - 1)
                .ok_or(XError::Wrongx)?;
            let tmp: Vec<&str> = inner.split('/').collect();
            // Parse the type.
            let var_type: u8 = tmp[0].parse().map_err(|_| XError::Wrongx)?;
            if !(1..=3).contains(&var_type) {
                // 1: binary, 2: integer 3: float
                return Err(XError::Wrongx);
            }
            binary_int_float.push(var_type);
            // Bounds are only meaningful for integer/float variables and
            // only when both slots are present ("[type/lower/upper]").
            if (var_type == 2 || var_type == 3) && tmp.len() == 3 {
                if !tmp[1].trim().is_empty() {
                    if let Ok(l) = tmp[1].parse() {
                        x_lower.push((pos, l));
                    } else {
                        return Err(XError::Wrongx);
                    }
                }
                if !tmp[2].trim().is_empty() {
                    if let Ok(u) = tmp[2].parse() {
                        x_upper.push((pos, u));
                    } else {
                        return Err(XError::Wrongx);
                    }
                }
            }
        } else {
            // Only a type, no bounds; the default domain is >= 0.
            let var_type: u8 = x_settings.parse().map_err(|_| XError::Wrongx)?;
            if !(1..=3).contains(&var_type) {
                // 1: binary, 2: integer 3: float
                return Err(XError::Wrongx);
            }
            binary_int_float.push(var_type);
        }
    }
    Ok((x_name, binary_int_float, x_name_pos, x_upper, x_lower))
}
/// Parse `name` or `name:init` variable definitions into parallel vectors
/// of names and initial-value expressions. A missing or unparsable init
/// defaults to `Expr::new()` (i.e. 0). More than one ':' yields `None`.
pub fn create_x_name_init_cx(x_define: &Vec<String>) -> Option<(Vec<String>, Vec<Expr>)> {
    let mut names = Vec::with_capacity(x_define.len());
    let mut inits = Vec::with_capacity(x_define.len());
    for definition in x_define {
        let fields: Vec<&str> = definition.split(':').collect();
        match fields.as_slice() {
            // Name only: the initial value defaults to an empty expression.
            [name] => {
                names.push(name.trim().to_string());
                inits.push(Expr::new());
            }
            // Name plus an initial-value expression.
            [name, init] => {
                names.push(name.trim().to_string());
                inits.push(init.trim().parse().unwrap_or_else(|_| Expr::new()));
            }
            _ => return None,
        }
    }
    Some((names, inits))
}
/// Parse `name` or `name:init` variable definitions into parallel vectors
/// of names and initial-value expressions. A missing or unparsable init
/// defaults to `Expr::new()`. More than one ':' in a definition yields `None`.
pub fn create_x_name_init(x_define: &Vec<String>) -> Option<(Vec<String>, Vec<Expr>)> {
    let mut parsed_names = Vec::with_capacity(x_define.len());
    let mut parsed_inits = Vec::with_capacity(x_define.len());
    for entry in x_define {
        let pieces: Vec<&str> = entry.split(':').collect();
        match pieces.as_slice() {
            // Bare name: initial value is the empty (zero) expression.
            [name] => {
                parsed_names.push(name.trim().to_string());
                parsed_inits.push(Expr::new());
            }
            // "name:init": keep the init if it parses, otherwise fall back.
            [name, init] => {
                parsed_names.push(name.trim().to_string());
                parsed_inits.push(init.trim().parse().unwrap_or_else(|_| Expr::new()));
            }
            _ => return None,
        }
    }
    Some((parsed_names, parsed_inits))
}
/// Parse `key: value` lines into a map, skipping blank lines.
///
/// Each non-blank line must contain exactly one ':'; otherwise the 1-based
/// index of the offending line is returned as the error.
pub fn read_parameters_from_str(parameters_str: &[&str]) -> Result<HashMap<String, String>, usize> {
    let mut parameters = HashMap::new();
    for (line_no, raw) in parameters_str.iter().enumerate() {
        if raw.trim().is_empty() {
            continue;
        }
        match raw.split_once(':') {
            // Exactly one ':' — the remainder must not contain another one.
            Some((key, value)) if !value.contains(':') => {
                parameters.insert(key.trim().to_string(), value.trim().to_string());
            }
            _ => return Err(line_no + 1),
        }
    }
    Ok(parameters)
}
// The code above must be kept identical to the corresponding code in sparrowzz.
\ No newline at end of file
[package]
name = "eig-domain"
version = "0.1.0"
authors = ["dongshufeng <dongshufeng@zju.edu.cn>"]
edition.workspace = true
rust-version.workspace = true
build = "build.rs"
[dependencies]
serde = { version = "1.0", features = ["derive"] }
protobuf = { version = "3.7", features = ["with-bytes"] }
# this project
eig-expr = { path = "../eig-expr" }
csv = "1.3"
calamine = { version = "0.30", features = []}
encoding_rs = "0.8"
[build-dependencies]
protobuf-codegen = "3.7"
protobuf = { version = "3.7", features = ["with-bytes"] }
\ No newline at end of file
use protobuf::descriptor::field_descriptor_proto::Type;
use protobuf::reflect::FieldDescriptor;
use protobuf::reflect::MessageDescriptor;
use protobuf_codegen::Codegen;
use protobuf_codegen::Customize;
use protobuf_codegen::CustomizeCallback;
/// Build script: generates Rust modules from the .proto schemas, injecting
/// serde derives/attributes into the generated code via a customize callback.
fn main() {
    // Callback that decorates the generated protobuf types with serde support.
    struct GenSerde;
    impl CustomizeCallback for GenSerde {
        fn message(&self, _message: &MessageDescriptor) -> Customize {
            // Every generated message derives Serialize/Deserialize.
            Customize::default().before("#[derive(::serde::Serialize, ::serde::Deserialize)]")
        }
        fn field(&self, field: &FieldDescriptor) -> Customize {
            if field.proto().type_() == Type::TYPE_ENUM {
                // `EnumOrUnknown` is not a part of rust-protobuf, so external serializer is needed.
                Customize::default().before(
                    "#[serde(serialize_with = \"crate::serialize_enum_or_unknown\", deserialize_with = \"crate::deserialize_enum_or_unknown\")]")
            } else {
                Customize::default()
            }
        }
        fn special_field(&self, _message: &MessageDescriptor, _field: &str) -> Customize {
            // protobuf bookkeeping fields (e.g. cached sizes) are not serialized.
            Customize::default().before("#[serde(skip)]")
        }
    }
    // Generate pure-Rust code for the three schemas into OUT_DIR/protos.
    Codegen::new()
        .pure()
        .cargo_out_dir("protos")
        .include("protos")
        .inputs([
            "protos/eig.proto",
            "protos/aoe.proto",
            "protos/pbhymqtt.proto",
        ])
        .customize_callback(GenSerde)
        .run()
        .expect("protobuf codegen failed.");
}
// Result of evaluating one AOE event node.
message PbEventResult {
    // Outcome of an event evaluation.
    enum EventEvalResult {
        Happen = 1;
        NotHappen = 2;
        Canceled = 3;
        Error = 4;
    }
    required uint64 id = 1;
    required uint64 start_time = 2;
    required uint64 end_time = 3;
    required EventEvalResult final_result = 4;
}
// Result of executing one AOE action (the edge from source to target event).
message PbActionResult {
    // Outcome of an action execution.
    enum ActionExeResult {
        NotRun = 1;
        Success = 2;
        Failed = 3;
    }
    required uint64 source_id = 1;
    required uint64 target_id = 2;
    required uint64 start_time = 3;
    required uint64 end_time = 4;
    required ActionExeResult final_result = 5;
    optional uint32 fail_code = 6;
    repeated uint64 yk_points = 7;
    repeated int64 yk_values = 8;
    repeated uint64 yt_points = 9;
    repeated double yt_values = 10;
    repeated string variables = 11;
    repeated double var_values = 12;
}
// Aggregate result of one AOE run.
message PbAoeResult {
    // aoe id
    required uint64 aoe_id = 1;
    required uint64 start_time = 2;
    required uint64 end_time = 3;
    repeated PbEventResult event_results = 4;
    repeated PbActionResult action_results = 5;
}
// A batch of AOE run results.
message PbAoeResults {
    repeated PbAoeResult results = 1;
}
// Start/stop control command for an AOE.
message PbAoeOperation {
    enum Op {
        START = 1;
        STOP = 2;
    }
    required uint64 aoe_id = 1; // AOE ID
    required Op operation = 2; // operation to apply to that AOE
}
\ No newline at end of file
// An analog (floating-point) point value.
message PbAnalogValue {
    // Point id.
    required uint64 pointId = 1;
    // Measured value.
    required double measValue = 2;
    // Timestamp.
    optional uint64 timestamp = 3;
    // Raw (untransformed) value.
    optional double origValue = 4;
    // change init
    optional bool change_init = 5;
    // source
    optional uint32 source = 6;
}
// A discrete (integer) point value.
message PbDiscreteValue {
    // Point id.
    required uint64 pointId = 1;
    // New measured value.
    required int64 measValue = 2;
    // Timestamp.
    optional uint64 timestamp = 3;
    // Raw (untransformed) value.
    optional int64 origValue = 4;
    // change init
    optional bool change_init = 5;
    // source
    optional uint32 source = 6;
}
// A batch of point values.
message PbPointValues {
    repeated PbDiscreteValue dValues = 1;
    repeated PbAnalogValue aValues = 2;
}
// Command to set an integer point.
message PbSetIntPoint {
    // Id of the sender issuing the command.
    required uint64 senderId = 1;
    // Point id.
    required uint64 pointId = 2;
    required int64 value = 3;
    optional uint64 timestamp = 4;
}
// Command to set a float point.
message PbSetFloatPoint {
    // Id of the sender issuing the command.
    required uint64 senderId = 1;
    // Point id.
    required uint64 pointId = 2;
    required double value = 3;
    optional uint64 timestamp = 4;
}
// A batch of set-point commands.
message PbSetPoints {
    repeated PbSetIntPoint dValues = 1;
    repeated PbSetFloatPoint aValues = 2;
}
// A file transferred over MQTT.
message PbFile {
    enum FileOperation {
        UPDATE = 1;
        DELETE = 2;
        RENAME = 3;
    }
    optional string fileName = 1;
    required bytes fileContent = 2;
    optional FileOperation op = 3;
    optional bool is_zip = 4[default = false];
}
// A batch of transferred files.
message PbFiles {
    repeated PbFile files = 1;
}
// Ping response message.
message PbEigPingRes {
    required string id = 1;
    required string name = 2;
    required string ip = 3;
    optional string desc = 4;
    optional bool is_ems = 5;
    optional bool is_standby = 6;
}
// A generic key/value property.
message PbProperty {
    required string key = 1;
    required string value = 2;
}
// Name and size of a configuration file.
message PbFileInfo {
    required string file_name = 1;
    required uint64 file_size = 2;
}
// Overview of the EIG configuration: properties plus the transport, point,
// SVG and AOE configuration files.
message PbEigProfile {
    repeated PbProperty properties = 1;
    repeated PbFileInfo transport_files = 2;
    repeated PbFileInfo point_files = 3;
    repeated PbFileInfo svg_files = 4;
    repeated PbFileInfo aoe_files = 5;
}
// Definition of an alarm rule.
message PbAlarmDefine {
    enum AlarmLevel {
        Common = 1;
        Important = 2;
        Emergency = 3;
    }
    required uint32 id = 1;
    // Expression that triggers the alarm.
    required string rule = 2;
    // Severity level.
    required AlarmLevel level = 3;
    optional string name = 4;
    // Detailed description of this alarm.
    optional string desc = 5;
    // Devices or users this rule belongs to; only configured users receive SMS.
    optional string owners = 6;
}
// A single alarm occurrence or disappearance.
message PbEigAlarm {
    enum AlarmStatus {
        occur = 1;
        disappear = 2;
    }
    enum AlarmType {
        invalidPoints = 1;
        invalidTransport = 2;
        invalidAOE = 3;
        alarmLevel1 = 4;
        alarmLevel2 = 5;
        badData = 6;
        userDefine = 7;
    }
    required uint64 timestamp = 1;
    optional uint64 id = 2;
    optional AlarmType alarm_type = 3;
    optional AlarmStatus status = 4;
    optional uint32 define_id = 5;
    optional string content = 6;
}
// A batch of alarm definitions.
message PbAlarmDefines {
    repeated PbAlarmDefine defines = 1;
}
// A batch of alarms.
message PbEigAlarms {
    repeated PbEigAlarm alarms = 1;
}
// Result of executing a set-point (remote control / adjust) command.
message PbSetPointResult {
    enum SetPointStatus {
        YkCreated = 1;
        YtCreated = 2;
        YkSuccess = 3;
        YtSuccess = 4;
        YkFailTimeout = 5;
        YtFailTimeout = 6;
        YkFailTooBusy = 7;
        YtFailTooBusy = 8;
        YkFailProtocol = 9;
        YtFailProtocol = 10;
    }
    required uint64 sender_id = 1;
    required uint64 point_id = 2;
    required uint64 create_time = 3;
    required uint64 finish_time = 4;
    required uint64 command = 5;
    required SetPointStatus status = 6;
}
// A batch of set-point results.
message PbSetPointResults {
    repeated PbSetPointResult results = 1;
}
// A raw topic + payload pair.
message PbMessage {
    required string topic = 1;
    required bytes content = 2;
}
// An HTTP-like request tunneled over MQTT.
message PbRequest {
    enum RequestType {
        Get = 1;
        Post = 2;
        Put = 3;
        Delete = 4;
        Test = 5;
    }
    optional uint64 id = 1;
    required string url = 2;
    required RequestType function = 3;
    // base64 string
    optional string content = 4;
    repeated string header_keys = 5;
    repeated string header_values = 6;
}
// Response to a PbRequest.
message PbResponse {
    required uint64 request_id = 1;
    required bool is_ok = 2;
    // base64 encoded string
    optional string content = 3;
    // is 7z
    optional bool is_zip = 4;
}
\ No newline at end of file
// Acknowledgement reply (used for model registration, device registration
// and data reporting).
message PbHYAckResponse {
    // token
    required string token = 1;
    // Timestamp.
    optional string timestamp = 2;
    // Success / failure status.
    required string status = 3;
}
// GUID record structure.
message PbHYGuid {
    optional string model = 1;
    optional string port = 2;
    optional string addr = 3;
    optional string desc = 4;
    // GUID
    required string guid = 5;
    required string dev = 6;
}
// Reply to a GUID query.
message PbHYGuidResponse {
    required string token = 1;
    optional string timestamp = 2;
    // Matched GUID records.
    repeated PbHYGuid body = 3;
}
// Data read/write flow:
// A single point value entry.
message PbHYPointValue {
    required string name = 1;
    required string val = 2;
    optional string quality = 3;
    optional string secret = 4;
    optional string timestamp = 5;
}
// Point values read from one device.
message PbHYReadPoints {
    required string dev = 1;
    repeated PbHYPointValue body = 2;
}
// Reply to a data query.
message PbHYReadResponse {
    required string token = 1;
    optional string timestamp = 2;
    repeated PbHYReadPoints body = 3;
}
// Data write request.
message PbHYWriteRequest {
    required string token = 1;
    optional string timestamp = 2;
    required string data_row = 3;
    repeated PbHYPointValue body = 4;
}
use std::collections::HashMap;
use std::io::{Cursor, Write};
use std::path::Path;
use calamine::{open_workbook_auto_from_rs, Data, Reader, Sheets, Xlsx, open_workbook_from_rs};
/// Read an excel file from disk and convert its sheets to CSV byte buffers.
pub fn excel_to_csv_bytes<P: AsRef<Path>>(path: P) -> Option<Vec<Vec<u8>>> {
    std::fs::read(path)
        .ok()
        .and_then(|contents| excel_bytes_to_csv_bytes(&contents))
}
/// Load the first sheet of an xlsx workbook (given as raw bytes) together
/// with its merged-cell regions.
///
/// Returns `(row_count, col_count, merged, values)` where `merged` maps a
/// merged region's start cell `(row, col)` to its end cell, and `values`
/// maps the flattened index `row * col_count + col` to the cell's text.
/// Returns `None` on any read/parse failure or when the workbook has no
/// sheets.
pub fn get_first_sheet_merged_cells(bytes: Vec<u8>)
    -> Option<(u32, u32, HashMap<(u32,u32), (u32, u32)>, HashMap<usize, String>)> {
    let c = Cursor::new(bytes);
    let mut excel: Xlsx<_> = open_workbook_from_rs(c).ok()?;
    excel.load_merged_regions().ok()?;
    let sheet_names = excel.sheet_names();
    let mut max_col = 0;
    if sheet_names.len() > 0 {
        // Only the first sheet is inspected.
        let v = excel.merged_regions_by_sheet(&sheet_names[0]);
        let mut merged_cells = HashMap::with_capacity(v.len());
        for (_, _, c) in v {
            merged_cells.insert(c.start, c.end);
            // Track the right-most merged column: a merged region may extend
            // past the populated data range.
            if c.end.1 > max_col {
                max_col = c.end.1;
            }
        }
        let range = excel.worksheet_range(&sheet_names[0]).ok()?;
        let max_col = max_col as usize;
        let (m, w) = range.get_size();
        // Logical width is the larger of the data width and the merged width.
        let n = if w > max_col + 1 { w } else { max_col + 1 };
        let mut values = HashMap::with_capacity(m * n);
        for (i, r) in range.rows().enumerate() {
            for (j, c) in r.iter().enumerate() {
                let key = i * n + j;
                // Render every cell kind to its display text.
                let value = match *c {
                    Data::Empty => String::new(),
                    Data::String(ref s) => format!("{s}"),
                    Data::Float(ref f) => format!("{f}"),
                    Data::DateTime(ref data) => format!("{data}"),
                    Data::DurationIso(ref s) | Data::DateTimeIso(ref s) => format!("{s}"),
                    Data::Int(ref i) => format!("{i}"),
                    Data::Error(ref e) => format!("{:?}", e),
                    Data::Bool(ref b) => format!("{b}"),
                };
                values.insert(key, value);
            }
        }
        return Some((m as u32, n as u32, merged_cells, values));
    }
    None
}
/// Convert excel bytes to one CSV byte buffer per visible sheet.
///
/// If the bytes are not a recognizable workbook, they are returned as-is
/// (as a single buffer) when they already parse as CSV; otherwise `None`.
pub fn excel_bytes_to_csv_bytes(bytes: &[u8]) -> Option<Vec<Vec<u8>>> {
    let cursor = Cursor::new(bytes.to_vec());
    match open_workbook_auto_from_rs(cursor) {
        Ok(mut workbook) => {
            // Sheets whose names start with '_' are treated as internal and skipped.
            let visible: Vec<String> = workbook
                .sheet_names()
                .into_iter()
                .filter(|name| !name.starts_with('_'))
                .collect();
            sheets_to_csv(&mut workbook, visible)
        }
        Err(_) => {
            // Not an excel workbook: accept the raw bytes if they look like CSV.
            let looks_like_csv = csv::ReaderBuilder::new()
                .has_headers(true)
                .from_reader(bytes)
                .records()
                .next()
                .is_some_and(|record| record.is_ok());
            if looks_like_csv {
                Some(vec![bytes.to_vec()])
            } else {
                None
            }
        }
    }
}
/// Convert only the named sheets of an excel workbook (given as raw bytes)
/// to CSV byte buffers.
pub fn excel_bytes_to_csv_bytes_by_sheet_names(
    bytes: &[u8],
    names: Vec<String>,
) -> Option<Vec<Vec<u8>>> {
    let mut workbook = open_workbook_auto_from_rs(Cursor::new(bytes.to_vec())).ok()?;
    sheets_to_csv(&mut workbook, names)
}
/// Convert the named worksheets of `xl` into CSV byte buffers, one per
/// non-empty sheet. Fields containing commas, quotes or newlines are quoted
/// per RFC 4180; rows are terminated with CRLF.
fn sheets_to_csv<T>(xl: &mut Sheets<T>, names: Vec<String>) -> Option<Vec<Vec<u8>>>
where
    T: std::io::Read + std::io::Seek,
{
    let mut result = Vec::with_capacity(names.len());
    for name in names {
        let range = xl.worksheet_range(name.as_str()).ok()?;
        // Index of the last column. `saturating_sub` avoids the usize
        // underflow (debug panic) the plain `- 1` caused on a zero-width sheet.
        let n = range.get_size().1.saturating_sub(1);
        let mut dest = Vec::new();
        for r in range.rows() {
            for (i, c) in r.iter().enumerate() {
                match *c {
                    Data::Empty => Ok(()),
                    Data::String(ref s) => {
                        // Quote when needed, doubling embedded quotes.
                        if s.contains(',')
                            || s.contains('\r')
                            || s.contains('\n')
                            || s.contains('"')
                        {
                            let escaped = s.replace('\"', "\"\"");
                            write!(dest, "\"{escaped}\"")
                        } else {
                            write!(dest, "{s}")
                        }
                    }
                    Data::Float(ref f) => write!(dest, "{f}"),
                    Data::DateTime(ref data) => write!(dest, "{data}"),
                    Data::DurationIso(ref s) | Data::DateTimeIso(ref s) => write!(dest, "{s}"),
                    Data::Int(ref i) => write!(dest, "{i}"),
                    Data::Error(ref e) => write!(dest, "{:?}", e),
                    Data::Bool(ref b) => write!(dest, "{b}"),
                }
                .ok()?;
                // Separator after every field except the last column.
                if i != n {
                    write!(dest, ",").ok()?;
                }
            }
            write!(dest, "\r\n").ok()?;
        }
        if !dest.is_empty() {
            result.push(dest);
        }
    }
    Some(result)
}
/// Text encodings this module can detect and transcode from.
#[derive(Debug, PartialEq)]
enum FileEncode {
    /// UTF-8 (with or without BOM).
    UTF8,
    /// UTF-16 little-endian (BOM FF FE).
    UTF16LE,
    /// UTF-16 big-endian (BOM FE FF).
    UTF16BE,
    /// GBK — assumed whenever the bytes violate UTF-8 structural rules.
    GBK,
}
/// Decode `data` (UTF-8 / UTF-16LE / UTF-16BE / GBK, auto-detected via
/// `get_encoding`) into UTF-8 bytes. Returns `Err(())` when the bytes are
/// malformed for the detected encoding.
pub fn transfer_to_utf8(data: Vec<u8>) -> Result<Vec<u8>,()> {
    let mut decoder = match get_encoding(&data) {
        FileEncode::UTF8 => encoding_rs::UTF_8.new_decoder(),
        FileEncode::UTF16LE => encoding_rs::UTF_16LE.new_decoder(),
        FileEncode::UTF16BE => encoding_rs::UTF_16BE.new_decoder(),
        FileEncode::GBK => encoding_rs::GBK.new_decoder(),
    };
    // Worst-case output size for a single decode call over the whole input.
    let buffer_len = decoder.max_utf8_buffer_length(data.len()).unwrap();
    let mut output = vec![0u8; buffer_len];
    let (_, _, written, had_errors) = decoder.decode_to_utf8(&data, &mut output, true);
    if had_errors {
        return Err(());
    }
    output.truncate(written);
    Ok(output)
}
/// Detect the encoding of `data`.
///
/// BOM sniffing first, then a structural UTF-8 heuristic: three consecutive
/// multi-byte UTF-8 sequences classify the data as UTF-8; any byte pattern
/// that violates UTF-8 rules classifies it as GBK. Pure ASCII falls through
/// to UTF-8.
fn get_encoding(data: &[u8]) -> FileEncode {
    let len = data.len();
    // BOM detection. `>=` (not `>`) so a file that is exactly a BOM is
    // still recognized.
    if len >= 2 && data[0] == 0xFF && data[1] == 0xFE {
        return FileEncode::UTF16LE;
    } else if len >= 2 && data[0] == 0xFE && data[1] == 0xFF {
        return FileEncode::UTF16BE;
    } else if len >= 3 && data[0] == 0xEF && data[1] == 0xBB && data[2] == 0xBF {
        // UTF8-BOM
        return FileEncode::UTF8;
    }
    // No BOM: apply UTF-8's structural rules:
    // 1) a single-byte character has its top bit clear (plain ASCII);
    // 2) an n-byte character (n > 1) starts with n leading 1-bits then a 0,
    //    and every continuation byte starts with the bits 10.
    let mut utf8_number = 0; // consecutive multi-byte UTF-8 sequences seen
    let mut index = 0;
    while index < len {
        // Count the leading 1-bits of the sequence's first byte.
        let mut byte_number = 0;
        for i in 0..8 {
            if data[index] & (0b1000_0000 >> i) != 0 {
                byte_number += 1;
            } else {
                break;
            }
        }
        if byte_number == 0 {
            // ASCII byte: reset the consecutive multi-byte counter.
            utf8_number = 0;
            index += 1;
        } else if byte_number == 1 || byte_number > 4 {
            // A lone continuation byte (10xxxxxx) or more than 4 leading
            // ones cannot start a UTF-8 sequence.
            return FileEncode::GBK;
        } else {
            // A sequence truncated at the end of the buffer is not valid
            // UTF-8; previously this read out of bounds and panicked.
            if index + byte_number > len {
                return FileEncode::GBK;
            }
            // Every continuation byte must match 10xxxxxx.
            for i in 1..byte_number {
                if data[index + i] & 0b1100_0000 != 0b1000_0000 {
                    return FileEncode::GBK;
                }
            }
            // Even valid-looking sequences could be GBK; require three
            // consecutive multi-byte UTF-8 characters before deciding.
            utf8_number += 1;
            index += byte_number;
            if utf8_number >= 3 {
                return FileEncode::UTF8;
            }
        }
    }
    FileEncode::UTF8
}
\ No newline at end of file
use std::fmt;
use std::marker::PhantomData;
use csv::StringRecord;
use protobuf::{EnumFull, EnumOrUnknown};
use serde::{Deserialize, Serialize};
use eig_expr::Expr;
use crate::prop::DataUnit;
pub mod prop;
pub mod proto;
pub mod web;
pub mod excel;
/**
* @api {Measurement} /Measurement Measurement
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} point_id 测点id
* @apiSuccess {String} point_name 测点名
* @apiSuccess {String} alias_id 字符串id
* @apiSuccess {bool} is_discrete 是否是离散量
* @apiSuccess {bool} is_computing_point 是否是计算点
* @apiSuccess {String} expression 如果是计算点,这是表达式
* @apiSuccess {String} trans_expr 变换公式
* @apiSuccess {String} inv_trans_expr 逆变换公式
* @apiSuccess {String} change_expr 判断是否"变化"的公式,用于变化上传或储存
* @apiSuccess {String} zero_expr 判断是否为0值的公式
* @apiSuccess {String} data_unit 单位
* @apiSuccess {f64} upper_limit 上限,用于坏数据辨识
* @apiSuccess {f64} lower_limit 下限,用于坏数据辨识
* @apiSuccess {String} alarm_level1_expr 告警级别1的表达式
* @apiSuccess {String} alarm_level2_expr 告警级别2的表达式
* @apiSuccess {bool} is_realtime 如是,则不判断是否"变化",均上传
* @apiSuccess {bool} is_soe 是否是soe点
* @apiSuccess {u64} init_value 默认值存储在8个字节,需要根据is_discrete来转换成具体的值
*/
/// Definition of a measurement point.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct Measurement {
    /// Unique numeric id of the point.
    pub point_id: u64,
    /// Human-readable point name.
    pub point_name: String,
    /// String alias id.
    pub alias_id: String,
    /// Whether the point is a discrete (integer) quantity.
    pub is_discrete: bool,
    /// Whether the point is a computed point.
    pub is_computing_point: bool,
    /// Expression of the computed point (when `is_computing_point`).
    pub expression: String,
    /// Transform formula.
    pub trans_expr: String,
    /// Inverse transform formula.
    pub inv_trans_expr: String,
    /// Formula deciding whether a value "changed" (for change-based upload or storage).
    pub change_expr: String,
    /// Formula deciding whether a value counts as zero.
    pub zero_expr: String,
    /// Unit, as text.
    pub data_unit: String,
    // Not serialized — presumably the parsed form of `data_unit`; confirm.
    #[serde(skip)]
    pub unit: DataUnit,
    /// Upper limit, used for bad-data detection.
    pub upper_limit: f64,
    /// Lower limit, used for bad-data detection.
    pub lower_limit: f64,
    /// Level-1 alarm expression (text).
    pub alarm_level1_expr: String,
    // Not serialized — presumably the parsed `alarm_level1_expr`; confirm.
    #[serde(skip)]
    pub alarm_level1: Option<Expr>,
    /// Level-2 alarm expression (text).
    pub alarm_level2_expr: String,
    // Not serialized — presumably the parsed `alarm_level2_expr`; confirm.
    #[serde(skip)]
    pub alarm_level2: Option<Expr>,
    /// If true, values are always uploaded without the "changed" check.
    pub is_realtime: bool,
    /// Whether this is an SOE (sequence-of-events) point.
    pub is_soe: bool,
    /// Default value stored in 8 raw bytes; interpret according to `is_discrete`.
    pub init_value: u64,
    /// Description.
    pub desc: String,
    /// Whether this point is remotely acquired; decided at runtime from
    /// whether the point belongs to a transport channel.
    #[serde(skip)]
    pub is_remote: bool,
}
/// A single value of a measurement point, either discrete or analog,
/// optionally carrying a transformed value.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct MeasureValue {
    /// Id of the point this value belongs to.
    pub point_id: u64,
    /// Whether the value is discrete (integer) rather than analog (float).
    pub is_discrete: bool,
    /// Timestamp.
    pub timestamp: u64,
    /// Analog value (meaningful when `is_discrete` is false).
    pub analog_value: f64,
    /// Discrete value (meaningful when `is_discrete` is true).
    pub discrete_value: i64,
    /// Whether a transform has been applied.
    pub is_transformed: bool,
    /// Transformed analog value.
    pub transformed_analog: f64,
    /// Transformed discrete value.
    pub transformed_discrete: i64,
}
impl MeasureValue {
    /// Build a discrete (integer-valued) measurement with an explicit timestamp.
    pub fn init_discrete_with_time(
        point_id: u64,
        discrete_value: i64,
        timestamp: u64,
    ) -> MeasureValue {
        MeasureValue {
            point_id,
            is_discrete: true,
            timestamp,
            analog_value: 0.0,
            discrete_value,
            is_transformed: false,
            transformed_analog: 0.0,
            transformed_discrete: 0,
        }
    }

    /// Build an analog (float-valued) measurement with an explicit timestamp.
    pub fn init_analog_with_time(point_id: u64, analog_value: f64, timestamp: u64) -> MeasureValue {
        MeasureValue {
            point_id,
            is_discrete: false,
            timestamp,
            analog_value,
            discrete_value: 0,
            is_transformed: false,
            transformed_analog: 0.0,
            transformed_discrete: 0,
        }
    }

    /// Build a measurement from a boolean: the value is stored as 1/0 in
    /// either the discrete or the analog slot, depending on `is_discrete`.
    pub fn create_bool_measure(
        point_id: u64,
        b: bool,
        timestamp: u64,
        is_discrete: bool,
    ) -> MeasureValue {
        let (discrete_value, analog_value) = if is_discrete {
            (i64::from(b), 0.0)
        } else {
            (0, if b { 1.0 } else { 0.0 })
        };
        MeasureValue {
            point_id,
            is_discrete,
            timestamp,
            analog_value,
            discrete_value,
            is_transformed: false,
            transformed_analog: 0.0,
            transformed_discrete: 0,
        }
    }

    /// Current value as f64, preferring the transformed value when present.
    pub fn get_value(&self) -> f64 {
        match (self.is_discrete, self.is_transformed) {
            (true, true) => self.transformed_discrete as f64,
            (true, false) => self.discrete_value as f64,
            (false, true) => self.transformed_analog,
            (false, false) => self.analog_value,
        }
    }

    /// Current value as i64 (analog values are truncated), preferring the
    /// transformed value when present.
    pub fn get_value2(&self) -> i64 {
        match (self.is_discrete, self.is_transformed) {
            (true, true) => self.transformed_discrete,
            (true, false) => self.discrete_value,
            (false, true) => self.transformed_analog as i64,
            (false, false) => self.analog_value as i64,
        }
    }

    /// Deviation of `new_m`'s value relative to this one.
    pub fn get_error(&self, new_m: &MeasureValue) -> f64 {
        new_m.get_value() - self.get_value()
    }

    /// Overwrite the timestamp.
    pub fn update_time(&mut self, t: u64) {
        self.timestamp = t;
    }

    /// Copy the value (and the transformed value, if any) from `new_m`.
    /// Silently ignored when the discrete/analog kinds differ.
    pub fn update(&mut self, new_m: &MeasureValue) {
        if self.is_discrete != new_m.is_discrete {
            return;
        }
        if self.is_discrete {
            self.discrete_value = new_m.discrete_value;
            if new_m.is_transformed {
                self.transformed_discrete = new_m.transformed_discrete;
            }
        } else {
            self.analog_value = new_m.analog_value;
            if new_m.is_transformed {
                self.transformed_analog = new_m.transformed_analog;
            }
        }
        self.is_transformed = new_m.is_transformed;
        self.timestamp = new_m.timestamp;
    }

    /// Whether `new_m` carries the same value as this measurement.
    /// For analog values, compares the transformed value when `new_m`
    /// is transformed; discrete values always compare the raw value.
    pub fn is_same_value(&self, new_m: &MeasureValue) -> bool {
        if new_m.is_discrete {
            self.discrete_value == new_m.discrete_value
        } else if new_m.is_transformed {
            self.transformed_analog == new_m.transformed_analog
        } else {
            self.analog_value == new_m.analog_value
        }
    }
}
/// Serialize an optional protobuf enum: a known value as its name string,
/// an unknown value as the raw i32, and `None` as serde's unit.
fn serialize_enum_or_unknown<E: EnumFull, S: serde::Serializer>(
    e: &Option<EnumOrUnknown<E>>,
    s: S,
) -> Result<S::Ok, S::Error> {
    match e {
        None => s.serialize_unit(),
        Some(value) => match value.enum_value() {
            Ok(known) => s.serialize_str(known.descriptor().name()),
            Err(raw) => s.serialize_i32(raw),
        },
    }
}
/**
* @api {整型指令数据} /SetIntValue SetIntValue
* @apiGroup A_Object
* @apiSuccess {u64} sender_id sender_id
* @apiSuccess {u64} point_id point_id
* @apiSuccess {i64} yk_command yk_command
* @apiSuccess {u64} timestamp timestamp
*/
/// Integer set-point command data.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetIntValue {
    /// Id of the command issuer.
    pub sender_id: u64,
    /// Id of the target point.
    pub point_id: u64,
    /// The integer command value.
    pub yk_command: i64,
    /// Command timestamp.
    pub timestamp: u64,
}
/**
* @api {浮点型指令数据} /SetFloatValue SetFloatValue
* @apiGroup A_Object
* @apiSuccess {u64} sender_id sender_id
* @apiSuccess {u64} point_id point_id
* @apiSuccess {f64} yt_command yt_command
* @apiSuccess {u64} timestamp timestamp
*/
/// Floating-point set-point command data.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetFloatValue {
    /// Id of the command issuer.
    pub sender_id: u64,
    /// Id of the target point.
    pub point_id: u64,
    /// The floating-point command value.
    pub yt_command: f64,
    /// Command timestamp.
    pub timestamp: u64,
}
/**
* @api {公式型指令数据} /SetPointValue SetPointValue
* @apiGroup A_Object
* @apiSuccess {u64} sender_id sender_id
* @apiSuccess {u64} point_id point_id
* @apiSuccess {expr} command command
* @apiSuccess {u64} timestamp timestamp
*/
/// Expression-based set-point command data.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetPointValue {
    /// Id of the command issuer.
    pub sender_id: u64,
    /// Id of the target point.
    pub point_id: u64,
    /// The command value, as an expression.
    pub command: Expr,
    /// Command timestamp.
    pub timestamp: u64,
}
/// Deserialize an `Option<EnumOrUnknown<E>>` produced by
/// `serialize_enum_or_unknown`: accepts a string (the enum value's name),
/// an integer (the raw value), or unit (`None`).
fn deserialize_enum_or_unknown<'de, E: EnumFull, D: serde::Deserializer<'de>>(
    d: D,
) -> Result<Option<EnumOrUnknown<E>>, D::Error> {
    struct DeserializeEnumVisitor<E: EnumFull>(PhantomData<E>);
    impl<'de, E: EnumFull> serde::de::Visitor<'de> for DeserializeEnumVisitor<E> {
        type Value = Option<EnumOrUnknown<E>>;
        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            write!(formatter, "a string, an integer or none")
        }
        // An integer is taken as the raw (possibly unknown) enum value.
        fn visit_i32<R>(self, v: i32) -> Result<Self::Value, R>
        where
            R: serde::de::Error,
        {
            Ok(Some(EnumOrUnknown::from_i32(v)))
        }
        // A string must name a known enum value.
        fn visit_str<R>(self, v: &str) -> Result<Self::Value, R>
        where
            R: serde::de::Error,
        {
            match E::enum_descriptor().value_by_name(v) {
                Some(v) => Ok(Some(EnumOrUnknown::from_i32(v.value()))),
                None => Err(serde::de::Error::custom(format!(
                    "unknown enum value: {}",
                    v
                ))),
            }
        }
        // Unit maps back to `None`.
        fn visit_unit<R>(self) -> Result<Self::Value, R>
        where
            R: serde::de::Error,
        {
            Ok(None)
        }
    }
    d.deserialize_any(DeserializeEnumVisitor(PhantomData))
}
/// Get column `col` of `record` as a trimmed string slice.
pub fn csv_str(record: &StringRecord, col: usize) -> Option<&str> {
    record.get(col).map(str::trim)
}
/// Get column `col` of `record` as a trimmed owned `String`.
pub fn csv_string(record: &StringRecord, col: usize) -> Option<String> {
    record.get(col).map(|field| field.trim().to_string())
}
/// Read column `col` of `record` as a decimal usize.
///
/// The cell is trimmed first, consistent with the other `csv_*` readers
/// (previously the raw cell was parsed, so padded values like " 5" failed);
/// the needless intermediate `to_string` allocation is also gone.
pub fn csv_usize(record: &StringRecord, col: usize) -> Option<usize> {
    record.get(col)?.trim().parse().ok()
}
pub fn csv_u8(record: &StringRecord, col: usize) -> Option<u8> {
let s = record.get(col)?.trim();
let r = if s.starts_with("0x") {
u8::from_str_radix(s.trim_start_matches("0x"), 16).ok()?
} else {
s.parse().ok()?
};
Some(r)
}
pub fn csv_u16(record: &StringRecord, col: usize) -> Option<u16> {
let s = record.get(col)?.trim();
let r = if s.starts_with("0x") {
u16::from_str_radix(s.trim_start_matches("0x"), 16).ok()?
} else {
s.parse().ok()?
};
Some(r)
}
pub fn csv_u32(record: &StringRecord, col: usize) -> Option<u32> {
let s = record.get(col)?.trim();
let r = if s.starts_with("0x") {
u32::from_str_radix(s.trim_start_matches("0x"), 16).ok()?
} else {
s.parse().ok()?
};
Some(r)
}
pub fn csv_u64(record: &StringRecord, col: usize) -> Option<u64> {
let s = record.get(col)?.trim();
let r = if s.starts_with("0x") {
u64::from_str_radix(s.trim_start_matches("0x"), 16).ok()?
} else {
s.parse().ok()?
};
Some(r)
}
pub fn csv_i8(record: &StringRecord, col: usize) -> Option<i8> {
let s = record.get(col)?.trim();
let r = if s.starts_with("0x") {
i8::from_str_radix(s.trim_start_matches("0x"), 16).ok()?
} else {
s.parse().ok()?
};
Some(r)
}
pub fn csv_i16(record: &StringRecord, col: usize) -> Option<i16> {
let s = record.get(col)?.trim();
let r = if s.starts_with("0x") {
i16::from_str_radix(s.trim_start_matches("0x"), 16).ok()?
} else {
s.parse().ok()?
};
Some(r)
}
pub fn csv_i32(record: &StringRecord, col: usize) -> Option<i32> {
let s = record.get(col)?.trim();
let r = if s.starts_with("0x") {
i32::from_str_radix(s.trim_start_matches("0x"), 16).ok()?
} else {
s.parse().ok()?
};
Some(r)
}
pub fn csv_i64(record: &StringRecord, col: usize) -> Option<i64> {
let s = record.get(col)?.trim();
let r = if s.starts_with("0x") {
i64::from_str_radix(s.trim_start_matches("0x"), 16).ok()?
} else {
s.parse().ok()?
};
Some(r)
}
/// Read column `col` of `record` as an f64 (cell is trimmed first).
pub fn csv_f64(record: &StringRecord, col: usize) -> Option<f64> {
    record.get(col)?.trim().parse().ok()
}
/// Read column `col` of `record` as an f32 (cell is trimmed first).
pub fn csv_f32(record: &StringRecord, col: usize) -> Option<f32> {
    record.get(col)?.trim().parse().ok()
}
\ No newline at end of file
use std::{fmt, str::FromStr};
use std::fmt::{Display, Formatter};
use serde::{Deserialize, Serialize};
/// Errors produced when converting text into a [`DataUnit`].
#[derive(Debug, Clone, PartialEq)]
pub enum DataUnitError {
    /// The input string did not match any known unit name.
    UnknownDataUnit(String),
}
/**
* @api {枚举_采集数据类型} /DataType DataType
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} Binary Binary
* @apiSuccess {String} OneByteIntSigned OneByteIntSigned
* @apiSuccess {String} OneByteIntSignedLower OneByteIntSignedLower
* @apiSuccess {String} OneByteIntSignedUpper OneByteIntSignedUpper
* @apiSuccess {String} OneByteIntUnsigned OneByteIntUnsigned
* @apiSuccess {String} OneByteIntUnsignedLower OneByteIntUnsignedLower
* @apiSuccess {String} OneByteIntUnsignedUpper OneByteIntUnsignedUpper
* @apiSuccess {String} TwoByteIntUnsigned TwoByteIntUnsigned
* @apiSuccess {String} TwoByteIntUnsignedSwapped TwoByteIntUnsignedSwapped
* @apiSuccess {String} TwoByteIntSigned TwoByteIntSigned
* @apiSuccess {String} TwoByteIntSignedSwapped TwoByteIntSignedSwapped
* @apiSuccess {String} TwoByteBcd TwoByteBcd
* @apiSuccess {String} FourByteIntUnsigned FourByteIntUnsigned
* @apiSuccess {String} FourByteIntSigned FourByteIntSigned
* @apiSuccess {String} FourByteIntUnsignedSwapped FourByteIntUnsignedSwapped
* @apiSuccess {String} FourByteIntSignedSwapped FourByteIntSignedSwapped
* @apiSuccess {String} FourByteIntUnsignedSwappedSwapped FourByteIntUnsignedSwappedSwapped
* @apiSuccess {String} FourByteIntSignedSwappedSwapped FourByteIntSignedSwappedSwapped
* @apiSuccess {String} FourByteFloat FourByteFloat
* @apiSuccess {String} FourByteFloatSwapped FourByteFloatSwapped
* @apiSuccess {String} FourByteFloatSwappedSwapped FourByteFloatSwappedSwapped
* @apiSuccess {String} FourByteBcd FourByteBcd
* @apiSuccess {String} FourByteBcdSwapped FourByteBcdSwapped
* @apiSuccess {String} FourByteMod10k FourByteMod10k
* @apiSuccess {String} FourByteMod10kSwapped FourByteMod10kSwapped
* @apiSuccess {String} SixByteMod10k SixByteMod10k
* @apiSuccess {String} SixByteMod10kSwapped SixByteMod10kSwapped
* @apiSuccess {String} EightByteIntUnsigned EightByteIntUnsigned
* @apiSuccess {String} EightByteIntSigned EightByteIntSigned
* @apiSuccess {String} EightByteIntUnsignedSwapped EightByteIntUnsignedSwapped
* @apiSuccess {String} EightByteIntSignedSwapped EightByteIntSignedSwapped
* @apiSuccess {String} EightByteIntUnsignedSwappedSwapped EightByteIntUnsignedSwappedSwapped
* @apiSuccess {String} EightByteIntSignedSwappedSwapped EightByteIntSignedSwappedSwapped
* @apiSuccess {String} EightByteFloat EightByteFloat
* @apiSuccess {String} EightByteFloatSwapped EightByteFloatSwapped
* @apiSuccess {String} EightByteFloatSwappedSwapped EightByteFloatSwappedSwapped
* @apiSuccess {String} EightByteMod10kSwapped EightByteMod10kSwapped
* @apiSuccess {String} EightByteMod10k EightByteMod10k
*/
/// Raw (wire/register) data type of an acquired value.
///
/// Each variant name encodes the width in bytes, the signedness or encoding
/// (integer, BCD, IEEE float, mod-10k), and any byte/word swapping applied
/// to the raw bytes. Use [`DataType::get_byte_count`] for the width.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum DataType {
    Binary,
    OneByteIntSigned,
    OneByteIntSignedLower,
    OneByteIntSignedUpper,
    OneByteIntUnsigned,
    OneByteIntUnsignedLower,
    OneByteIntUnsignedUpper,
    TwoByteIntUnsigned,
    TwoByteIntUnsignedSwapped,
    TwoByteIntSigned,
    TwoByteIntSignedSwapped,
    TwoByteBcd,
    FourByteIntUnsigned,
    FourByteIntSigned,
    FourByteIntUnsignedSwapped,
    FourByteIntSignedSwapped,
    FourByteIntUnsignedSwappedSwapped,
    FourByteIntSignedSwappedSwapped,
    FourByteFloat,
    FourByteFloatSwapped,
    FourByteFloatSwappedSwapped,
    FourByteBcd,
    FourByteBcdSwapped,
    FourByteMod10k,
    FourByteMod10kSwapped,
    SixByteMod10k,
    SixByteMod10kSwapped,
    EightByteIntUnsigned,
    EightByteIntSigned,
    EightByteIntUnsignedSwapped,
    EightByteIntSignedSwapped,
    EightByteIntUnsignedSwappedSwapped,
    EightByteIntSignedSwappedSwapped,
    EightByteFloat,
    EightByteFloatSwapped,
    EightByteFloatSwappedSwapped,
    EightByteMod10kSwapped,
    EightByteMod10k,
}
impl FromStr for DataType {
    type Err = ();
    /// Parses the exact (case-sensitive) variant name — the inverse of the
    /// `Display`/`Debug` output. Unrecognized names yield `Err(())`.
    fn from_str(value: &str) -> Result<Self, Self::Err> {
        let r = match value {
            "Binary" => DataType::Binary,
            "OneByteIntSigned" => DataType::OneByteIntSigned,
            "OneByteIntSignedLower" => DataType::OneByteIntSignedLower,
            "OneByteIntSignedUpper" => DataType::OneByteIntSignedUpper,
            "OneByteIntUnsigned" => DataType::OneByteIntUnsigned,
            "OneByteIntUnsignedLower" => DataType::OneByteIntUnsignedLower,
            "OneByteIntUnsignedUpper" => DataType::OneByteIntUnsignedUpper,
            "TwoByteIntUnsigned" => DataType::TwoByteIntUnsigned,
            "TwoByteIntSigned" => DataType::TwoByteIntSigned,
            "TwoByteIntSignedSwapped" => DataType::TwoByteIntSignedSwapped,
            "TwoByteBcd" => DataType::TwoByteBcd,
            "TwoByteIntUnsignedSwapped" => DataType::TwoByteIntUnsignedSwapped,
            "FourByteIntUnsigned" => DataType::FourByteIntUnsigned,
            "FourByteIntSigned" => DataType::FourByteIntSigned,
            "FourByteIntUnsignedSwapped" => DataType::FourByteIntUnsignedSwapped,
            "FourByteIntSignedSwapped" => DataType::FourByteIntSignedSwapped,
            "FourByteIntUnsignedSwappedSwapped" => DataType::FourByteIntUnsignedSwappedSwapped,
            "FourByteIntSignedSwappedSwapped" => DataType::FourByteIntSignedSwappedSwapped,
            "FourByteFloat" => DataType::FourByteFloat,
            "FourByteFloatSwapped" => DataType::FourByteFloatSwapped,
            "FourByteFloatSwappedSwapped" => DataType::FourByteFloatSwappedSwapped,
            "FourByteBcd" => DataType::FourByteBcd,
            "FourByteBcdSwapped" => DataType::FourByteBcdSwapped,
            "FourByteMod10k" => DataType::FourByteMod10k,
            "FourByteMod10kSwapped" => DataType::FourByteMod10kSwapped,
            "SixByteMod10k" => DataType::SixByteMod10k,
            "SixByteMod10kSwapped" => DataType::SixByteMod10kSwapped,
            "EightByteIntUnsigned" => DataType::EightByteIntUnsigned,
            "EightByteIntSigned" => DataType::EightByteIntSigned,
            "EightByteIntUnsignedSwapped" => DataType::EightByteIntUnsignedSwapped,
            "EightByteIntSignedSwapped" => DataType::EightByteIntSignedSwapped,
            "EightByteIntUnsignedSwappedSwapped" => DataType::EightByteIntUnsignedSwappedSwapped,
            "EightByteIntSignedSwappedSwapped" => DataType::EightByteIntSignedSwappedSwapped,
            "EightByteFloat" => DataType::EightByteFloat,
            "EightByteFloatSwapped" => DataType::EightByteFloatSwapped,
            // Fix: this arm was missing, so the Display -> FromStr round
            // trip failed for EightByteFloatSwappedSwapped.
            "EightByteFloatSwappedSwapped" => DataType::EightByteFloatSwappedSwapped,
            "EightByteMod10kSwapped" => DataType::EightByteMod10kSwapped,
            "EightByteMod10k" => DataType::EightByteMod10k,
            _ => return Err(()),
        };
        Ok(r)
    }
}
impl DataType {
pub fn get_byte_count(&self) -> u16 {
match self {
DataType::Binary => 1,
DataType::OneByteIntSigned => 1,
DataType::OneByteIntSignedLower => 1,
DataType::OneByteIntSignedUpper => 1,
DataType::OneByteIntUnsigned => 1,
DataType::OneByteIntUnsignedLower => 1,
DataType::OneByteIntUnsignedUpper => 1,
DataType::TwoByteIntUnsigned => 2,
DataType::TwoByteIntSigned => 2,
DataType::TwoByteIntSignedSwapped => 2,
DataType::TwoByteBcd => 2,
DataType::TwoByteIntUnsignedSwapped => 2,
DataType::FourByteIntUnsigned => 4,
DataType::FourByteIntSigned => 4,
DataType::FourByteIntUnsignedSwapped => 4,
DataType::FourByteIntSignedSwapped => 4,
DataType::FourByteIntUnsignedSwappedSwapped => 4,
DataType::FourByteIntSignedSwappedSwapped => 4,
DataType::FourByteFloat => 4,
DataType::FourByteFloatSwapped => 4,
DataType::FourByteFloatSwappedSwapped => 4,
DataType::FourByteBcd => 4,
DataType::FourByteBcdSwapped => 4,
DataType::FourByteMod10k => 4,
DataType::FourByteMod10kSwapped => 4,
DataType::SixByteMod10k => 6,
DataType::SixByteMod10kSwapped => 6,
DataType::EightByteIntUnsigned => 8,
DataType::EightByteIntSigned => 8,
DataType::EightByteIntUnsignedSwapped => 8,
DataType::EightByteIntSignedSwapped => 8,
DataType::EightByteIntUnsignedSwappedSwapped => 8,
DataType::EightByteIntSignedSwappedSwapped => 8,
DataType::EightByteFloat => 8,
DataType::EightByteFloatSwapped => 8,
DataType::EightByteFloatSwappedSwapped => 8,
DataType::EightByteMod10kSwapped => 8,
DataType::EightByteMod10k => 8,
}
}
}
/// Renders the variant name (identical to the `Debug` form), so that
/// `to_string()` output can be fed back through `FromStr`.
impl Display for DataType {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
/**
* @api {枚举_属性类型} /PropType PropType
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} U8 u8
* @apiSuccess {String} U16 u16
* @apiSuccess {String} U32 u32
* @apiSuccess {String} U64 u64
* @apiSuccess {String} I8 i8
* @apiSuccess {String} I16 i16
* @apiSuccess {String} I32 i32
* @apiSuccess {String} I64 i64
* @apiSuccess {String} F32 f32
* @apiSuccess {String} F64 f64
* @apiSuccess {String} Str str
* @apiSuccess {String} ComplexF32 f32类型复数
* @apiSuccess {String} ComplexF64 f64类型复数
* @apiSuccess {String} TensorF32 f32类型向量
* @apiSuccess {String} TensorF64 f64类型向量
* @apiSuccess {String} TensorComplexF32 f32类型复数向量
* @apiSuccess {String} TensorComplexF64 f64类型复数向量
* @apiSuccess {String} Unknown 未知
*/
/// Property (attribute) type tag.
#[repr(u8)]
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug, Copy, Default)]
pub enum PropType {
    // Discriminants start at 1, so 0 is never a valid tag value.
    U8 = 1,
    U16,
    U32,
    U64,
    I8,
    I16,
    I32,
    I64,
    F32,
    F64,
    Str,
    // complex numbers
    ComplexF32,
    ComplexF64,
    // tensors
    TensorF32,
    TensorF64,
    TensorComplexF32,
    TensorComplexF64,
    // unknown type; fixed discriminant 255
    #[default]
    Unknown = 255,
}
/**
* @api {枚举_属性值} /PropValue PropValue
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {Object} U8 {"U8": u8}
* @apiSuccess {Object} U16 {"U16": u16}
* @apiSuccess {Object} U32 {"U32": u32}
* @apiSuccess {Object} U64 {"U64": u64}
* @apiSuccess {Object} I8 {"I8": i8}
* @apiSuccess {Object} I16 {"I16": i16}
* @apiSuccess {Object} I32 {"I32": i32}
* @apiSuccess {Object} I64 {"I64": i64}
* @apiSuccess {Object} F32 {"F32": f32}
* @apiSuccess {Object} F64 {"F64": f64}
* @apiSuccess {Object} Str {"Str": String}
* @apiSuccess {Object} ComplexF32 f32类型复数,{"ComplexF32": tuple(f32, f32)}
* @apiSuccess {Object} ComplexF64 f64类型复数,{"ComplexF64": tuple(f64, f64)}
* @apiSuccess {Object} TensorF32 f32类型向量,{"TensorF32": tuple(usize[], f32[])}
* @apiSuccess {Object} TensorF64 f64类型向量,{"TensorF64": tuple(usize[], f64[])}
* @apiSuccess {Object} TensorComplexF32 f32类型复数向量,{"TensorComplexF32": tuple(usize[], tuple(f32, f32)[])}
* @apiSuccess {Object} TensorComplexF64 f64类型复数向量,{"TensorComplexF64": tuple(usize[], tuple(f64, f64)[])}
* @apiSuccess {String} Unknown 未知
*/
/// A property value; each variant carries the payload matching its
/// [`PropType`] tag.
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub enum PropValue {
    U8(u8),
    U16(u16),
    U32(u32),
    U64(u64),
    I8(i8),
    I16(i16),
    I32(i32),
    I64(i64),
    F32(f32),
    F64(f64),
    Str(String),
    // complex numbers: (real, imaginary)
    ComplexF32(f32, f32),
    ComplexF64(f64, f64),
    // tensors: first field is presumably the shape, second the flattened
    // elements — element ordering not specified here; confirm with producer
    TensorF32(Vec<usize>, Vec<f32>),
    TensorF64(Vec<usize>, Vec<f64>),
    TensorComplexF32(Vec<usize>, Vec<(f32, f32)>),
    TensorComplexF64(Vec<usize>, Vec<(f64, f64)>),
    Unknown,
}
/// 将枚举转换成字符串,调用to_string()方法
impl Display for PropValue {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
PropValue::U8(s) => write!(f, "{}", s),
PropValue::U16(s) => write!(f, "{}", s),
PropValue::U32(s) => write!(f, "{}", s),
PropValue::U64(s) => write!(f, "{}", s),
PropValue::I8(s) => write!(f, "{}", s),
PropValue::I16(s) => write!(f, "{}", s),
PropValue::I32(s) => write!(f, "{}", s),
PropValue::I64(s) => write!(f, "{}", s),
PropValue::F32(s) => write!(f, "{}", s),
PropValue::F64(s) => write!(f, "{}", s),
PropValue::Str(s) => write!(f, "{}", s),
PropValue::ComplexF32(_, _) => write!(f, ""),
PropValue::ComplexF64(_, _) => write!(f, ""),
PropValue::TensorF32(_, _) => write!(f, ""),
PropValue::TensorF64(_, _) => write!(f, ""),
PropValue::TensorComplexF32(_, _) => write!(f, ""),
PropValue::TensorComplexF64(_, _) => write!(f, ""),
PropValue::Unknown => write!(f, ""),
}
}
}
impl PropValue {
pub fn from_str(t: PropType, s: &str) -> Option<Self> {
let v = match t {
PropType::U8 => PropValue::U8(s.parse().ok()?),
PropType::U16 => PropValue::U16(s.parse().ok()?),
PropType::U32 => PropValue::U32(s.parse().ok()?),
PropType::U64 => PropValue::U64(s.parse().ok()?),
PropType::I8 => PropValue::I8(s.parse().ok()?),
PropType::I16 => PropValue::I16(s.parse().ok()?),
PropType::I32 => PropValue::I32(s.parse().ok()?),
PropType::I64 => PropValue::I64(s.parse().ok()?),
PropType::F32 => PropValue::F32(s.parse().ok()?),
PropType::F64 => PropValue::F64(s.parse().ok()?),
PropType::Str => PropValue::Str(s.parse().ok()?),
PropType::ComplexF32 => PropValue::Unknown,
PropType::ComplexF64 => PropValue::Unknown,
PropType::TensorF32 => PropValue::Unknown,
PropType::TensorF64 => PropValue::Unknown,
PropType::TensorComplexF32 => PropValue::Unknown,
PropType::TensorComplexF64 => PropValue::Unknown,
PropType::Unknown => PropValue::Unknown,
};
Some(v)
}
pub fn from_f64(t: PropType, f: f64) -> Option<Self> {
let v = match t {
PropType::U8 => PropValue::U8(f as u8),
PropType::U16 => PropValue::U16(f as u16),
PropType::U32 => PropValue::U32(f as u32),
PropType::U64 => PropValue::U64(f as u64),
PropType::I8 => PropValue::I8(f as i8),
PropType::I16 => PropValue::I16(f as i16),
PropType::I32 => PropValue::I32(f as i32),
PropType::I64 => PropValue::I64(f as i64),
PropType::F32 => PropValue::F32(f as f32),
PropType::F64 => PropValue::F64(f),
PropType::Str => PropValue::Str(f.to_string()),
PropType::ComplexF32 => PropValue::Unknown,
PropType::ComplexF64 => PropValue::Unknown,
PropType::TensorF32 => PropValue::Unknown,
PropType::TensorF64 => PropValue::Unknown,
PropType::TensorComplexF32 => PropValue::Unknown,
PropType::TensorComplexF64 => PropValue::Unknown,
PropType::Unknown => PropValue::Unknown,
};
Some(v)
}
pub fn get_f64(&self) -> Option<f64> {
match self {
PropValue::U8(s) => Some(*s as f64),
PropValue::U16(s) => Some(*s as f64),
PropValue::U32(s) => Some(*s as f64),
PropValue::U64(s) => Some(*s as f64),
PropValue::I8(s) => Some(*s as f64),
PropValue::I16(s) => Some(*s as f64),
PropValue::I32(s) => Some(*s as f64),
PropValue::I64(s) => Some(*s as f64),
PropValue::F32(s) => Some(*s as f64),
PropValue::F64(s) => Some(*s),
PropValue::Str(s) => s.parse().ok(),
_ => None,
}
}
pub fn get_bool(&self) -> Option<bool> {
match self {
PropValue::U8(s) => Some(*s > 0),
PropValue::U16(s) => Some(*s > 0),
PropValue::U32(s) => Some(*s > 0),
PropValue::U64(s) => Some(*s > 0),
PropValue::I8(s) => Some(*s > 0),
PropValue::I16(s) => Some(*s > 0),
PropValue::I32(s) => Some(*s > 0),
PropValue::I64(s) => Some(*s > 0),
PropValue::F32(s) => Some(*s > 0.),
PropValue::F64(s) => Some(*s > 0.),
PropValue::Str(s) => {
match s.to_uppercase().as_str() {
"TRUE" | "YES" | "T" | "Y" => Some(true),
"FALSE" | "NO" | "F" | "N" => Some(false),
_ => None
}
}
_ => None,
}
}
}
impl PropType {
    /// All property types in declaration order; used to iterate over the
    /// complete list of variants.
    pub const PS_PROP_TYPE: [PropType; 18] = [
        PropType::U8,
        PropType::U16,
        PropType::U32,
        PropType::U64,
        PropType::I8,
        PropType::I16,
        PropType::I32,
        PropType::I64,
        PropType::F32,
        PropType::F64,
        PropType::Str,
        PropType::ComplexF32,
        PropType::ComplexF64,
        PropType::TensorF32,
        PropType::TensorF64,
        PropType::TensorComplexF32,
        PropType::TensorComplexF64,
        PropType::Unknown,
    ];
}
impl FromStr for PropType {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
let p = match s.to_uppercase().as_str() {
"U8" => PropType::U8,
"U16" => PropType::U16,
"U32" => PropType::U32,
"U64" => PropType::U64,
"I8" => PropType::I8,
"I16" => PropType::I16,
"I32" => PropType::I32,
"I64" => PropType::I64,
"F32" => PropType::F32,
"F64" => PropType::F64,
"STR" => PropType::Str,
"COMPLEXF32" => PropType::ComplexF32,
"COMPLEXF64" => PropType::ComplexF64,
"TENSORF32" => PropType::TensorF32,
"TENSORF64" => PropType::TensorF64,
"TENSORCOMPLEXF32" => PropType::TensorComplexF32,
"TENSORCOMPLEXF64" => PropType::TensorComplexF64,
_ => PropType::Unknown,
};
Ok(p)
}
}
/// Converts the enum to a string (use via `to_string()`); renders the
/// variant name, identical to the `Debug` form.
impl Display for PropType {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
/**
* @api {枚举_数据单位} /DataUnit DataUnit
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} A 安培
* @apiSuccess {String} V 伏特
* @apiSuccess {String} kV 千伏
* @apiSuccess {String} W 瓦特
* @apiSuccess {String} kW 千瓦
* @apiSuccess {String} MW 兆瓦
* @apiSuccess {String} H 亨利
* @apiSuccess {String} mH 毫亨
* @apiSuccess {String} Ah 安时
* @apiSuccess {String} mAh 毫安时
* @apiSuccess {String} kWh 千瓦时
* @apiSuccess {String} Celsius 摄氏度
* @apiSuccess {String} feet feet
* @apiSuccess {String} km kilometer
* @apiSuccess {String} meter meter
* @apiSuccess {String} UnitOne 无单位
* @apiSuccess {String} Percent 百分比
* @apiSuccess {String} Unknown 其他未知单位
*/
/// Engineering unit attached to a measured value.
#[allow(non_camel_case_types)]
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Copy, Clone, Hash, Default)]
pub enum DataUnit {
    /// switch on or off
    OnOrOff,
    /// ampere
    A,
    /// volt
    V,
    /// kilovolt
    kV,
    /// watt
    W,
    /// kilowatt
    kW,
    /// megawatt
    MW,
    /// volt-ampere reactive (var)
    Var,
    /// kilovar
    kVar,
    /// megavar
    MVar,
    /// volt-ampere
    VA,
    /// kilovolt-ampere
    kVA,
    /// megavolt-ampere
    MVA,
    /// henry
    H,
    /// millihenry
    mH,
    /// ampere-hour
    Ah,
    /// milliampere-hour
    mAh,
    /// kilowatt-hour
    kWh,
    /// degree Celsius
    Celsius,
    /// feet
    feet,
    /// kilometer
    km,
    /// meter
    meter,
    /// square millimeter
    mm2,
    /// dimensionless (no unit)
    UnitOne,
    /// percent
    Percent,
    /// bit (data size)
    bit,
    /// byte
    B,
    /// kilobyte
    kB,
    /// megabyte
    MB,
    /// gigabyte
    GB,
    /// terabyte
    TB,
    /// petabyte
    PB,
    /// any other, unrecognized unit
    #[default]
    Unknown,
}
impl DataUnit {
    /// All data units in declaration order; used to iterate over the
    /// complete list of variants.
    pub const DATA_UNIT: [DataUnit; 33] = [
        DataUnit::OnOrOff,
        DataUnit::A,
        DataUnit::V,
        DataUnit::kV,
        DataUnit::W,
        DataUnit::kW,
        DataUnit::MW,
        DataUnit::Var,
        DataUnit::kVar,
        DataUnit::MVar,
        DataUnit::VA,
        DataUnit::kVA,
        DataUnit::MVA,
        DataUnit::H,
        DataUnit::mH,
        DataUnit::Ah,
        DataUnit::mAh,
        DataUnit::kWh,
        DataUnit::Celsius,
        DataUnit::feet,
        DataUnit::km,
        DataUnit::meter,
        DataUnit::mm2,
        DataUnit::UnitOne,
        DataUnit::Percent,
        DataUnit::bit,
        DataUnit::B,
        DataUnit::kB,
        DataUnit::MB,
        DataUnit::GB,
        DataUnit::TB,
        DataUnit::PB,
        DataUnit::Unknown,
    ];
}
impl FromStr for DataUnit {
    type Err = DataUnitError;
    /// Parses a unit symbol or name (English or Chinese), case-insensitively
    /// and ignoring surrounding whitespace. Unrecognized input yields
    /// `Ok(DataUnit::Unknown)`, so this currently never returns `Err`.
    fn from_str(s: &str) -> Result<DataUnit, DataUnitError> {
        match s.trim().to_uppercase().as_str() {
            "ON_OR_OFF" => Ok(DataUnit::OnOrOff),
            "ONOROFF" => Ok(DataUnit::OnOrOff),
            "ONOFF" => Ok(DataUnit::OnOrOff),
            "ON/OFF" => Ok(DataUnit::OnOrOff),
            "A" => Ok(DataUnit::A),
            "安" => Ok(DataUnit::A),
            "安培" => Ok(DataUnit::A),
            "V" => Ok(DataUnit::V),
            "伏" => Ok(DataUnit::V),
            // Fix: "伏特" means volt and previously mapped to kV.
            "伏特" => Ok(DataUnit::V),
            "KV" => Ok(DataUnit::kV),
            "千伏" => Ok(DataUnit::kV),
            "W" => Ok(DataUnit::W),
            "瓦" => Ok(DataUnit::W),
            "瓦特" => Ok(DataUnit::W),
            "KW" => Ok(DataUnit::kW),
            "千瓦" => Ok(DataUnit::kW),
            "MW" => Ok(DataUnit::MW),
            "兆瓦" => Ok(DataUnit::MW),
            "VA" => Ok(DataUnit::VA),
            "伏安" => Ok(DataUnit::VA),
            "KVA" => Ok(DataUnit::kVA),
            "千伏安" => Ok(DataUnit::kVA),
            "MVA" => Ok(DataUnit::MVA),
            "兆伏安" => Ok(DataUnit::MVA),
            "VAR" => Ok(DataUnit::Var),
            "乏" => Ok(DataUnit::Var),
            "KVAR" => Ok(DataUnit::kVar),
            "千乏" => Ok(DataUnit::kVar),
            "MVAR" => Ok(DataUnit::MVar),
            "兆乏" => Ok(DataUnit::MVar),
            "H" => Ok(DataUnit::H),
            "亨" => Ok(DataUnit::H),
            "亨利" => Ok(DataUnit::H),
            "MH" => Ok(DataUnit::mH),
            "毫亨" => Ok(DataUnit::mH),
            "AH" => Ok(DataUnit::Ah),
            "安时" => Ok(DataUnit::Ah),
            "MAH" => Ok(DataUnit::mAh),
            "毫安时" => Ok(DataUnit::mAh),
            "KWH" => Ok(DataUnit::kWh),
            "千瓦时" => Ok(DataUnit::kWh),
            "度" => Ok(DataUnit::kWh),
            "℃" => Ok(DataUnit::Celsius), // U+2103 DEGREE CELSIUS, a single character
            "°C" => Ok(DataUnit::Celsius), // two characters: U+00B0 DEGREE SIGN + uppercase 'C'
            "CELSIUS" => Ok(DataUnit::Celsius),
            "摄氏度" => Ok(DataUnit::Celsius),
            "FEET" => Ok(DataUnit::feet),
            "KM" => Ok(DataUnit::km),
            "M" => Ok(DataUnit::meter),
            "METER" => Ok(DataUnit::meter),
            "MM2" => Ok(DataUnit::mm2),
            "%" => Ok(DataUnit::Percent),
            "PERCENT" => Ok(DataUnit::Percent),
            "BIT" => Ok(DataUnit::bit),
            "BYTE" => Ok(DataUnit::B),
            "B" => Ok(DataUnit::B),
            "KB" => Ok(DataUnit::kB),
            "MB" => Ok(DataUnit::MB),
            "GB" => Ok(DataUnit::GB),
            "TB" => Ok(DataUnit::TB),
            "PB" => Ok(DataUnit::PB),
            "UNITONE" => Ok(DataUnit::UnitOne),
            // TODO: cover the remaining variants (e.g. "FT")
            _ => Ok(DataUnit::Unknown),
        }
    }
}
impl Display for DataUnit {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
DataUnit::OnOrOff => write!(f, "on_or_off"),
DataUnit::feet => write!(f, "ft"),
DataUnit::UnitOne => write!(f, ""),
DataUnit::Percent => write!(f, "%"),
DataUnit::Celsius => write!(f, "°C"),
DataUnit::Unknown => write!(f, ""),
_ => write!(f, "{:?}", self),
}
}
}
#[cfg(test)]
mod tests {
    use crate::prop::DataUnit;
    /// Celsius must render with the degree sign; previously this test only
    /// printed the value and asserted nothing.
    #[test]
    fn test() {
        let unit = DataUnit::Celsius;
        assert_eq!(unit.to_string(), "°C");
    }
}
\ No newline at end of file
include!(concat!(env!("OUT_DIR"), "/protos/mod.rs"));
use std::collections::HashMap;
use serde::{Serialize, Deserialize};
use crate::proto::eig::PbEigPingRes;
/**
* @api {Eig配置对象} /EigConfig EigConfig
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {Map} properties HashMap<String, String>
*/
/// Key/value configuration container exchanged between Eig peers.
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct EigConfig {
    /// Primary configuration entries.
    pub properties: HashMap<String, String>,
    /// Secondary configuration entries; the intended split between this and
    /// `properties` is not visible here — TODO confirm with the producer.
    pub properties2: HashMap<String, String>,
}
/// Envelope for configuration exchange messages.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConfigMsg {
    /// Message code; the meaning of each value is defined by the peers and
    /// is not visible in this file.
    pub code: u8,
    /// Optional configuration payload.
    pub config: Option<EigConfig>,
    /// Optional ping-response payload.
    pub ping: Option<PbEigPingRes>,
}
\ No newline at end of file
[package]
name = "eig-expr"
version = "0.1.0"
authors = ["dongshufeng <dongshufeng@zju.edu.cn>"]
edition.workspace = true
rust-version.workspace = true
[dependencies]
fnv = "1.0"
nom = "8.0"
serde = { version = "1.0", features = ["derive"] }
num-traits = "0.2"
num-complex = "0.4"
ndarray = "0.16"
\ No newline at end of file
use std;
use std::f64::consts;
use std::ops::Deref;
use std::rc::Rc;
use std::str::FromStr;
use fnv::FnvHashMap;
use ndarray::{Array, Array2, IxDyn};
use num_complex::{Complex, Complex64};
use crate::{ContextProvider, Error, Expr, factorial, MyCx, MyF};
use crate::{FuncEvalError, Operation, Token, Token::*};
use crate::shuntingyard::to_rpn;
use crate::tokenizer::tokenize;
/// Map type used for variable/function evaluation contexts, backed by the
/// FNV hasher (from the `fnv` crate) instead of the default SipHash.
type ContextHashMap<K, V> = FnvHashMap<K, V>;
/**
* @api {Expr} /Expr Expr
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {Token[]} rpn rpn
*/
/// Representation of a parsed expression.
///
/// The expression is internally stored in the [reverse Polish notation (RPN)][RPN] as a sequence
/// of `Token`s.
///
/// Methods `bind`, `bind_with_context`, `bind2`, ... can be used to create closures from
/// the expression that then can be passed around and used as any other `Fn` closures.
///
/// let func = "x^2".parse::<Expr>().unwrap().bind("x").unwrap();
/// let r = Some(2.).map(func);
/// assert_eq!(r, Some(4.));
///
/// [RPN]: https://en.wikipedia.org/wiki/Reverse_Polish_notation
impl Expr {
pub fn new() -> Expr {
Expr::default()
}
pub fn from_vec(rpn: Vec<Token>) -> Expr {
Expr { rpn }
}
/// Evaluates the expression.
pub fn eval(&self) -> Result<f64, Error> {
self.eval_with_context(builtin())
}
/// Evaluates the expression with variables given by the argument.
pub fn eval_with_context<C: ContextProvider>(&self, ctx: C) -> Result<f64, Error> {
let mut stack = Vec::with_capacity(16);
if self.rpn.is_empty() {
return Err(Error::EmptyExpression);
}
for token in &self.rpn {
match *token {
Var(ref n) => {
if let Some(v) = ctx.get_var(n) {
stack.push(v);
} else {
return Err(Error::UnknownVariable(n.clone()));
}
}
Number(f) => stack.push(f),
Binary(op) => {
let right = stack.pop().unwrap();
let left = stack.pop().unwrap();
let r = match op {
Operation::Plus => left + right,
Operation::Minus => left - right,
Operation::Times => left * right,
Operation::Div => left / right,
Operation::Rem => left % right,
Operation::Pow => left.powf(right),
// added by dsf, 2021.3
Operation::LessThan => {
if left < right {
1.0
} else {
0.0
}
}
Operation::GreatThan => {
if left > right {
1.0
} else {
0.0
}
}
Operation::LtOrEqual => {
if left <= right {
1.0
} else {
0.0
}
}
Operation::GtOrEqual => {
if left >= right {
1.0
} else {
0.0
}
}
Operation::Equal => {
if left == right {
1.0
} else {
0.0
}
}
Operation::Unequal => {
if left != right {
1.0
} else {
0.0
}
}
Operation::And => {
if (left > 0.0) && (right > 0.0) {
1.0
} else {
0.0
}
}
Operation::Or => {
if (left > 0.0) || (right > 0.0) {
1.0
} else {
0.0
}
}
Operation::BitAnd => (left as i64 & right as i64) as f64,
Operation::BitOr => (left as i64 | right as i64) as f64,
Operation::BitXor => (left as i64 ^ right as i64) as f64,
Operation::BitShl => ((left as i64) << (right as i64)) as f64,
Operation::BitShr => ((left as i64) >> (right as i64)) as f64,
Operation::BitAt => {
#[allow(clippy::manual_range_contains)]
if right < 1. || right > 64. {
return Err(Error::EvalError(format!(
"Operation \"@\" ERROR:the {:?} bit doesn't exist.",
right
)));
}
if (left as i64) & 2_i64.pow(right as u32 - 1) != 0 {
1.0
} else {
0.0
}
}
_ => {
return Err(Error::EvalError(format!(
"Unimplemented binary operation: {:?}",
op
)));
}
};
stack.push(r);
}
Unary(op) => {
let x = stack.pop().unwrap();
let r = match op {
Operation::Plus => x,
Operation::Minus => -x,
Operation::Not => {
if x > 0.0 {
0.0
} else {
1.0
}
},
Operation::BitNot => !(x as i64) as f64,
Operation::Fact => {
// Check to make sure x has no fractional component (can be converted to int without loss)
match factorial(x) {
Ok(res) => res,
Err(e) => return Err(Error::EvalError(String::from(e))),
}
}
_ => {
let msg = format!("Unimplemented unary operation: {:?}", op);
return Err(Error::EvalError(msg));
}
};
stack.push(r);
}
Func(ref n, Some(i)) => {
if stack.len() < i {
let msg = format!("eval: stack does not have enough arguments for function token {:?}", token);
return Err(Error::EvalError(msg));
}
match ctx.eval_func(n, &stack[stack.len() - i..]) {
Ok(r) => {
let nl = stack.len() - i;
stack.truncate(nl);
stack.push(r);
}
Err(e) => return Err(Error::Function(n.to_owned(), e)),
}
}
Func(ref n, None) => match ctx.eval_func(n, &[]) {
Ok(r) => {
stack.push(r);
}
Err(e) => return Err(Error::Function(n.to_owned(), e)),
},
_ => return Err(Error::EvalError(format!("Unrecognized token: {:?}", token))),
}
}
let mut r = stack.pop().expect("Stack is empty, this is impossible.");
if !stack.is_empty() {
return Err(Error::EvalError(format!("There are still {} items on the stack.", stack.len())));
}
// inf
if r.is_infinite() {
// warn!("the result of the expression is inf");
if r.is_sign_positive() {
r = f64::MAX;
} else {
r = f64::MIN;
}
}
Ok(r)
}
/// check expression is valid
pub fn check_validity(&self) -> bool {
let mut stack = Vec::with_capacity(16);
// 对模型进行检查
for token in &self.rpn {
match *token {
Var(_) => stack.push(0u8),
Number(_) => stack.push(0u8),
Binary(_) => {
if stack.len() < 2 {
return false;
}
stack.truncate(stack.len() - 1);
}
Unary(_) => {
if stack.is_empty() {
return false;
}
}
Tensor(size) => {
match size {
None => {},
Some(i) => {
if stack.len() < i {
return false;
}
let nl = stack.len() - i + 1;
stack.truncate(nl);
}
}
}
Func(_, Some(i)) => {
if stack.len() < i {
return false;
}
let nl = stack.len() - i;
stack.truncate(nl);
stack.push(0u8);
}
Func(_, None) => stack.push(0u8),
_ => return false,
}
}
stack.len() == 1
}
/// Creates a function of one variable based on this expression, with default constants and
/// functions.
///
/// Binds the input of the returned closure to `var`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by the default
/// context or `var`.
pub fn bind<'a>(self, var: &str) -> Result<impl Fn(f64) -> f64 + 'a, Error> {
self.bind_with_context(builtin(), var)
}
/// Creates a function of one variable based on this expression.
///
/// Binds the input of the returned closure to `var`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by `ctx` or
/// `var`.
pub fn bind_with_context<'a, C>(
self,
ctx: C,
var: &str,
) -> Result<impl Fn(f64) -> f64 + 'a, Error>
where
C: ContextProvider + 'a,
{
self.check_context(((var, 0.), &ctx))?;
let var = var.to_owned();
Ok(move |x| {
self.eval_with_context(((&var, x), &ctx))
.expect("Expr::bind")
})
}
/// Creates a function of two variables based on this expression, with default constants and
/// functions.
///
/// Binds the inputs of the returned closure to `var1` and `var2`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by the default
/// context or `var`.
pub fn bind2<'a>(self, var1: &str, var2: &str) -> Result<impl Fn(f64, f64) -> f64 + 'a, Error> {
self.bind2_with_context(builtin(), var1, var2)
}
/// Creates a function of two variables based on this expression.
///
/// Binds the inputs of the returned closure to `var1` and `var2`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by `ctx` or
/// `var`.
pub fn bind2_with_context<'a, C>(
self,
ctx: C,
var1: &str,
var2: &str,
) -> Result<impl Fn(f64, f64) -> f64 + 'a, Error>
where
C: ContextProvider + 'a,
{
self.check_context(([(var1, 0.), (var2, 0.)], &ctx))?;
let var1 = var1.to_owned();
let var2 = var2.to_owned();
Ok(move |x, y| {
self.eval_with_context(([(&var1, x), (&var2, y)], &ctx))
.expect("Expr::bind2")
})
}
/// Creates a function of three variables based on this expression, with default constants and
/// functions.
///
/// Binds the inputs of the returned closure to `var1`, `var2` and `var3`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by the default
/// context or `var`.
pub fn bind3<'a>(
self,
var1: &str,
var2: &str,
var3: &str,
) -> Result<impl Fn(f64, f64, f64) -> f64 + 'a, Error> {
self.bind3_with_context(builtin(), var1, var2, var3)
}
/// Creates a function of three variables based on this expression.
///
/// Binds the inputs of the returned closure to `var1`, `var2` and `var3`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by `ctx` or
/// `var`.
pub fn bind3_with_context<'a, C>(
self,
ctx: C,
var1: &str,
var2: &str,
var3: &str,
) -> Result<impl Fn(f64, f64, f64) -> f64 + 'a, Error>
where
C: ContextProvider + 'a,
{
self.check_context(([(var1, 0.), (var2, 0.), (var3, 0.)], &ctx))?;
let var1 = var1.to_owned();
let var2 = var2.to_owned();
let var3 = var3.to_owned();
Ok(move |x, y, z| {
self.eval_with_context(([(&var1, x), (&var2, y), (&var3, z)], &ctx))
.expect("Expr::bind3")
})
}
/// Creates a function of four variables based on this expression, with default constants and
/// functions.
///
/// Binds the inputs of the returned closure to `var1`, `var2`, `var3` and `var4`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by the default
/// context or `var`.
pub fn bind4<'a>(
self,
var1: &str,
var2: &str,
var3: &str,
var4: &str,
) -> Result<impl Fn(f64, f64, f64, f64) -> f64 + 'a, Error> {
self.bind4_with_context(builtin(), var1, var2, var3, var4)
}
/// Creates a function of four variables based on this expression.
///
/// Binds the inputs of the returned closure to `var1`, `var2`, `var3` and `var4`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by `ctx` or
/// `var`.
pub fn bind4_with_context<'a, C>(
self,
ctx: C,
var1: &str,
var2: &str,
var3: &str,
var4: &str,
) -> Result<impl Fn(f64, f64, f64, f64) -> f64 + 'a, Error>
where
C: ContextProvider + 'a,
{
self.check_context(([(var1, 0.), (var2, 0.), (var3, 0.), (var4, 0.)], &ctx))?;
let var1 = var1.to_owned();
let var2 = var2.to_owned();
let var3 = var3.to_owned();
let var4 = var4.to_owned();
Ok(move |x1, x2, x3, x4| {
self.eval_with_context(([(&var1, x1), (&var2, x2), (&var3, x3), (&var4, x4)], &ctx))
.expect("Expr::bind4")
})
}
/// Creates a function of five variables based on this expression, with default constants and
/// functions.
///
/// Binds the inputs of the returned closure to `var1`, `var2`, `var3`, `var4` and `var5`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by the default
/// context or `var`.
pub fn bind5<'a>(
self,
var1: &str,
var2: &str,
var3: &str,
var4: &str,
var5: &str,
) -> Result<impl Fn(f64, f64, f64, f64, f64) -> f64 + 'a, Error> {
self.bind5_with_context(builtin(), var1, var2, var3, var4, var5)
}
/// Creates a function of five variables based on this expression.
///
/// Binds the inputs of the returned closure to `var1`, `var2`, `var3`, `var4` and `var5`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by `ctx` or
/// `var`.
pub fn bind5_with_context<'a, C>(
self,
ctx: C,
var1: &str,
var2: &str,
var3: &str,
var4: &str,
var5: &str,
) -> Result<impl Fn(f64, f64, f64, f64, f64) -> f64 + 'a, Error>
where
C: ContextProvider + 'a,
{
self.check_context((
[(var1, 0.), (var2, 0.), (var3, 0.), (var4, 0.), (var5, 0.)],
&ctx,
))?;
let var1 = var1.to_owned();
let var2 = var2.to_owned();
let var3 = var3.to_owned();
let var4 = var4.to_owned();
let var5 = var5.to_owned();
Ok(move |x1, x2, x3, x4, x5| {
self.eval_with_context((
[
(&var1, x1),
(&var2, x2),
(&var3, x3),
(&var4, x4),
(&var5, x5),
],
&ctx,
))
.expect("Expr::bind5")
})
}
/// Creates a function of N variables based on this expression, with default constants and
/// functions.
///
/// Binds the input of the returned closure to elements of `vars`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by the default
/// context or `vars`.
pub fn bindn<'a>(self, vars: &'a [&str]) -> Result<impl Fn(&[f64]) -> f64 + 'a, Error> {
    self.bindn_with_context(builtin(), vars)
}
/// Creates a function of N variables based on this expression.
///
/// Binds the input of the returned closure to the elements of `vars`.
///
/// # Failure
///
/// Returns `Err` if there is a variable in the expression that is not provided by `ctx` or
/// `vars`.
pub fn bindn_with_context<'a, C>(
    self,
    ctx: C,
    vars: &'a [&str],
) -> Result<impl Fn(&[f64]) -> f64 + 'a, Error>
where
    C: ContextProvider + 'a,
{
    let n = vars.len();
    // Validate up front with dummy zero values so the closure cannot fail
    // on an unknown variable later.
    self.check_context((
        vars.iter().zip(vec![0.; n].into_iter()).collect::<Vec<_>>(),
        &ctx,
    ))?;
    // NOTE: `to_owned` on `&&str` resolves via the blanket `Clone` impl and
    // yields `&str`, so `vars` keeps borrowing from the caller's slice ('a).
    let vars = vars.iter().map(|v| v.to_owned()).collect::<Vec<_>>();
    Ok(move |x: &[f64]| {
        // Pair each variable name with its positional value; `zip` ignores
        // any surplus entries on either side.
        self.eval_with_context((
            vars.iter()
                .zip(x.iter())
                .map(|(v, x)| (v, *x))
                .collect::<Vec<_>>(),
            &ctx,
        ))
        .expect("Expr::bindn")
    })
}
/// Checks that the value of every variable in the expression is specified by the context `ctx`.
///
/// Each function call in the RPN stream is also probed with a vector of
/// zeros of its recorded arity, so arity/unknown-function errors surface
/// here instead of at evaluation time.
///
/// # Failure
///
/// Returns `Err` if a missing variable or a failing function is detected.
fn check_context<C: ContextProvider>(&self, ctx: C) -> Result<(), Error> {
    for t in &self.rpn {
        match *t {
            Var(ref name) => {
                if ctx.get_var(name).is_none() {
                    return Err(Error::UnknownVariable(name.clone()));
                }
            }
            Func(ref name, Some(i)) => {
                // Probe with `i` dummy arguments.
                let v = vec![0.; i];
                if let Err(e) = ctx.eval_func(name, &v) {
                    return Err(Error::Function(name.to_owned(), e));
                }
            }
            Func(_, None) => {
                // A function token without an arity should never survive RPN
                // conversion; treat it as an internal error.
                return Err(Error::EvalError(format!(
                    "expr::check_context: Unexpected token: {:?}",
                    *t
                )));
            }
            LParen | RParen | Binary(_) | Unary(_) | Comma | Number(_) => {}
            // Remaining token kinds (e.g. tensor literals) carry no names to check.
            _ => {}
        }
    }
    Ok(())
}
}
/// Evaluates a string with built-in constants and functions.
pub fn eval_str<S: AsRef<str>>(expr: S) -> Result<f64, Error> {
    // Parse, then evaluate against the built-in context.
    Expr::from_str(expr.as_ref()).and_then(|parsed| parsed.eval_with_context(builtin()))
}
impl FromStr for Expr {
type Err = Error;
/// Constructs an expression by parsing a string.
fn from_str(s: &str) -> Result<Self, Error> {
match tokenize(s) {
Ok(tokens) => match to_rpn(&tokens) {
Ok(rpn) => Ok(Expr { rpn }),
Err(e) => Err(Error::RPNError(e)),
},
Err(e) => Err(Error::ParseError(e)),
}
}
}
/// Evaluates a string with the given context.
///
/// No built-ins are defined in this case.
pub fn eval_str_with_context<S: AsRef<str>, C: ContextProvider>(
    expr: S,
    ctx: C,
) -> Result<f64, Error> {
    // Parse, then evaluate against the supplied context only.
    Expr::from_str(expr.as_ref()).and_then(|parsed| parsed.eval_with_context(ctx))
}
/// Lets an `Expr` be used directly as a slice of its RPN tokens.
impl Deref for Expr {
    type Target = [Token];
    fn deref(&self) -> &[Token] {
        &self.rpn
    }
}
/// Returns the largest value in `xs`; negative infinity for an empty slice.
#[doc(hidden)]
pub fn max_array(xs: &[f64]) -> f64 {
    let mut best = f64::NEG_INFINITY;
    for &x in xs {
        best = best.max(x);
    }
    best
}
/// Returns the smallest value in `xs`; positive infinity for an empty slice.
#[doc(hidden)]
pub fn min_array(xs: &[f64]) -> f64 {
    let mut best = f64::INFINITY;
    for &x in xs {
        best = best.min(x);
    }
    best
}
/// Returns the built-in constants and functions in a form that can be used as a `ContextProvider`.
#[doc(hidden)]
pub fn builtin<'a>() -> Context<'a> {
    // TODO: cache this (lazy_static)
    // `Context::new` clones a thread-local default, so this does not rebuild
    // the function table on every call.
    Context::new()
}
/// Blanket impl: a shared reference to a provider is itself a provider.
/// Every method delegates to the referenced value.
impl<'a, T: ContextProvider> ContextProvider for &'a T {
    fn get_var(&self, name: &str) -> Option<f64> {
        (**self).get_var(name)
    }
    fn get_tensor(&self, name: &str) -> Option<Array<f64, IxDyn>> {
        (**self).get_tensor(name)
    }
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        (**self).get_var_cx(name)
    }
    fn get_tensor_cx(&self, name: &str) -> Option<Array<Complex64, IxDyn>> {
        (**self).get_tensor_cx(name)
    }
    fn eval_func(&self, name: &str, args: &[f64]) -> Result<f64, FuncEvalError> {
        (**self).eval_func(name, args)
    }
    fn eval_func_cx(&self, name: &str, args: &[Complex64]) -> Result<Complex64, FuncEvalError> {
        (**self).eval_func_cx(name, args)
    }
    fn eval_func_tensor(&self, name: &str, args: &[MyF]) -> Result<MyF, FuncEvalError> {
        (**self).eval_func_tensor(name, args)
    }
    fn eval_func_tensor_cx(&self, name: &str, args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
        (**self).eval_func_tensor_cx(name, args)
    }
    fn matrix_inv(&self, arg: &Array2<f64>) -> Result<Array2<f64>, FuncEvalError> {
        (**self).matrix_inv(arg)
    }
    fn matrix_inv_cx(&self, arg: &Array2<Complex64>) -> Result<Array2<Complex64>, FuncEvalError> {
        (**self).matrix_inv_cx(arg)
    }
}
/// Blanket impl: a mutable reference to a provider is itself a provider.
/// Every method delegates to the referenced value.
impl<'a, T: ContextProvider> ContextProvider for &'a mut T {
    fn get_var(&self, name: &str) -> Option<f64> {
        (**self).get_var(name)
    }
    fn get_tensor(&self, name: &str) -> Option<Array<f64, IxDyn>> {
        (**self).get_tensor(name)
    }
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        (**self).get_var_cx(name)
    }
    fn get_tensor_cx(&self, name: &str) -> Option<Array<Complex64, IxDyn>> {
        (**self).get_tensor_cx(name)
    }
    fn eval_func(&self, name: &str, args: &[f64]) -> Result<f64, FuncEvalError> {
        (**self).eval_func(name, args)
    }
    fn eval_func_cx(&self, name: &str, args: &[Complex64]) -> Result<Complex64, FuncEvalError> {
        (**self).eval_func_cx(name, args)
    }
    fn eval_func_tensor(&self, name: &str, args: &[MyF]) -> Result<MyF, FuncEvalError> {
        (**self).eval_func_tensor(name, args)
    }
    fn eval_func_tensor_cx(&self, name: &str, args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
        (**self).eval_func_tensor_cx(name, args)
    }
    fn matrix_inv(&self, arg: &Array2<f64>) -> Result<Array2<f64>, FuncEvalError> {
        (**self).matrix_inv(arg)
    }
    fn matrix_inv_cx(&self, arg: &Array2<Complex64>) -> Result<Array2<Complex64>, FuncEvalError> {
        (**self).matrix_inv_cx(arg)
    }
}
/// A pair of providers: the first component is consulted first and the
/// second acts as a fallback. For functions the fallback is only tried when
/// the first provider reports `UnknownFunction`; any other error is final.
impl<T: ContextProvider, S: ContextProvider> ContextProvider for (T, S) {
    fn get_var(&self, name: &str) -> Option<f64> {
        self.0.get_var(name).or_else(|| self.1.get_var(name))
    }
    fn get_tensor(&self, name: &str) -> Option<Array<f64, IxDyn>> {
        self.0.get_tensor(name).or_else(|| self.1.get_tensor(name))
    }
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        self.0.get_var_cx(name).or_else(|| self.1.get_var_cx(name))
    }
    fn get_tensor_cx(&self, name: &str) -> Option<Array<Complex64, IxDyn>> {
        self.0
            .get_tensor_cx(name)
            .or_else(|| self.1.get_tensor_cx(name))
    }
    fn eval_func(&self, name: &str, args: &[f64]) -> Result<f64, FuncEvalError> {
        match self.0.eval_func(name, args) {
            Err(FuncEvalError::UnknownFunction) => self.1.eval_func(name, args),
            e => e,
        }
    }
    fn eval_func_cx(&self, name: &str, args: &[Complex64]) -> Result<Complex64, FuncEvalError> {
        match self.0.eval_func_cx(name, args) {
            Err(FuncEvalError::UnknownFunction) => self.1.eval_func_cx(name, args),
            e => e,
        }
    }
    fn eval_func_tensor(&self, name: &str, args: &[MyF]) -> Result<MyF, FuncEvalError> {
        match self.0.eval_func_tensor(name, args) {
            Err(FuncEvalError::UnknownFunction) => self.1.eval_func_tensor(name, args),
            e => e,
        }
    }
    fn eval_func_tensor_cx(&self, name: &str, args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
        match self.0.eval_func_tensor_cx(name, args) {
            Err(FuncEvalError::UnknownFunction) => self.1.eval_func_tensor_cx(name, args),
            e => e,
        }
    }
    fn matrix_inv(&self, v: &Array2<f64>) -> Result<Array2<f64>, FuncEvalError> {
        match self.0.matrix_inv(v) {
            Err(FuncEvalError::UnknownFunction) => self.1.matrix_inv(v),
            e => e,
        }
    }
    fn matrix_inv_cx(&self, v: &Array2<Complex64>) -> Result<Array2<Complex64>, FuncEvalError> {
        match self.0.matrix_inv_cx(v) {
            Err(FuncEvalError::UnknownFunction) => self.1.matrix_inv_cx(v),
            e => e,
        }
    }
}
/// A single `(name, value)` pair acts as a one-variable context.
impl<S: AsRef<str>> ContextProvider for (S, f64) {
    fn get_var(&self, name: &str) -> Option<f64> {
        match self.0.as_ref() {
            n if n == name => Some(self.1),
            _ => None,
        }
    }
}
/// `std::collections::HashMap` of variables.
impl<S> ContextProvider for std::collections::HashMap<S, f64>
where
    S: std::hash::Hash + Eq + std::borrow::Borrow<str>,
{
    fn get_var(&self, name: &str) -> Option<f64> {
        self.get(name).copied()
    }
    /// Real entries are promoted to complex numbers with zero imaginary part.
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        self.get(name).map(|&f| Complex::new(f, 0.))
    }
}
/// `std::collections::HashMap` of complex-valued variables.
impl<S> ContextProvider for std::collections::HashMap<S, Complex64>
where
    S: std::hash::Hash + Eq + std::borrow::Borrow<str>,
{
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        self.get(name).cloned()
    }
}
/// `std::collections::HashMap` of mixed scalar/tensor (`MyF`) variables.
impl<S> ContextProvider for std::collections::HashMap<S, MyF>
where
    S: std::hash::Hash + Eq + std::borrow::Borrow<str>,
{
    /// Scalar lookup: only `MyF::F64` entries are visible here.
    fn get_var(&self, name: &str) -> Option<f64> {
        match self.get(name) {
            Some(MyF::F64(f)) => Some(*f),
            _ => None,
        }
    }
    /// Scalar entries promoted to complex numbers with zero imaginary part.
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        match self.get(name) {
            Some(MyF::F64(f)) => Some(Complex64::new(*f, 0.)),
            _ => None,
        }
    }
    /// Tensor lookup: only `MyF::Tensor` entries are visible here.
    fn get_tensor(&self, name: &str) -> Option<Array<f64, IxDyn>> {
        match self.get(name) {
            Some(MyF::Tensor(v)) => Some(v.clone()),
            _ => None,
        }
    }
}
/// `std::collections::HashMap` of mixed complex scalar/tensor (`MyCx`) variables.
impl<S> ContextProvider for std::collections::HashMap<S, MyCx>
where
    S: std::hash::Hash + Eq + std::borrow::Borrow<str>,
{
    /// Scalar lookup: only `MyCx::F64` entries are visible here.
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        match self.get(name) {
            Some(MyCx::F64(f)) => Some(*f),
            _ => None,
        }
    }
    /// Tensor lookup: only `MyCx::Tensor` entries are visible here.
    fn get_tensor_cx(&self, name: &str) -> Option<Array<Complex64, IxDyn>> {
        match self.get(name) {
            Some(MyCx::Tensor(v)) => Some(v.clone()),
            _ => None,
        }
    }
}
/// `std::collections::HashMap` of tensor variables.
impl<S> ContextProvider for std::collections::HashMap<S, Array<f64, IxDyn>>
where
    S: std::hash::Hash + Eq + std::borrow::Borrow<str>,
{
    fn get_tensor(&self, name: &str) -> Option<Array<f64, IxDyn>> {
        self.get(name).cloned()
    }
}
/// `std::collections::BTreeMap` of variables.
impl<S> ContextProvider for std::collections::BTreeMap<S, f64>
where
    S: Ord + std::borrow::Borrow<str>,
{
    fn get_var(&self, name: &str) -> Option<f64> {
        self.get(name).copied()
    }
    /// Real entries are promoted to complex numbers with zero imaginary part.
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        self.get(name).map(|&f| Complex::new(f, 0.))
    }
}
/// `std::collections::BTreeMap` of complex-valued variables.
impl<S> ContextProvider for std::collections::BTreeMap<S, Complex64>
where
    S: Ord + std::borrow::Borrow<str>,
{
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        self.get(name).cloned()
    }
}
/// `std::collections::BTreeMap` of tensor variables.
impl<S> ContextProvider for std::collections::BTreeMap<S, Array<f64, IxDyn>>
where
    S: Ord + std::borrow::Borrow<str>,
{
    fn get_tensor(&self, name: &str) -> Option<Array<f64, IxDyn>> {
        self.get(name).cloned()
    }
}
/// `std::collections::BTreeMap` of mixed scalar/tensor (`MyF`) variables.
impl<S> ContextProvider for std::collections::BTreeMap<S, MyF>
where
    S: Ord + std::borrow::Borrow<str>,
{
    /// Scalar lookup: only `MyF::F64` entries are visible here.
    fn get_var(&self, name: &str) -> Option<f64> {
        match self.get(name) {
            Some(MyF::F64(f)) => Some(*f),
            _ => None,
        }
    }
    /// Scalar entries promoted to complex numbers with zero imaginary part.
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        match self.get(name) {
            Some(MyF::F64(f)) => Some(Complex64::new(*f, 0.)),
            _ => None,
        }
    }
    /// Tensor lookup: only `MyF::Tensor` entries are visible here.
    fn get_tensor(&self, name: &str) -> Option<Array<f64, IxDyn>> {
        match self.get(name) {
            Some(MyF::Tensor(v)) => Some(v.clone()),
            _ => None,
        }
    }
}
/// `std::collections::BTreeMap` of mixed complex scalar/tensor (`MyCx`) variables.
impl<S> ContextProvider for std::collections::BTreeMap<S, MyCx>
where
    S: Ord + std::borrow::Borrow<str>,
{
    /// Scalar lookup: only `MyCx::F64` entries are visible here.
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        match self.get(name) {
            Some(MyCx::F64(f)) => Some(*f),
            _ => None,
        }
    }
    /// Tensor lookup: only `MyCx::Tensor` entries are visible here.
    fn get_tensor_cx(&self, name: &str) -> Option<Array<Complex64, IxDyn>> {
        match self.get(name) {
            Some(MyCx::Tensor(v)) => Some(v.clone()),
            _ => None,
        }
    }
}
/// A vector of `(name, value)` pairs; the first matching name wins.
impl<S: AsRef<str>> ContextProvider for Vec<(S, f64)> {
    fn get_var(&self, name: &str) -> Option<f64> {
        self.iter()
            .find(|(n, _)| n.as_ref() == name)
            .map(|(_, v)| *v)
    }
}
// Macro implementing `ContextProvider` for fixed-size arrays of
// `(name, value)` pairs; `$N` is the array length.
macro_rules! array_impls {
    ($($N:expr)+) => {
        $(
            impl<S: AsRef<str>> ContextProvider for [(S, f64); $N] {
                fn get_var(&self, name: &str) -> Option<f64> {
                    // Linear scan; the first matching name wins.
                    for &(ref n, v) in self.iter() {
                        if n.as_ref() == name {
                            return Some(v);
                        }
                    }
                    None
                }
            }
        )+
    }
}
// Provide implementations for arrays of length 0 through 8.
array_impls! {
    0 1 2 3 4 5 6 7 8
}
/// A structure for storing variables/constants and functions to be used in an expression.
///
/// # Example
///
/// ```ignore
/// use {eval_str_with_context, Context};
///
/// let mut ctx = Context::new(); // builtins
/// ctx.var("x", 3.)
///     .func1("f", |x| 2. * x)
///     .funcn("sum", |xs| xs.iter().sum(), ..);
///
/// assert_eq!(eval_str_with_context("pi + sum(1., 2.) + f(x)", &ctx),
///            Ok(std::f64::consts::PI + 1. + 2. + 2. * 3.));
/// ```
#[derive(Clone)]
pub struct Context<'a> {
    // Variable/constant table.
    vars: ContextHashMap<String, f64>,
    // Function table; each entry validates its argument count before calling.
    funcs: ContextHashMap<String, GuardedFunc<'a>>,
    // tensors: ContextHashMap<String, Tensor<'a, f32>>,
}
impl<'a> Context<'a> {
    /// Creates a context with built-in constants and functions.
    pub fn new() -> Context<'a> {
        // The default context is built once per thread and cloned per call,
        // which is cheaper than re-registering every builtin each time.
        thread_local!(static DEFAULT_CONTEXT: Context<'static> = {
            let mut ctx = Context::empty();
            // Constants.
            ctx.var("pi", consts::PI);
            ctx.var("PI", consts::PI);
            ctx.var("e", consts::E);
            // One-argument functions straight from `f64`.
            ctx.func1("sqrt", f64::sqrt);
            ctx.func1("exp", f64::exp);
            ctx.func1("ln", f64::ln);
            ctx.func1("log10", f64::log10);
            ctx.func1("abs", f64::abs);
            ctx.func1("sin", f64::sin);
            ctx.func1("cos", f64::cos);
            ctx.func1("tan", f64::tan);
            ctx.func1("asin", f64::asin);
            ctx.func1("acos", f64::acos);
            ctx.func1("atan", f64::atan);
            ctx.func1("sinh", f64::sinh);
            ctx.func1("cosh", f64::cosh);
            ctx.func1("tanh", f64::tanh);
            ctx.func1("asinh", f64::asinh);
            ctx.func1("acosh", f64::acosh);
            ctx.func1("atanh", f64::atanh);
            ctx.func1("floor", f64::floor);
            ctx.func1("ceil", f64::ceil);
            ctx.func1("round", f64::round);
            ctx.func1("signum", f64::signum);
            ctx.func2("atan2", f64::atan2);
            // Variadic reductions (require at least one argument).
            ctx.funcn("max", max_array, 1..);
            ctx.funcn("min", min_array, 1..);
            ctx
        });
        DEFAULT_CONTEXT.with(|ctx| ctx.clone())
    }
    /// Creates an empty context.
    pub fn empty() -> Context<'a> {
        Context {
            vars: ContextHashMap::default(),
            funcs: ContextHashMap::default(),
            // tensors: ContextHashMap::default(),
        }
    }
    /// Adds a new variable/constant.
    pub fn var<S: Into<String>>(&mut self, var: S, value: f64) -> &mut Self {
        self.vars.insert(var.into(), value);
        self
    }
    /// Adds a new function of zero arguments (a constant-producing function).
    pub fn func0<S, F>(&mut self, name: S, func: F) -> &mut Self
    where
        S: Into<String>,
        F: Fn() -> f64 + 'a,
    {
        // Any supplied arguments are ignored.
        self.funcs.insert(name.into(), Rc::new(move |_| Ok(func())));
        self
    }
    /// Adds a new function of one argument.
    pub fn func1<S, F>(&mut self, name: S, func: F) -> &mut Self
    where
        S: Into<String>,
        F: Fn(f64) -> f64 + 'a,
    {
        self.funcs.insert(
            name.into(),
            Rc::new(move |args: &[f64]| {
                if args.len() == 1 {
                    Ok(func(args[0]))
                } else {
                    Err(FuncEvalError::NumberArgs(1))
                }
            }),
        );
        self
    }
    // pub fn tensor<S: Into<String>>(&mut self, var: S, tensor: Tensor<'a, f32>) -> &mut Self
    // {
    //     self.tensors.insert(var.into(), tensor);
    //     self
    // }
    //
    /// Adds a new function of two arguments.
    pub fn func2<S, F>(&mut self, name: S, func: F) -> &mut Self
    where
        S: Into<String>,
        F: Fn(f64, f64) -> f64 + 'a,
    {
        self.funcs.insert(
            name.into(),
            Rc::new(move |args: &[f64]| {
                if args.len() == 2 {
                    Ok(func(args[0], args[1]))
                } else {
                    Err(FuncEvalError::NumberArgs(2))
                }
            }),
        );
        self
    }
    /// Adds a new function of three arguments.
    pub fn func3<S, F>(&mut self, name: S, func: F) -> &mut Self
    where
        S: Into<String>,
        F: Fn(f64, f64, f64) -> f64 + 'a,
    {
        self.funcs.insert(
            name.into(),
            Rc::new(move |args: &[f64]| {
                if args.len() == 3 {
                    Ok(func(args[0], args[1], args[2]))
                } else {
                    Err(FuncEvalError::NumberArgs(3))
                }
            }),
        );
        self
    }
    /// Adds a new function of a variable number of arguments.
    ///
    /// `n_args` specifies the allowed number of variables by giving an exact number `n` or a range
    /// `n..m`, `..`, `n..`, `..m`. The range is half-open, exclusive on the right, as is common in
    /// Rust standard library.
    ///
    /// # Example
    ///
    /// ```ignore
    /// let mut ctx = Context::empty();
    ///
    /// // require exactly 2 arguments
    /// ctx.funcn("sum_two", |xs| xs[0] + xs[1], 2);
    ///
    /// // allow an arbitrary number of arguments
    /// ctx.funcn("sum", |xs| xs.iter().sum(), ..);
    /// ```
    pub fn funcn<S, F, N>(&mut self, name: S, func: F, n_args: N) -> &mut Self
    where
        S: Into<String>,
        F: Fn(&[f64]) -> f64 + 'a,
        N: ArgGuard,
    {
        self.funcs.insert(name.into(), n_args.to_arg_guard(func));
        self
    }
}
/// The default context is the one with all built-ins registered.
impl<'a> Default for Context<'a> {
    fn default() -> Self {
        Context::new()
    }
}
/// A type-erased, arity-checked function as stored in a `Context`.
type GuardedFunc<'a> = Rc<dyn Fn(&[f64]) -> Result<f64, FuncEvalError> + 'a>;
/// Trait for types that can specify the number of required arguments for a function with a
/// variable number of arguments.
///
/// # Example
///
/// ```ignore
/// let mut ctx = Context::empty();
///
/// // require exactly 2 arguments
/// ctx.funcn("sum_two", |xs| xs[0] + xs[1], 2);
///
/// // allow an arbitrary number of arguments
/// ctx.funcn("sum", |xs| xs.iter().sum(), ..);
/// ```
pub trait ArgGuard {
    /// Wraps `func` in a closure that validates the argument count before calling it.
    fn to_arg_guard<'a, F: Fn(&[f64]) -> f64 + 'a>(self, func: F) -> GuardedFunc<'a>;
}
/// Exact-arity guard: the function accepts precisely `self` arguments.
impl ArgGuard for usize {
    fn to_arg_guard<'a, F: Fn(&[f64]) -> f64 + 'a>(self, func: F) -> GuardedFunc<'a> {
        Rc::new(move |args: &[f64]| {
            if args.len() == self {
                Ok(func(args))
            } else {
                // FIX: report the actual expected arity. This was hard-coded
                // to `NumberArgs(1)` (a copy-paste from `func1`), producing a
                // misleading error for any guard with self != 1.
                Err(FuncEvalError::NumberArgs(self))
            }
        })
    }
}
/// At-least-`start` arguments guard (`n..`).
impl ArgGuard for std::ops::RangeFrom<usize> {
    fn to_arg_guard<'a, F: Fn(&[f64]) -> f64 + 'a>(self, func: F) -> GuardedFunc<'a> {
        Rc::new(move |args: &[f64]| {
            if args.len() >= self.start {
                Ok(func(args))
            } else {
                Err(FuncEvalError::TooFewArguments)
            }
        })
    }
}
/// Fewer-than-`end` arguments guard (`..m`, exclusive upper bound).
impl ArgGuard for std::ops::RangeTo<usize> {
    fn to_arg_guard<'a, F: Fn(&[f64]) -> f64 + 'a>(self, func: F) -> GuardedFunc<'a> {
        Rc::new(move |args: &[f64]| {
            if args.len() < self.end {
                Ok(func(args))
            } else {
                Err(FuncEvalError::TooManyArguments)
            }
        })
    }
}
/// Half-open range guard (`n..m`): accepts `start <= len < end`.
impl ArgGuard for std::ops::Range<usize> {
    fn to_arg_guard<'a, F: Fn(&[f64]) -> f64 + 'a>(self, func: F) -> GuardedFunc<'a> {
        Rc::new(move |args: &[f64]| {
            if args.len() >= self.start && args.len() < self.end {
                Ok(func(args))
            } else if args.len() < self.start {
                Err(FuncEvalError::TooFewArguments)
            } else {
                Err(FuncEvalError::TooManyArguments)
            }
        })
    }
}
/// Unrestricted guard (`..`): any number of arguments is accepted.
impl ArgGuard for std::ops::RangeFull {
    fn to_arg_guard<'a, F: Fn(&[f64]) -> f64 + 'a>(self, func: F) -> GuardedFunc<'a> {
        Rc::new(move |args: &[f64]| Ok(func(args)))
    }
}
impl<'a> ContextProvider for Context<'a> {
    /// Looks up a variable/constant by name.
    fn get_var(&self, name: &str) -> Option<f64> {
        self.vars.get(name).copied()
    }
    /// Invokes a registered function; unknown names yield `UnknownFunction`.
    fn eval_func(&self, name: &str, args: &[f64]) -> Result<f64, FuncEvalError> {
        match self.funcs.get(name) {
            Some(f) => f(args),
            None => Err(FuncEvalError::UnknownFunction),
        }
    }
}
\ No newline at end of file
use std::f64::consts::PI;
use std::rc::Rc;
use fnv::FnvHashMap;
use num_complex::Complex64;
use num_traits::identities::One;
use num_traits::Zero;
use crate::{Expr, FuncEvalError};
use crate::{ContextProvider, Error, factorial};
use crate::Operation;
use crate::Token::*;
impl Expr {
pub fn eval_complex(&self) -> Result<Complex64, Error> {
self.eval_complex_with_ctx(ContextCx::new())
}
pub fn eval_complex_with_ctx<C: ContextProvider>(&self, ctx: C) -> Result<Complex64, Error> {
let mut stack = Vec::with_capacity(16);
if self.rpn.is_empty() {
return Err(Error::EmptyExpression);
}
for token in &self.rpn {
match *token {
Var(ref n) => {
if let Some(v) = ctx.get_var(n) {
stack.push(Complex64::new(v, 0.));
} else if let Some(v) = ctx.get_var_cx(n) {
stack.push(v);
} else {
return Err(Error::UnknownVariable(n.clone()));
}
}
Number(f) => stack.push(Complex64::new(f, 0.)),
Binary(op) => {
let right = stack.pop().unwrap();
let left = stack.pop().unwrap();
let r = match op {
Operation::Plus => left + right,
Operation::Minus => left - right,
Operation::Times => left * right,
Operation::Div => left / right,
Operation::Rem => left % right,
Operation::Pow => left.powf(right.re),
// added by dsf, 2021.3
Operation::LessThan => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
if left.re < right.re {
Complex64::one()
} else {
Complex64::zero()
}
}
Operation::GreatThan => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
if left.re > right.re {
Complex64::one()
} else {
Complex64::zero()
}
}
Operation::LtOrEqual => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
if left.re <= right.re {
Complex64::one()
} else {
Complex64::zero()
}
}
Operation::GtOrEqual => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
if left.re >= right.re {
Complex64::one()
} else {
Complex64::zero()
}
}
Operation::Equal => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
if left.re == right.re {
Complex64::one()
} else {
Complex64::zero()
}
}
Operation::Unequal => {
if left != right {
Complex64::one()
} else {
Complex64::zero()
}
}
Operation::And => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
if (left.re > 0.0) && (right.re > 0.0) {
Complex64::one()
} else {
Complex64::zero()
}
}
Operation::Or => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
if (left.re > 0.0) || (right.re > 0.0) {
Complex64::one()
} else {
Complex64::zero()
}
}
Operation::BitAnd => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
Complex64::new((left.re as i64 & right.re as i64) as f64, 0.)
}
Operation::BitOr => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
Complex64::new((left.re as i64 | right.re as i64) as f64, 0.)
}
Operation::BitXor => {
if left.im.is_zero() || right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
Complex64::new((left.re as i64 ^ right.re as i64) as f64, 0.)
}
Operation::BitShl => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
Complex64::new(((left.re as i64) << (right.re as i64)) as f64, 0.)
}
Operation::BitShr => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
Complex64::new(((left.re as i64) >> (right.re as i64)) as f64, 0.)
}
Operation::BitAt => {
if !left.im.is_zero() || !right.im.is_zero() {
return Err(Error::EvalError(format!(
"Wrong input of for op : {:?}",
op
)));
}
if right.re < 1. || right.re > 64. {
return Err(Error::EvalError(format!(
"Operation \"@\" ERROR:the {:?} bit doesn't exist.",
right
)));
}
if (left.re as i64) & 2_i64.pow(right.re as u32 - 1) != 0 {
Complex64::one()
} else {
Complex64::zero()
}
}
_ => {
return Err(Error::EvalError(format!(
"Unimplemented binary operation: {:?}",
op
)));
}
};
stack.push(r);
}
Unary(op) => {
let x = stack.pop().unwrap();
let r = match op {
Operation::Plus => x,
Operation::Minus => -x,
Operation::Not => {
if x.re > 0. {
Complex64::one()
} else {
Complex64::zero()
}
}
Operation::BitNot => {
Complex64::new(!(x.re as i64) as f64, 0.)
}
Operation::Fact => {
// Check to make sure x has no fractional component (can be converted to int without loss)
match factorial(x.re) {
Ok(res) => Complex64::new(res, 0.),
Err(e) => return Err(Error::EvalError(String::from(e))),
}
}
_ => {
return Err(Error::EvalError(format!(
"Unimplemented unary operation: {:?}",
op
)));
}
};
stack.push(r);
}
Func(ref n, Some(i)) => {
if stack.len() < i {
return Err(Error::EvalError(format!(
"eval: stack does not have enough arguments for function token \
{:?}",
token
)));
}
match ctx.eval_func_cx(n, &stack[stack.len() - i..]) {
Ok(r) => {
let nl = stack.len() - i;
stack.truncate(nl);
stack.push(r);
}
Err(e) => return Err(Error::Function(n.to_owned(), e)),
}
}
Func(ref n, None) => match ctx.eval_func_cx(n, &[]) {
Ok(r) => {
stack.push(r);
}
Err(e) => return Err(Error::Function(n.to_owned(), e)),
},
_ => return Err(Error::EvalError(format!("Unrecognized token: {:?}", token))),
}
}
let r = stack.pop().expect("Stack is empty, this is impossible.");
if !stack.is_empty() {
return Err(Error::EvalError(format!(
"There are still {} items on the stack.",
stack.len()
)));
}
Ok(r)
}
}
/// Builds a complex number from the real parts of `r` (real) and `i` (imaginary).
#[doc(hidden)]
pub fn new_cx(r: Complex64, i: Complex64) -> Complex64 {
    Complex64::new(r.re, i.re)
}
/// Builds a complex number from polar form: magnitude `r.re`, angle `i.re` in radians.
#[doc(hidden)]
pub fn new_cx_rad(r: Complex64, i: Complex64) -> Complex64 {
    Complex64::new(r.re * i.re.cos(), r.re * i.re.sin())
}
/// Builds a complex number from polar form: magnitude `r.re`, angle `i.re` in degrees.
#[doc(hidden)]
pub fn new_cx_angle(r: Complex64, i: Complex64) -> Complex64 {
    let rad = PI * i.re / 180.;
    Complex64::new(r.re * rad.cos(), r.re * rad.sin())
}
/// Modulus |v| as a real-valued complex number.
#[doc(hidden)]
pub fn abs(v: Complex64) -> Complex64 {
    Complex64::new(v.norm(), 0.)
}
/// Floor of the real part; the imaginary part is discarded.
#[doc(hidden)]
pub fn floor(v: Complex64) -> Complex64 {
    Complex64::new(v.re.floor(), 0.)
}
/// Ceiling of the real part; the imaginary part is discarded.
#[doc(hidden)]
pub fn ceil(v: Complex64) -> Complex64 {
    Complex64::new(v.re.ceil(), 0.)
}
/// Rounds the real part; the imaginary part is discarded.
#[doc(hidden)]
pub fn round(v: Complex64) -> Complex64 {
    Complex64::new(v.re.round(), 0.)
}
/// Sign of the real part; the imaginary part is discarded.
#[doc(hidden)]
pub fn signum(v: Complex64) -> Complex64 {
    Complex64::new(v.re.signum(), 0.)
}
/// Complex conjugate.
#[doc(hidden)]
pub fn conjugate(v: Complex64) -> Complex64 {
    Complex64::new(v.re, -v.im)
}
/// Real part of `v` (imaginary part zeroed).
#[doc(hidden)]
pub fn real(v: Complex64) -> Complex64 {
    Complex64::new(v.re, 0.)
}
/// Imaginary part of `v`, kept in the imaginary slot (real part zeroed).
#[doc(hidden)]
pub fn imag(v: Complex64) -> Complex64 {
    Complex64::new(0., v.im)
}
/// Phase angle (argument) of `v` in radians, as a real-valued complex number.
#[doc(hidden)]
pub fn radian(v: Complex64) -> Complex64 {
    Complex64::new(v.im.atan2(v.re), 0.)
}
/// `atan2` of the operands' real parts; imaginary parts are ignored.
#[doc(hidden)]
pub fn atan2(v1: Complex64, v2: Complex64) -> Complex64 {
    Complex64::new(v1.re.atan2(v2.re), 0.)
}
/// Maximum of the real parts (imaginary parts ignored); -inf for an empty slice.
#[doc(hidden)]
pub fn max_array(xs: &[Complex64]) -> Complex64 {
    xs.iter()
        .fold(Complex64::new(f64::NEG_INFINITY, 0.), |m, &x| {
            Complex64::new(m.re.max(x.re), 0.)
        })
}
/// Minimum of the real parts (imaginary parts ignored); +inf for an empty slice.
#[doc(hidden)]
pub fn min_array(xs: &[Complex64]) -> Complex64 {
    xs.iter().fold(Complex64::new(f64::INFINITY, 0.), |m, &x| {
        Complex64::new(m.re.min(x.re), 0.)
    })
}
/// A context of variables/constants and functions for complex-valued
/// expression evaluation (the complex counterpart of `Context`).
#[derive(Clone)]
pub struct ContextCx<'a> {
    // Real-valued variables.
    vars: FnvHashMap<String, f64>,
    // Complex-valued variables.
    vars_cx: FnvHashMap<String, Complex64>,
    // Function table; each entry validates its argument count before calling.
    funcs: FnvHashMap<String, GuardedFuncCx<'a>>,
    // tensors: ContextHashMap<String, Tensor<'a, f32>>,
}
impl<'a> ContextCx<'a> {
    /// Creates a context with built-in constants and functions.
    pub fn new() -> ContextCx<'a> {
        // Built once per thread and cloned per call, which is cheaper than
        // re-registering every builtin each time.
        thread_local!(static DEFAULT_CONTEXT: ContextCx<'static> = {
            let mut ctx = ContextCx::empty();
            // Constants.
            ctx.var("pi", PI);
            ctx.var("PI", PI);
            ctx.var("e", std::f64::consts::E);
            // One-argument functions; some come straight from `Complex64`,
            // the rest are real-part helpers defined in this module.
            ctx.func1("abs", abs);
            ctx.func1("sqrt", Complex64::sqrt);
            ctx.func1("exp", Complex64::exp);
            ctx.func1("ln", Complex64::ln);
            ctx.func1("log10", Complex64::log10);
            ctx.func1("sin", Complex64::sin);
            ctx.func1("cos", Complex64::cos);
            ctx.func1("tan", Complex64::tan);
            ctx.func1("asin", Complex64::asin);
            ctx.func1("acos", Complex64::acos);
            ctx.func1("atan", Complex64::atan);
            ctx.func1("sinh", Complex64::sinh);
            ctx.func1("cosh", Complex64::cosh);
            ctx.func1("tanh", Complex64::tanh);
            ctx.func1("asinh", Complex64::asinh);
            ctx.func1("acosh", Complex64::acosh);
            ctx.func1("atanh", Complex64::atanh);
            ctx.func1("floor", floor);
            ctx.func1("ceil", ceil);
            ctx.func1("round", round);
            ctx.func1("signum", signum);
            ctx.func1("conj", conjugate);
            ctx.func1("real", real);
            ctx.func1("imag", imag);
            ctx.func1("rad", radian);
            ctx.func2("atan2", atan2);
            // Builds a complex number from (real, imaginary) parts.
            ctx.func2("c", new_cx);
            // Builds a complex number from polar form (magnitude, radians).
            ctx.func2("c1", new_cx_rad);
            // Builds a complex number from polar form (magnitude, degrees).
            ctx.func2("c2", new_cx_angle);
            // Variadic reductions (require at least one argument).
            ctx.funcn("max", max_array, 1..);
            ctx.funcn("min", min_array, 1..);
            ctx
        });
        DEFAULT_CONTEXT.with(|ctx| ctx.clone())
    }
    /// Creates an empty context.
    pub fn empty() -> ContextCx<'a> {
        ContextCx {
            vars: FnvHashMap::default(),
            vars_cx: Default::default(),
            funcs: FnvHashMap::default(),
        }
    }
    /// Adds a new real-valued variable/constant.
    pub fn var<S: Into<String>>(&mut self, var: S, value: f64) -> &mut Self {
        self.vars.insert(var.into(), value);
        self
    }
    /// Adds a new complex-valued variable/constant.
    pub fn var_cx<S: Into<String>>(&mut self, var: S, value: Complex64) -> &mut Self {
        self.vars_cx.insert(var.into(), value);
        self
    }
    /// Adds a new function of zero arguments (a constant-producing function).
    pub fn func0<S, F>(&mut self, name: S, func: F) -> &mut Self
    where
        S: Into<String>,
        F: Fn() -> Complex64 + 'a,
    {
        // Any supplied arguments are ignored.
        self.funcs.insert(name.into(), Rc::new(move |_| Ok(func())));
        self
    }
    /// Adds a new function of one argument.
    pub fn func1<S, F>(&mut self, name: S, func: F) -> &mut Self
    where
        S: Into<String>,
        F: Fn(Complex64) -> Complex64 + 'a,
    {
        self.funcs.insert(
            name.into(),
            Rc::new(move |args: &[Complex64]| {
                if args.len() == 1 {
                    Ok(func(args[0]))
                } else {
                    Err(FuncEvalError::NumberArgs(1))
                }
            }),
        );
        self
    }
    /// Adds a new function of two arguments.
    pub fn func2<S, F>(&mut self, name: S, func: F) -> &mut Self
    where
        S: Into<String>,
        F: Fn(Complex64, Complex64) -> Complex64 + 'a,
    {
        self.funcs.insert(
            name.into(),
            Rc::new(move |args: &[Complex64]| {
                if args.len() == 2 {
                    Ok(func(args[0], args[1]))
                } else {
                    Err(FuncEvalError::NumberArgs(2))
                }
            }),
        );
        self
    }
    /// Adds a new function of three arguments.
    pub fn func3<S, F>(&mut self, name: S, func: F) -> &mut Self
    where
        S: Into<String>,
        F: Fn(Complex64, Complex64, Complex64) -> Complex64 + 'a,
    {
        self.funcs.insert(
            name.into(),
            Rc::new(move |args: &[Complex64]| {
                if args.len() == 3 {
                    Ok(func(args[0], args[1], args[2]))
                } else {
                    Err(FuncEvalError::NumberArgs(3))
                }
            }),
        );
        self
    }
    /// Adds a new function of a variable number of arguments; `n_args` is an
    /// exact count or a range, as with `Context::funcn`.
    pub fn funcn<S, F, N>(&mut self, name: S, func: F, n_args: N) -> &mut Self
    where
        S: Into<String>,
        F: Fn(&[Complex64]) -> Complex64 + 'a,
        N: ArgGuardCx,
    {
        self.funcs.insert(name.into(), n_args.to_arg_guard(func));
        self
    }
}
/// The default complex context is the one with all built-ins registered.
impl<'a> Default for ContextCx<'a> {
    fn default() -> Self {
        ContextCx::new()
    }
}
/// A type-erased, arity-checked complex function as stored in a `ContextCx`.
type GuardedFuncCx<'a> = Rc<dyn Fn(&[Complex64]) -> Result<Complex64, FuncEvalError> + 'a>;
/// Complex counterpart of `ArgGuard`: specifies the allowed argument count
/// for a variadic complex function.
pub trait ArgGuardCx {
    /// Wraps `func` in a closure that validates the argument count before calling it.
    fn to_arg_guard<'a, F: Fn(&[Complex64]) -> Complex64 + 'a>(self, func: F) -> GuardedFuncCx<'a>;
}
/// Exact-arity guard: the function accepts precisely `self` arguments.
impl ArgGuardCx for usize {
    fn to_arg_guard<'a, F: Fn(&[Complex64]) -> Complex64 + 'a>(self, func: F) -> GuardedFuncCx<'a> {
        Rc::new(move |args: &[Complex64]| {
            if args.len() == self {
                Ok(func(args))
            } else {
                // FIX: report the actual expected arity. This was hard-coded
                // to `NumberArgs(1)` (a copy-paste from `func1`), producing a
                // misleading error for any guard with self != 1.
                Err(FuncEvalError::NumberArgs(self))
            }
        })
    }
}
/// At-least-`start` arguments guard (`n..`).
impl ArgGuardCx for std::ops::RangeFrom<usize> {
    fn to_arg_guard<'a, F: Fn(&[Complex64]) -> Complex64 + 'a>(self, func: F) -> GuardedFuncCx<'a> {
        Rc::new(move |args: &[Complex64]| {
            if args.len() >= self.start {
                Ok(func(args))
            } else {
                Err(FuncEvalError::TooFewArguments)
            }
        })
    }
}
/// Fewer-than-`end` arguments guard (`..m`, exclusive upper bound).
impl ArgGuardCx for std::ops::RangeTo<usize> {
    fn to_arg_guard<'a, F: Fn(&[Complex64]) -> Complex64 + 'a>(self, func: F) -> GuardedFuncCx<'a> {
        Rc::new(move |args: &[Complex64]| {
            if args.len() < self.end {
                Ok(func(args))
            } else {
                Err(FuncEvalError::TooManyArguments)
            }
        })
    }
}
/// Half-open range guard (`n..m`): accepts `start <= len < end`.
impl ArgGuardCx for std::ops::Range<usize> {
    fn to_arg_guard<'a, F: Fn(&[Complex64]) -> Complex64 + 'a>(self, func: F) -> GuardedFuncCx<'a> {
        Rc::new(move |args: &[Complex64]| {
            if args.len() >= self.start && args.len() < self.end {
                Ok(func(args))
            } else if args.len() < self.start {
                Err(FuncEvalError::TooFewArguments)
            } else {
                Err(FuncEvalError::TooManyArguments)
            }
        })
    }
}
/// Unrestricted guard (`..`): any number of arguments is accepted.
impl ArgGuardCx for std::ops::RangeFull {
    fn to_arg_guard<'a, F: Fn(&[Complex64]) -> Complex64 + 'a>(self, func: F) -> GuardedFuncCx<'a> {
        Rc::new(move |args: &[Complex64]| Ok(func(args)))
    }
}
impl<'a> ContextProvider for ContextCx<'a> {
    /// Looks up a real-valued variable/constant by name.
    fn get_var(&self, name: &str) -> Option<f64> {
        self.vars.get(name).cloned()
    }
    /// Looks up a complex-valued variable/constant by name.
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        self.vars_cx.get(name).cloned()
    }
    /// Invokes a registered complex function; unknown names yield `UnknownFunction`.
    fn eval_func_cx(&self, name: &str, args: &[Complex64]) -> Result<Complex64, FuncEvalError> {
        self.funcs
            .get(name)
            .map_or(Err(FuncEvalError::UnknownFunction), |f| f(args))
    }
}
\ No newline at end of file
use std::f64::consts::PI;
use fnv::FnvHashMap;
use ndarray::{Array, Ix1, Ix2, IxDyn};
use num_complex::Complex64;
use crate::{CtxMaps, Expr, Operation, Token::*};
use crate::{ContextProvider, Error, factorial, FuncEvalError, MyCx, MyF};
use crate::expr::Context;
use crate::expr_complex::ContextCx;
use crate::tsfn_basic::*;
// Thread-local default contexts: built once per thread and reused for all
// evaluations that do not supply an explicit context.
thread_local!(static DEFAULT_CONTEXT: Context<'static> = Context::new());
thread_local!(pub static DEFAULT_CONTEXT_TENSOR: ContextTensor<'static> = ContextTensor::new());
/// `CtxMaps` resolves variables from its own lookup tables, but delegates
/// all function evaluation to the thread-local default tensor context.
impl ContextProvider for CtxMaps {
    fn get_var(&self, name: &str) -> Option<f64> {
        self.var_values.get(name).cloned()
    }
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        self.var_values_cx.get(name).cloned()
    }
    fn get_tensor(&self, name: &str) -> Option<Array<f64, IxDyn>> {
        self.var_values_tensor.get(name).cloned()
    }
    fn get_tensor_cx(&self, name: &str) -> Option<Array<Complex64, IxDyn>> {
        self.var_values_tensor_cx.get(name).cloned()
    }
    fn eval_func(&self, name: &str, args: &[f64]) -> Result<f64, FuncEvalError> {
        DEFAULT_CONTEXT_TENSOR.with(|ctx| ctx.eval_func(name, args))
    }
    fn eval_func_cx(&self, name: &str, args: &[Complex64]) -> Result<Complex64, FuncEvalError> {
        DEFAULT_CONTEXT_TENSOR.with(|ctx| ctx.eval_func_cx(name, args))
    }
    fn eval_func_tensor(&self, name: &str, args: &[MyF]) -> Result<MyF, FuncEvalError> {
        DEFAULT_CONTEXT_TENSOR.with(|ctx| ctx.eval_func_tensor(name, args))
    }
    fn eval_func_tensor_cx(&self, name: &str, args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
        DEFAULT_CONTEXT_TENSOR.with(|ctx| ctx.eval_func_tensor_cx(name, args))
    }
}
impl Expr {
pub fn eval_tensor(&self) -> Result<MyF, Error> {
self.eval_tensor_with_ctx(ContextTensor::new())
}
pub fn eval_tensor_cx(&self) -> Result<MyCx, Error> {
self.eval_tensor_with_ctx_cx(ContextTensor::new())
}
pub fn eval_tensor_with_ctx<C: ContextProvider>(&self, ctx: C) -> Result<MyF, Error> {
let mut stack = Vec::with_capacity(16);
if self.rpn.is_empty() {
return Err(Error::EmptyExpression);
}
// 将后缀表达式转换成tensor
for token in &self.rpn {
match token {
Var(n) => {
if let Some(f) = ctx.get_var(n) {
stack.push(MyF::F64(f));
} else if let Some(t) = ctx.get_tensor(n) {
stack.push(MyF::Tensor(t));
} else {
return Err(Error::UnknownVariable(n.clone()));
}
}
Number(f) => {
stack.push(MyF::F64(*f));
}
Tensor(size) => {
if size.is_none() {
return Err(Error::EvalError(format!(
"Tensor size is none: {:?}",
token
)));
}
let size = size.unwrap();
if stack.len() < size {
return Err(Error::EvalError(format!(
"eval: stack does not have enough arguments for function token {:?}",
token
)));
}
let mut floats = Vec::new();
let mut is_array = false;
let mut shape = match stack.last().unwrap() {
MyF::F64(_) => {
vec![]
}
MyF::Tensor(t) => {
is_array = true;
t.shape().to_vec()
}
};
for i in 0..size {
match &stack[stack.len() - size + i] {
MyF::F64(f) => {
if is_array {
return Err(Error::EvalError(format!(
"Not consistent type for tensor token : {:?}",
token
)));
}
floats.push(*f);
}
MyF::Tensor(t) => {
floats.extend(t.as_slice().unwrap());
}
}
}
let nl = stack.len() - size;
stack.truncate(nl);
shape.insert(0, size);
let array = Array::from_shape_vec(shape, floats).unwrap();
stack.push(MyF::Tensor(array.into_dyn()));
}
Binary(op) => {
let right = stack.pop().unwrap();
let left = stack.pop().unwrap();
let r = match op {
Operation::Plus => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(f1 + f2),
MyF::Tensor(t) => MyF::Tensor(f1 + t),
},
MyF::Tensor(t1) => match right {
MyF::F64(f) => MyF::Tensor(t1 + f),
MyF::Tensor(t2) => MyF::Tensor(t1 + t2),
},
},
Operation::Minus => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(f1 - f2),
MyF::Tensor(t2) => MyF::Tensor(f1 - t2),
},
MyF::Tensor(t1) => match right {
MyF::F64(f2) => MyF::Tensor(t1 - f2),
MyF::Tensor(t2) => MyF::Tensor(t1 - t2),
},
},
Operation::Times => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(f1 * f2),
MyF::Tensor(t) => MyF::Tensor(f1 * t),
},
MyF::Tensor(t1) => match right {
MyF::F64(f) => MyF::Tensor(t1 * f),
MyF::Tensor(t2) => match t1.shape().len() {
1 => {
let a = t1.into_dimensionality::<Ix1>().unwrap();
if t2.shape().len() == 2 && a.shape()[0] == t2.shape()[0] {
let b = t2.into_dimensionality::<Ix2>().unwrap();
MyF::Tensor(a.dot(&b).into_dyn())
} else {
MyF::Tensor(a * t2)
}
}
2 => {
let a = t1.into_dimensionality::<Ix2>().unwrap();
if t2.shape().len() == 1 && a.shape()[1] == t2.shape()[0] {
let b = t2.into_dimensionality::<Ix1>().unwrap();
MyF::Tensor(a.dot(&b).into_dyn())
} else if t2.shape().len() == 2
&& a.shape()[1] == t2.shape()[0]
{
let b = t2.into_dimensionality::<Ix2>().unwrap();
MyF::Tensor(a.dot(&b).into_dyn())
} else {
MyF::Tensor(a * t2)
}
}
_ => MyF::Tensor(t1 * t2),
},
},
},
Operation::Div => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(f1 / f2),
MyF::Tensor(t2) => MyF::Tensor(f1 / t2),
},
MyF::Tensor(t1) => match right {
MyF::F64(f2) => MyF::Tensor(t1 / f2),
MyF::Tensor(t2) => MyF::Tensor(t1 / t2),
},
},
Operation::Rem => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(f1 % f2),
MyF::Tensor(t2) => MyF::Tensor(f1 % t2),
},
MyF::Tensor(t1) => match right {
MyF::F64(f2) => MyF::Tensor(t1 % f2),
MyF::Tensor(t2) => MyF::Tensor(t1 % t2),
},
},
Operation::Pow => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(f1.powf(f2)),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
MyF::Tensor(t1) => {
match right {
MyF::F64(f) => {
let shape = t1.shape();
if f == -1. && shape.len() == 2 && shape[0] == shape[1] {
let t = ctx
.matrix_inv(
&t1.into_dimensionality::<Ix2>().unwrap(),
)
.map_err(|e| {
Error::Function("pow".to_string(), e)
})?;
// let t = t1.into_dimensionality::<Ix2>().unwrap().inv()
// .map_err(|_| Error::Function("pow".to_string(), FuncEvalError::NumberArgs(0)))?;
MyF::Tensor(t.into_dyn())
} else {
MyF::Tensor(t1.mapv(|a| a.powf(f)))
}
}
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
}
}
},
Operation::LessThan => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(if f1 < f2 { 1.0 } else { 0.0 }),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::GreatThan => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(if f1 > f2 { 1.0 } else { 0.0 }),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::LtOrEqual => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(if f1 <= f2 { 1.0 } else { 0.0 }),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::GtOrEqual => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(if f1 >= f2 { 1.0 } else { 0.0 }),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::Equal => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(if f1 == f2 { 1.0 } else { 0.0 }),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::Unequal => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(if f1 != f2 { 1.0 } else { 0.0 }),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::And => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => {
MyF::F64(if (f1 > 0.0) && (f2 > 0.0) { 1.0 } else { 0.0 })
}
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::Or => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => {
MyF::F64(if (f1 > 0.0) || (f2 > 0.0) { 1.0 } else { 0.0 })
}
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitAnd => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64((f1 as i64 & f2 as i64) as f64),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitOr => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64((f1 as i64 | f2 as i64) as f64),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitXor => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64((f1 as i64 ^ f2 as i64) as f64),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitShl => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(((f1 as i64) << (f2 as i64)) as f64),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitShr => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => MyF::F64(((f1 as i64) >> (f2 as i64)) as f64),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitAt => match left {
MyF::F64(f1) => match right {
MyF::F64(f2) => {
if f1 < 1. || f2 > 64. {
return Err(Error::EvalError(format!(
"Operation \"@\" ERROR:the {:?} bit doesn't exist.",
right
)));
}
MyF::F64(if (f1 as i64) & 2_i64.pow(f2 as u32 - 1) != 0 {
1.0
} else {
0.0
})
}
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!("TypeUnsupported : {:?}", token)));
}
};
stack.push(r);
}
Unary(op) => {
let x = stack.pop().unwrap();
let r = match op {
Operation::Plus => x,
Operation::Minus => match x {
MyF::F64(f) => MyF::F64(-f),
MyF::Tensor(t) => MyF::Tensor(-t),
},
Operation::Not => match x {
MyF::F64(f) => MyF::F64(if f > 0. {1.0} else {0.}),
MyF::Tensor(t) => MyF::Tensor(t.mapv_into(|f| if f > 0. {1.0} else {0.} )),
}
Operation::BitNot => match x {
MyF::F64(f) => MyF::F64(!(f as i64) as f64),
MyF::Tensor(t) => MyF::Tensor(t.mapv_into(|f| !(f as i64) as f64)),
},
Operation::Fact => match x {
MyF::F64(f) => match factorial(f) {
Ok(res) => MyF::F64(res),
Err(e) => return Err(Error::EvalError(String::from(e))),
},
_ => {
return Err(Error::EvalError(format!(
"Unimplemented unary operation: {:?}, {:?}",
op, token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Unimplemented unary operation: {:?}",
op
)));
}
};
stack.push(r);
}
Func(n, Some(i)) => {
if stack.len() < *i {
let msg = format!(
"stack does not have enough arguments for function token {:?}",
token
);
return Err(Error::EvalError(msg));
}
match ctx.eval_func_tensor(n, &stack[stack.len() - i..]) {
Ok(r) => {
let nl = stack.len() - i;
stack.truncate(nl);
stack.push(r);
}
Err(e) => return Err(Error::Function(n.to_owned(), e)),
}
}
Func(ref n, None) => match ctx.eval_func_tensor(n, &[]) {
Ok(r) => {
stack.push(r);
}
Err(e) => return Err(Error::Function(n.to_owned(), e)),
},
_ => {
return Err(Error::EvalError(format!("TypeUnsupported : {:?}", token)));
}
}
}
let r = stack.pop().expect("Stack is empty, this is impossible.");
if !stack.is_empty() {
return Err(Error::EvalError(format!(
"There are still {} items on the stack.",
stack.len()
)));
}
Ok(r)
}
pub fn eval_tensor_with_ctx_cx<C: ContextProvider>(&self, ctx: C) -> Result<MyCx, Error> {
let mut stack = Vec::with_capacity(16);
if self.rpn.is_empty() {
return Err(Error::EmptyExpression);
}
// 将后缀表达式转换成tensor
for token in &self.rpn {
match token {
Var(n) => {
if let Some(f) = ctx.get_var(n) {
stack.push(MyCx::F64(Complex64::new(f, 0.)));
} else if let Some(c) = ctx.get_var_cx(n) {
stack.push(MyCx::F64(c));
} else if let Some(t) = ctx.get_tensor(n) {
let a = t.mapv(|f| Complex64::new(f, 0.));
stack.push(MyCx::Tensor(a));
} else if let Some(t) = ctx.get_tensor_cx(n) {
stack.push(MyCx::Tensor(t));
} else {
return Err(Error::UnknownVariable(n.clone()));
}
}
Number(f) => stack.push(MyCx::F64(Complex64::new(*f, 0.))),
Tensor(size) => {
if size.is_none() {
return Err(Error::EvalError(format!(
"Tensor size is none: {:?}",
token
)));
}
let size = size.unwrap();
if stack.len() < size {
return Err(Error::EvalError(format!(
"eval: stack does not have enough arguments for function token {:?}",
token
)));
}
let mut floats = Vec::new();
let mut is_array = false;
let mut shape = match stack.last().unwrap() {
MyCx::F64(_) => {
vec![]
}
MyCx::Tensor(t) => {
is_array = true;
t.shape().to_vec()
}
};
for i in 0..size {
match &stack[stack.len() - size + i] {
MyCx::F64(f) => {
if is_array {
return Err(Error::EvalError(format!(
"Not consistent type for tensor token : {:?}",
token
)));
}
floats.push(*f);
}
MyCx::Tensor(t) => floats.extend(t.as_slice().unwrap()),
}
}
let nl = stack.len() - size;
stack.truncate(nl);
shape.insert(0, size);
let array = Array::from_shape_vec(shape, floats).unwrap();
stack.push(MyCx::Tensor(array.into_dyn()));
}
Binary(op) => {
let right = stack.pop().unwrap();
let left = stack.pop().unwrap();
let r = match op {
Operation::Plus => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(f1 + f2),
MyCx::Tensor(t) => MyCx::Tensor(f1 + t),
},
MyCx::Tensor(t1) => match right {
MyCx::F64(f) => MyCx::Tensor(t1 + f),
MyCx::Tensor(t2) => MyCx::Tensor(t1 + t2),
},
},
Operation::Minus => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(f1 - f2),
MyCx::Tensor(t2) => MyCx::Tensor(f1 - t2),
},
MyCx::Tensor(t1) => match right {
MyCx::F64(f2) => MyCx::Tensor(t1 - f2),
MyCx::Tensor(t2) => MyCx::Tensor(t1 - t2),
},
},
Operation::Times => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(f1 * f2),
MyCx::Tensor(t) => MyCx::Tensor(f1 * t),
},
MyCx::Tensor(t1) => match right {
MyCx::F64(f) => MyCx::Tensor(t1 * f),
MyCx::Tensor(t2) => match t1.shape().len() {
1 => {
let a = t1.into_dimensionality::<Ix1>().unwrap();
if t2.shape().len() == 2 && a.shape()[0] == t2.shape()[0] {
let b = t2.into_dimensionality::<Ix2>().unwrap();
MyCx::Tensor(a.dot(&b).into_dyn())
} else {
MyCx::Tensor(a * t2)
}
}
2 => {
let a = t1.into_dimensionality::<Ix2>().unwrap();
if t2.shape().len() == 1 && a.shape()[1] == t2.shape()[0] {
let b = t2.into_dimensionality::<Ix1>().unwrap();
MyCx::Tensor(a.dot(&b).into_dyn())
} else if t2.shape().len() == 2
&& a.shape()[1] == t2.shape()[0]
{
let b = t2.into_dimensionality::<Ix2>().unwrap();
MyCx::Tensor(a.dot(&b).into_dyn())
} else {
MyCx::Tensor(a * t2)
}
}
_ => MyCx::Tensor(t1 * t2),
},
},
},
Operation::Div => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(f1 / f2),
MyCx::Tensor(t2) => MyCx::Tensor(f1 / t2),
},
MyCx::Tensor(t1) => match right {
MyCx::F64(f2) => MyCx::Tensor(t1 / f2),
MyCx::Tensor(t2) => MyCx::Tensor(t1 / t2),
},
},
Operation::Rem => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(f1 % f2),
MyCx::Tensor(_) => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
MyCx::Tensor(t1) => match right {
MyCx::F64(f2) => MyCx::Tensor(t1 % f2),
MyCx::Tensor(t2) => MyCx::Tensor(t1 % t2),
},
},
Operation::Pow => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(f1.powf(f2.re)),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
MyCx::Tensor(t1) => {
match right {
MyCx::F64(f) => {
let shape = t1.shape();
if f.re == -1. && shape.len() == 2 && shape[0] == shape[1] {
let t = ctx
.matrix_inv_cx(
&t1.into_dimensionality::<Ix2>().unwrap(),
)
.map_err(|e| {
Error::Function("pow".to_string(), e)
})?;
// let t = t1.into_dimensionality::<Ix2>().unwrap().inv()
// .map_err(|_| Error::Function("pow".to_string(), FuncEvalError::NumberArgs(0)))?;
MyCx::Tensor(t.into_dyn())
} else {
MyCx::Tensor(t1.mapv(|a| a.powf(f.re)))
}
}
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
}
}
},
Operation::LessThan => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(if f1.re < f2.re {
Complex64::new(1., 0.)
} else {
Complex64::new(0., 0.)
}),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::GreatThan => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(if f1.re > f2.re {
Complex64::new(1., 0.)
} else {
Complex64::new(0., 0.)
}),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::LtOrEqual => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(if f1.re <= f2.re {
Complex64::new(1., 0.)
} else {
Complex64::new(0., 0.)
}),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::GtOrEqual => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(if f1.re >= f2.re {
Complex64::new(1., 0.)
} else {
Complex64::new(0., 0.)
}),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::Equal => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(if f1.re == f2.re {
Complex64::new(1., 0.)
} else {
Complex64::new(0., 0.)
}),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::Unequal => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(if f1.re != f2.re {
Complex64::new(1., 0.)
} else {
Complex64::new(0., 0.)
}),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::And => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(if (f1.re > 0.0) && (f2.re > 0.0) {
Complex64::new(1., 0.)
} else {
Complex64::new(0., 0.)
}),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::Or => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(if (f1.re > 0.0) || (f2.re > 0.0) {
Complex64::new(1., 0.)
} else {
Complex64::new(0., 0.)
}),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitAnd => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(Complex64::new(
(f1.re as i64 & f2.re as i64) as f64,
0.,
)),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitOr => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(Complex64::new(
(f1.re as i64 | f2.re as i64) as f64,
0.,
)),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitXor => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(Complex64::new(
(f1.re as i64 ^ f2.re as i64) as f64,
0.,
)),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitShl => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(Complex64::new(
((f1.re as i64) << (f2.re as i64)) as f64,
0.,
)),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitShr => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => MyCx::F64(Complex64::new(
((f1.re as i64) >> (f2.re as i64)) as f64,
0.,
)),
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
Operation::BitAt => match left {
MyCx::F64(f1) => match right {
MyCx::F64(f2) => {
if f1.re < 1. || f2.re > 64. {
return Err(Error::EvalError(format!(
"Operation \"@\" ERROR:the {:?} bit doesn't exist.",
right
)));
}
MyCx::F64(
if (f1.re as i64) & 2_i64.pow(f2.re as u32 - 1) != 0 {
Complex64::new(1., 0.)
} else {
Complex64::new(0., 0.)
},
)
}
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Not equal type : {:?}",
token
)));
}
},
_ => {
return Err(Error::EvalError(format!("TypeUnsupported : {:?}", token)));
}
};
stack.push(r);
}
Unary(op) => {
let x = stack.pop().unwrap();
let r = match op {
Operation::Plus => x,
Operation::Minus => match x {
MyCx::F64(f) => MyCx::F64(-f),
MyCx::Tensor(t) => MyCx::Tensor(-t),
},
Operation::Not => match x {
MyCx::F64(f) => MyCx::F64(Complex64::new(if f.re > 0. {1.0} else {0.}, 0.)),
_ => {
return Err(Error::EvalError(format!(
"Unimplemented unary operation: {:?}, {:?}",
op, token
)));
}
}
Operation::BitNot => match x {
MyCx::F64(f) => MyCx::F64(Complex64::new(!(f.re as i64) as f64, 0.)),
_ => {
return Err(Error::EvalError(format!(
"Unimplemented unary operation: {:?}, {:?}",
op, token
)));
}
},
Operation::Fact => match x {
MyCx::F64(f) => match factorial(f.re) {
Ok(res) => MyCx::F64(Complex64::new(res, 0.)),
Err(e) => return Err(Error::EvalError(String::from(e))),
},
_ => {
return Err(Error::EvalError(format!(
"Unimplemented unary operation: {:?}, {:?}",
op, token
)));
}
},
_ => {
return Err(Error::EvalError(format!(
"Unimplemented unary operation: {:?}",
op
)));
}
};
stack.push(r);
}
Func(n, Some(i)) => {
if stack.len() < *i {
let msg = format!(
"stack does not have enough arguments for function token {:?}",
token
);
return Err(Error::EvalError(msg));
}
match ctx.eval_func_tensor_cx(n, &stack[stack.len() - i..]) {
Ok(r) => {
let nl = stack.len() - i;
stack.truncate(nl);
stack.push(r);
}
Err(e) => return Err(Error::Function(n.to_owned(), e)),
}
}
Func(ref n, None) => match ctx.eval_func(n, &[]) {
Ok(r) => {
stack.push(MyCx::F64(Complex64::new(r, 0.)));
}
Err(e) => return Err(Error::Function(n.to_owned(), e)),
},
_ => {
return Err(Error::EvalError(format!("TypeUnsupported : {:?}", token)));
}
}
}
let r = stack.pop().expect("Stack is empty, this is impossible.");
if !stack.is_empty() {
return Err(Error::EvalError(format!(
"There are still {} items on the stack.",
stack.len()
)));
}
Ok(r)
}
}
// Evaluation context holding tensor-valued variables plus the scalar real
// and complex sub-contexts used for variables, constants and functions.
#[derive(Clone)]
pub struct ContextTensor<'a> {
    // Real-valued tensor variables, by name.
    tensors: FnvHashMap<String, Array<f64, IxDyn>>,
    // Complex-valued tensor variables, by name.
    tensors_cx: FnvHashMap<String, Array<Complex64, IxDyn>>,
    // Real scalar sub-context.
    ctx: Context<'a>,
    // Complex scalar sub-context.
    ctx_cx: ContextCx<'a>,
}
impl<'a> ContextTensor<'a> {
    /// Creates a context with built-in constants and functions, cloned from
    /// a per-thread cached instance to avoid rebuilding it on every call.
    pub fn new() -> ContextTensor<'a> {
        thread_local!(static DEFAULT_CONTEXT: ContextTensor<'static> = ContextTensor::empty());
        DEFAULT_CONTEXT.with(Clone::clone)
    }
    /// Creates a context with no tensors registered and default scalar
    /// sub-contexts.
    pub fn empty() -> ContextTensor<'a> {
        ContextTensor {
            ctx: Default::default(),
            ctx_cx: Default::default(),
            tensors: Default::default(),
            tensors_cx: Default::default(),
        }
    }
    /// Adds a real scalar variable/constant.
    pub fn var<S: Into<String>>(&mut self, var: S, value: f64) -> &mut Self {
        self.ctx.var(var.into(), value);
        self
    }
    /// Adds a complex scalar variable/constant.
    pub fn var_cx<S: Into<String>>(&mut self, var: S, value: Complex64) -> &mut Self {
        self.ctx_cx.var_cx(var.into(), value);
        self
    }
    /// Adds (or replaces) a real tensor variable.
    pub fn tensor<S: Into<String>>(&mut self, var: S, value: Array<f64, IxDyn>) -> &mut Self {
        self.tensors.insert(var.into(), value);
        self
    }
    /// Adds (or replaces) a complex tensor variable.
    pub fn tensor_cx<S: Into<String>>(
        &mut self,
        var: S,
        value: Array<Complex64, IxDyn>,
    ) -> &mut Self {
        self.tensors_cx.insert(var.into(), value);
        self
    }
    /// Removes every registered tensor; scalar sub-contexts are untouched.
    pub fn clean(&mut self) {
        self.tensors.clear();
        self.tensors_cx.clear();
    }
}
impl<'a> Default for ContextTensor<'a> {
fn default() -> Self {
ContextTensor::new()
}
}
impl<'a> ContextProvider for ContextTensor<'a> {
    fn get_var(&self, name: &str) -> Option<f64> {
        self.ctx.get_var(name)
    }
    fn get_var_cx(&self, name: &str) -> Option<Complex64> {
        self.ctx_cx.get_var_cx(name)
    }
    fn get_tensor(&self, name: &str) -> Option<Array<f64, IxDyn>> {
        self.tensors.get(name).cloned()
    }
    fn get_tensor_cx(&self, name: &str) -> Option<Array<Complex64, IxDyn>> {
        self.tensors_cx.get(name).cloned()
    }
    /// Evaluates a named function over real scalar/tensor arguments.
    fn eval_func_tensor(&self, name: &str, args: &[MyF]) -> Result<MyF, FuncEvalError> {
        // Collect leading scalar arguments; when *every* argument is a
        // scalar the call is delegated to the plain f64 context.
        let mut floats = Vec::with_capacity(args.len());
        for arg in args {
            match arg {
                MyF::F64(v) => floats.push(*v),
                MyF::Tensor(_) => break,
            }
        }
        // Tensor constructors take scalar arguments but must not be routed
        // to the scalar context below.
        match name {
            "eye" => return TsfnBasic::ts_eye(args),
            "zeros" => return TsfnBasic::ts_zeros(args),
            "ones" => return TsfnBasic::ts_ones(args),
            "range" => return TsfnBasic::ts_range(args),
            _ => {}
        }
        if floats.len() == args.len() {
            return Ok(MyF::F64(self.ctx.eval_func(name, &floats)?));
        }
        // Unary element-wise function: scalars delegate to the f64 context,
        // tensors map the f64 method `$m` over every element.
        macro_rules! map_unary {
            ($m:ident) => {
                match &args[0] {
                    MyF::F64(f) => Ok(MyF::F64(self.ctx.eval_func(name, &[*f])?)),
                    MyF::Tensor(t) => Ok(MyF::Tensor(t.mapv(|a| a.$m()))),
                }
            };
        }
        match name {
            "get" => TsfnBasic::ts_get(args),
            "slice" => TsfnBasic::ts_slice(args),
            "abs" => map_unary!(abs),
            "exp" => map_unary!(exp),
            "sin" => map_unary!(sin),
            "cos" => map_unary!(cos),
            "tan" => map_unary!(tan),
            "asin" => map_unary!(asin),
            "acos" => map_unary!(acos),
            "atan" => map_unary!(atan),
            "sinh" => map_unary!(sinh),
            "cosh" => map_unary!(cosh),
            "tanh" => map_unary!(tanh),
            "asinh" => map_unary!(asinh),
            "acosh" => map_unary!(acosh),
            "atanh" => map_unary!(atanh),
            "deg2rad" => match &args[0] {
                MyF::F64(f) => Ok(MyF::F64(f * PI / 180.)),
                MyF::Tensor(t) => Ok(MyF::Tensor(t * PI / 180.)),
            },
            "rad2deg" => match &args[0] {
                MyF::F64(f) => Ok(MyF::F64(f / PI * 180.)),
                MyF::Tensor(t) => Ok(MyF::Tensor(t / PI * 180.)),
            },
            "ln" => map_unary!(ln),
            "log10" => map_unary!(log10),
            "power" => TsfnBasic::ts_power(args),
            "sqrt" => map_unary!(sqrt),
            "floor" => map_unary!(floor),
            "ceil" => map_unary!(ceil),
            "round" => map_unary!(round),
            "signum" => map_unary!(signum),
            "sum_all" => match &args[0] {
                MyF::F64(f) => Ok(MyF::F64(*f)),
                MyF::Tensor(t) => Ok(MyF::F64(t.sum())),
            },
            "sum" => TsfnBasic::ts_sum(args),
            "transpose" => match &args[0] {
                MyF::F64(f) => Ok(MyF::F64(*f)),
                MyF::Tensor(t) => Ok(MyF::Tensor(t.clone().reversed_axes())),
            },
            "size" => TsfnBasic::ts_size(args),
            "sparse" => TsfnBasic::ts_sparse(args),
            "diag" => TsfnBasic::ts_diag(args),
            // "trace" => TsfnBasic::ts_trace(args),
            _ => Err(FuncEvalError::UnknownFunction),
        }
    }
    /// Evaluates a named function over complex scalar/tensor arguments.
    fn eval_func_tensor_cx(&self, name: &str, args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
        // Collect leading scalar arguments; when every argument is a scalar
        // the call is delegated to the complex scalar context.
        let mut complex = Vec::with_capacity(args.len());
        for arg in args {
            match arg {
                MyCx::F64(v) => complex.push(*v),
                MyCx::Tensor(_) => break,
            }
        }
        match name {
            "eye" => return TsfnBasic::ts_eye_cx(args),
            "zeros" => return TsfnBasic::ts_zeros_cx(args),
            "ones" => return TsfnBasic::ts_ones_cx(args),
            _ => {}
        }
        if complex.len() == args.len() {
            return Ok(MyCx::F64(self.ctx_cx.eval_func_cx(name, &complex)?));
        }
        // Unary fn where `Complex64` itself provides the element method `$m`.
        macro_rules! map_unary {
            ($m:ident) => {
                match &args[0] {
                    MyCx::F64(f) => Ok(MyCx::F64(self.ctx_cx.eval_func_cx(name, &[*f])?)),
                    MyCx::Tensor(t) => Ok(MyCx::Tensor(t.mapv(|a| a.$m()))),
                }
            };
        }
        // Unary fn routed through the complex context both for scalars and
        // for each tensor element (the function `$n` may differ from `name`).
        macro_rules! map_via_ctx {
            ($n:expr) => {
                match &args[0] {
                    MyCx::F64(f) => Ok(MyCx::F64(self.ctx_cx.eval_func_cx($n, &[*f])?)),
                    MyCx::Tensor(t) => Ok(MyCx::Tensor(
                        t.mapv(|a| self.ctx_cx.eval_func_cx($n, &[a]).unwrap()),
                    )),
                }
            };
        }
        match name {
            "get" => TsfnBasic::ts_get_cx(args),
            "slice" => TsfnBasic::ts_slice_cx(args),
            "abs" => map_via_ctx!(name),
            "exp" => map_unary!(exp),
            "sin" => map_unary!(sin),
            "cos" => map_unary!(cos),
            "tan" => map_unary!(tan),
            "asin" => map_unary!(asin),
            "acos" => map_unary!(acos),
            "atan" => map_unary!(atan),
            "sinh" => map_unary!(sinh),
            "cosh" => map_unary!(cosh),
            "tanh" => map_unary!(tanh),
            "asinh" => map_unary!(asinh),
            "acosh" => map_unary!(acosh),
            "atanh" => map_unary!(atanh),
            "deg2rad" => match &args[0] {
                MyCx::F64(f) => Ok(MyCx::F64(f * PI / 180.)),
                MyCx::Tensor(t) => Ok(MyCx::Tensor(t * PI / 180.)),
            },
            "rad2deg" => match &args[0] {
                MyCx::F64(f) => Ok(MyCx::F64(f / PI * 180.)),
                MyCx::Tensor(t) => Ok(MyCx::Tensor(t / PI * 180.)),
            },
            "ln" => map_unary!(ln),
            "log10" => map_unary!(log10),
            "power" => TsfnBasic::ts_power_cx(args),
            "sqrt" => map_unary!(sqrt),
            // BUG FIX: these four previously mapped tensor elements through
            // `self.ctx` (the real-valued context) instead of `self.ctx_cx`,
            // inconsistent with "abs"/"conj"/"real"/"imag" above.
            "floor" => map_via_ctx!(name),
            "ceil" => map_via_ctx!(name),
            "round" => map_via_ctx!(name),
            "signum" => map_via_ctx!(name),
            "sum_all" => match &args[0] {
                MyCx::F64(f) => Ok(MyCx::F64(*f)),
                MyCx::Tensor(t) => Ok(MyCx::F64(t.sum())),
            },
            "sum" => TsfnBasic::ts_sum_cx(args),
            "conj" => map_via_ctx!(name),
            "real" => map_via_ctx!(name),
            "imag" => map_via_ctx!(name),
            // "angle" is implemented by the context's "rad" function.
            "angle" => map_via_ctx!("rad"),
            "transpose" => match &args[0] {
                MyCx::F64(f) => Ok(MyCx::F64(*f)),
                MyCx::Tensor(t) => Ok(MyCx::Tensor(t.clone().reversed_axes())),
            },
            // Conjugate transpose: reverse axes, then conjugate each element.
            "ctranspose" => match &args[0] {
                MyCx::F64(f) => Ok(MyCx::F64(self.ctx_cx.eval_func_cx("conj", &[*f])?)),
                MyCx::Tensor(t) => Ok(MyCx::Tensor(
                    t.clone()
                        .reversed_axes()
                        .mapv(|a| self.ctx_cx.eval_func_cx("conj", &[a]).unwrap()),
                )),
            },
            "size" => TsfnBasic::ts_size_cx(args),
            // "eig" => TsfnBasic::ts_eig(args),
            // "diag" => TsfnBasic::ts_diag_cx(args),
            // "trace" => TsfnBasic::ts_trace_cx(args),
            _ => Err(FuncEvalError::UnknownFunction),
        }
    }
}
\ No newline at end of file
extern crate core;
extern crate nom;
use std::collections::HashMap;
use std::fmt;
use std::fmt::{Display, Formatter};
use ndarray::{Array, Array2, IxDyn};
use num_complex::Complex64;
use serde::{Deserialize, Serialize};
pub mod expr;
pub mod expr_complex;
pub mod expr_tensor;
pub mod tokenizer;
pub mod shuntingyard;
pub mod tsfn_basic;
/// A real-valued evaluation result: either a scalar or an n-dimensional tensor.
#[derive(Debug, Clone, PartialEq)]
pub enum MyF {
    /// A scalar value.
    F64(f64),
    /// A dynamically-dimensioned tensor of `f64` elements.
    Tensor(Array<f64, IxDyn>),
}
/// A complex-valued evaluation result: either a scalar or an n-dimensional tensor.
#[derive(Debug, Clone, PartialEq)]
pub enum MyCx {
    /// A scalar complex value.
    F64(Complex64),
    /// A dynamically-dimensioned tensor of `Complex64` elements.
    Tensor(Array<Complex64, IxDyn>),
}
/// An error reported by the parser.
#[derive(Debug, Clone, PartialEq)]
pub enum ParseError {
    /// A token that is not allowed at the given location (contains the location of the offending
    /// character in the source string as `(line, column)`).
    UnexpectedToken(usize, usize),
    /// Missing right parentheses at the end of the source string (contains the number of missing
    /// parens).
    MissingRParen(i32),
    /// Missing operator or function argument at the end of the expression.
    MissingArgument,
}
impl Display for ParseError {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match *self {
ParseError::UnexpectedToken(row, col) => write!(f, "Unexpected char at line: {row} column: {col}"),
ParseError::MissingRParen(i) => write!(f, "Missing {i} right parenthes{}.",
if i == 1 { "is" } else { "es" }),
ParseError::MissingArgument => write!(f, "Missing argument at the end of expression."),
}
}
}
impl std::error::Error for ParseError {
    // NOTE(review): `description` has been deprecated since Rust 1.42 in favour
    // of `Display`; the override is kept to preserve existing behaviour.
    fn description(&self) -> &str {
        match *self {
            ParseError::UnexpectedToken(_, _) => "unexpected token",
            ParseError::MissingRParen(_) => "missing right parenthesis",
            ParseError::MissingArgument => "missing argument",
        }
    }
}
/// Function evaluation error.
#[derive(Debug, Clone, PartialEq)]
pub enum FuncEvalError {
    /// The function was called with fewer arguments than it requires.
    TooFewArguments,
    /// The function was called with more arguments than it accepts.
    TooManyArguments,
    /// The function requires exactly this number of arguments.
    NumberArgs(usize),
    /// No function with the given name is known to the context.
    UnknownFunction,
}
impl Display for FuncEvalError {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match *self {
FuncEvalError::UnknownFunction => write!(f, "Unknown function"),
FuncEvalError::NumberArgs(i) => write!(f, "Expected {i} arguments"),
FuncEvalError::TooFewArguments => write!(f, "Too few arguments"),
FuncEvalError::TooManyArguments => write!(f, "Too many arguments"),
}
}
}
impl std::error::Error for FuncEvalError {
    // NOTE(review): `description` has been deprecated since Rust 1.42 in favour
    // of `Display`; the override is kept to preserve existing behaviour.
    fn description(&self) -> &str {
        match *self {
            FuncEvalError::UnknownFunction => "unknown function",
            FuncEvalError::NumberArgs(_) => "wrong number of function arguments",
            FuncEvalError::TooFewArguments => "too few function arguments",
            FuncEvalError::TooManyArguments => "too many function arguments",
        }
    }
}
/// An error produced by the shunting-yard algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RPNError {
    /// An extra left parenthesis was found.
    MismatchedLParen(usize),
    /// An extra left bracket was found.
    MismatchedLBracket(usize),
    /// An extra right parenthesis was found.
    MismatchedRParen(usize),
    /// An extra right bracket was found.
    MismatchedRBracket(usize),
    /// Comma that is not separating function arguments.
    UnexpectedComma(usize),
    /// Too few operands for some operator.
    NotEnoughOperands(usize),
    /// Too many operands reported.
    TooManyOperands,
}
impl std::error::Error for RPNError {
    // NOTE(review): `description` has been deprecated since Rust 1.42 in favour
    // of `Display`; the override is kept to preserve existing behaviour.
    // Fix: the bracket messages previously misspelled "brackets" as "blackets".
    fn description(&self) -> &str {
        match *self {
            RPNError::MismatchedLParen(_) => "mismatched left parenthesis",
            RPNError::MismatchedRParen(_) => "mismatched right parenthesis",
            RPNError::MismatchedLBracket(_) => "mismatched left brackets",
            RPNError::MismatchedRBracket(_) => "mismatched right brackets",
            RPNError::UnexpectedComma(_) => "unexpected comma",
            RPNError::NotEnoughOperands(_) => "missing operands",
            RPNError::TooManyOperands => "too many operands left at the end of expression",
        }
    }
}
impl Display for RPNError {
    /// Renders a human-readable description of the shunting-yard failure.
    ///
    /// Fix: the bracket messages previously misspelled "brackets" as "blackets".
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match *self {
            RPNError::MismatchedLParen(i) => {
                write!(f, "Mismatched left parenthesis at token {i}.")
            }
            RPNError::MismatchedRParen(i) => {
                write!(f, "Mismatched right parenthesis at token {i}.")
            }
            RPNError::MismatchedLBracket(i) => {
                write!(f, "Mismatched left brackets at token {i}.")
            }
            RPNError::MismatchedRBracket(i) => {
                write!(f, "Mismatched right brackets at token {i}.")
            }
            RPNError::UnexpectedComma(i) => write!(f, "Unexpected comma at token {i}"),
            RPNError::NotEnoughOperands(i) => write!(f, "Missing operands at token {i}"),
            RPNError::TooManyOperands => {
                write!(f, "Too many operands left at the end of expression.")
            }
        }
    }
}
// extern crate meval;
/// An error produced during parsing or evaluation.
#[derive(Debug, Clone, PartialEq)]
pub enum Error {
    /// A variable was referenced that the context does not provide.
    UnknownVariable(String),
    /// A tensor was referenced that the context does not provide.
    UnknownTensor(u64),
    /// A function call failed; carries the function name and the cause.
    Function(String, FuncEvalError),
    /// An error returned by the parser.
    ParseError(ParseError),
    /// The shunting-yard algorithm returned an error.
    RPNError(RPNError),
    // A catch all for all other errors during evaluation
    EvalError(String),
    /// The input expression was empty.
    EmptyExpression,
}
/**
* @api {枚举_数学符号} /Operation Operation
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} Plus \+
* @apiSuccess {String} Minus \-
* @apiSuccess {String} Times \*
* @apiSuccess {String} Div /
* @apiSuccess {String} Rem %
* @apiSuccess {String} Pow ^
* @apiSuccess {String} Fact !
* @apiSuccess {String} Equal \==,从这里开始往下是bool操作符
* @apiSuccess {String} Unequal !=
* @apiSuccess {String} LessThan \<
* @apiSuccess {String} GreatThan \>
* @apiSuccess {String} LtOrEqual \<=
* @apiSuccess {String} GtOrEqual \>=
* @apiSuccess {String} And &&
* @apiSuccess {String} Or ||
* @apiSuccess {String} Not ~~
* @apiSuccess {String} BitAnd &,从这里开始往下是位操作
* @apiSuccess {String} BitOr |
* @apiSuccess {String} BitXor ^^
* @apiSuccess {String} BitShl \<<
* @apiSuccess {String} BitShr \>>
* @apiSuccess {String} BitAt @
* @apiSuccess {String} BitNot ~
*/
/// Mathematical operations.
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Copy)]
pub enum Operation {
    // +
    Plus,
    // -
    Minus,
    // *
    Times,
    // /
    Div,
    // %
    Rem,
    // ^
    Pow,
    // !
    Fact,
    // boolean operators below
    // ==
    Equal,
    // !=
    Unequal,
    // <
    LessThan,
    // >
    GreatThan,
    // <=
    LtOrEqual,
    // >=
    GtOrEqual,
    // &&
    And,
    // ||
    Or,
    // ~~
    Not,
    // bitwise operators below
    // &
    BitAnd,
    // |
    BitOr,
    // ^^
    BitXor,
    // <<
    BitShl,
    // >>
    BitShr,
    // @
    BitAt,
    // ~
    BitNot,
}
/**
* @api {枚举_Token} /Token Token
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {Object} Binary Binary operation,{"Binary": Operation}
* @apiSuccess {Object} Unary Unary operation,{"Unary": Operation}
* @apiSuccess {String} LParen Left parenthesis (
* @apiSuccess {String} RParen Right parenthesis )
* @apiSuccess {String} BigLParen Big Left parenthesis {
* @apiSuccess {String} BigRParen Big Right parenthesis }
* @apiSuccess {String} RBracket Right brackets ]
* @apiSuccess {String} Comma function argument separator
* @apiSuccess {Object} Number {"Number": f64}
* @apiSuccess {Object} Tensor {"Tensor": usize}
* @apiSuccess {Object} Var {"Var": String}
* @apiSuccess {Object} Func {"Func": tuple(String, [usize])}
*/
/// Expression tokens.
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub enum Token {
    /// Binary operation.
    Binary(Operation),
    /// Unary operation.
    Unary(Operation),
    /// Left parenthesis. (
    LParen,
    /// Right parenthesis. )
    RParen,
    /// Big Left parenthesis. {
    BigLParen,
    /// Big Right parenthesis. }
    BigRParen,
    /// Right brackets. ]
    RBracket,
    /// Comma: function argument separator
    Comma,
    /// A number.
    Number(f64),
    /// A tensor literal; the payload is the element count, `None` until it is
    /// determined by the shunting-yard pass.
    Tensor(Option<usize>),
    /// A variable.
    Var(String),
    /// A function with name and number of arguments.
    Func(String, Option<usize>),
}
/// An expression stored in reverse Polish notation, as produced by the
/// tokenizer followed by the shunting-yard pass.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Expr {
    /// Tokens in RPN (postfix) order.
    pub rpn: Vec<Token>,
}
impl Display for Expr {
    // Displays the expression via its `Debug` representation; for an infix
    // rendering use `rpn_to_string` instead.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}
/// Supplies variable values, tensor values and function implementations to the
/// expression evaluator. Every method has a "not available" default, so
/// implementors only override what they actually support.
pub trait ContextProvider {
    /// Looks up a real scalar variable by name.
    fn get_var(&self, _: &str) -> Option<f64> {
        None
    }
    /// Looks up a complex scalar variable by name.
    fn get_var_cx(&self, _: &str) -> Option<Complex64> {
        None
    }
    /// Looks up a real tensor variable by name.
    fn get_tensor(&self, _: &str) -> Option<Array<f64, IxDyn>> {
        None
    }
    /// Looks up a complex tensor variable by name.
    fn get_tensor_cx(&self, _: &str) -> Option<Array<Complex64, IxDyn>> {
        None
    }
    /// Evaluates a named function over real scalar arguments.
    fn eval_func(&self, _: &str, _: &[f64]) -> Result<f64, FuncEvalError> {
        Err(FuncEvalError::UnknownFunction)
    }
    /// Evaluates a named function over complex scalar arguments.
    fn eval_func_cx(&self, _: &str, _: &[Complex64]) -> Result<Complex64, FuncEvalError> {
        Err(FuncEvalError::UnknownFunction)
    }
    /// Evaluates a named function over real scalar-or-tensor arguments.
    fn eval_func_tensor(&self, _: &str, _: &[MyF]) -> Result<MyF, FuncEvalError> {
        Err(FuncEvalError::UnknownFunction)
    }
    /// Evaluates a named function over complex scalar-or-tensor arguments.
    fn eval_func_tensor_cx(&self, _: &str, _: &[MyCx]) -> Result<MyCx, FuncEvalError> {
        Err(FuncEvalError::UnknownFunction)
    }
    /// Inverts a real matrix.
    fn matrix_inv(&self, _: &Array2<f64>) -> Result<Array2<f64>, FuncEvalError> {
        Err(FuncEvalError::UnknownFunction)
    }
    /// Inverts a complex matrix.
    fn matrix_inv_cx(&self, _: &Array2<Complex64>) -> Result<Array2<Complex64>, FuncEvalError> {
        Err(FuncEvalError::UnknownFunction)
    }
}
/// Plain-map storage of named scalar and tensor values (real and complex).
pub struct CtxMaps {
    // real scalars by name
    var_values: HashMap<String, f64>,
    // complex scalars by name
    var_values_cx: HashMap<String, Complex64>,
    // real tensors by name
    var_values_tensor: HashMap<String, Array<f64, IxDyn>>,
    // complex tensors by name
    var_values_tensor_cx: HashMap<String, Array<Complex64, IxDyn>>,
}
impl Default for CtxMaps {
    /// Equivalent to [`CtxMaps::new`]: all maps start empty.
    fn default() -> Self {
        Self::new()
    }
}
impl CtxMaps {
    /// Creates an empty set of value maps.
    pub fn new() -> Self {
        CtxMaps {
            var_values: Default::default(),
            var_values_cx: Default::default(),
            var_values_tensor: Default::default(),
            var_values_tensor_cx: Default::default(),
        }
    }
    /// Registers (or overwrites) a real scalar variable.
    pub fn var<S: Into<String>>(&mut self, name: S, v: f64) {
        self.var_values.insert(name.into(), v);
    }
    /// Registers (or overwrites) a complex scalar variable.
    pub fn var_cx<S: Into<String>>(&mut self, name: S, v: Complex64) {
        self.var_values_cx.insert(name.into(), v);
    }
    /// Registers (or overwrites) a real tensor variable.
    pub fn tensor<S: Into<String>>(&mut self, name: S, v: Array<f64, IxDyn>) {
        self.var_values_tensor.insert(name.into(), v);
    }
    /// Registers (or overwrites) a complex tensor variable.
    pub fn tensor_cx<S: Into<String>>(&mut self, name: S, v: Array<Complex64, IxDyn>) {
        self.var_values_tensor_cx.insert(name.into(), v);
    }
}
/// Recursive factorial with no input validation; callers must guarantee a
/// non-negative integral `num` (see [`factorial`] for the checked entry point).
fn factorial_unsafe(num: f64) -> f64 {
    match num {
        0.0 | 1.0 => 1.0,
        n => n * factorial_unsafe(n - 1.0),
    }
}
/// Computes `num!` for a non-negative integral `f64`.
///
/// # Errors
///
/// Returns `Err` when `num` is negative or has a fractional part.
/// Values above `170` overflow `f64`, so `Ok(f64::INFINITY)` is returned.
pub fn factorial(num: f64) -> Result<f64, &'static str> {
    if num.fract() != 0.0 || num < 0.0 {
        return Err("Number must be non-negative with no fractional component!");
    }
    if num > 170.0 {
        // 171! exceeds f64::MAX; saturate instead of recursing
        return Ok(f64::INFINITY);
    }
    Ok(factorial_unsafe(num))
}
/// Parses a `;`-separated list of `name: expr` (or `name = expr`) definitions.
///
/// Empty segments are skipped. Returns `None` if any non-empty segment is
/// malformed (no separator, or an expression that fails to parse).
pub fn parse_exprs(s: &str) -> Option<Vec<(String, Expr)>> {
    let lines: Vec<&str> = s.split(';').collect();
    let mut exprs = Vec::new();
    for p in lines {
        if p.trim().is_empty() {
            continue;
        }
        // Prefer `:` as the separator; otherwise split at the first `=`.
        // NOTE(review): a segment with more than one `:` produces more than two
        // pieces, which makes the whole call return `None` — confirm intended.
        let id_to_value: Vec<&str> = if p.contains(':') {
            p.split(':').collect()
        } else if let Some(pos) = p.find('=') {
            let (first, second) = p.split_at(pos);
            vec![first, &second[1..]]
        } else {
            vec![]
        };
        if id_to_value.len() == 2 {
            let var_name = id_to_value[0].trim().to_string();
            // TODO (from original comment): check for duplicate variable
            // definitions — not implemented here.
            let var_expr: Expr = id_to_value[1].parse().ok()?;
            exprs.push((var_name, var_expr));
        } else {
            return None;
        }
    }
    Some(exprs)
}
// above should as same as in sparrowzz
\ No newline at end of file
//! Implementation of the shunting-yard algorithm for converting an infix expression to an
//! expression in reverse Polish notation (RPN).
//!
//! See the Wikipedia articles on the [shunting-yard algorithm][shunting] and on [reverse Polish
//! notation][RPN] for more details.
//!
//! [RPN]: https://en.wikipedia.org/wiki/Reverse_Polish_notation
//! [shunting]: https://en.wikipedia.org/wiki/Shunting-yard_algorithm
use crate::shuntingyard::Associativity::*;
use crate::{RPNError, Operation, Token};
/// Operator associativity used by the shunting-yard algorithm.
#[derive(Debug, Clone, Copy)]
enum Associativity {
    Left,
    Right,
    /// Not applicable (operands, grouping tokens, unary operators).
    NA,
}
/// Returns the operator precedence and associativity for a given token.
///
/// Larger numbers bind tighter; operands and grouping tokens get `(0, NA)`.
fn prec_assoc(token: &Token) -> (u32, Associativity) {
    use self::Associativity::*;
    use crate::Operation::*;
    use crate::Token::*;
    match *token {
        Binary(op) => match op {
            Or => (3, Left),
            And => (4, Left),
            BitOr => (5, Left),
            BitXor => (6, Left),
            BitAnd => (7, Left),
            Equal | Unequal => (8, Left),
            LessThan | GreatThan | LtOrEqual | GtOrEqual => (9, Left),
            BitShl | BitShr => (10, Left),
            Plus | Minus => (11, Left),
            Times | Div | Rem => (12, Left),
            BitAt => (13, Left),
            Pow => (14, Right),
            // Fact / Not / BitNot never appear as binary operators
            _ => unimplemented!(),
        },
        Unary(op) => match op {
            Plus | Minus | Not | BitNot => (13, NA),
            Fact => (15, NA),
            // the remaining operations never appear as unary operators
            _ => unimplemented!(),
        },
        Var(_) | Number(_) | Func(..) | Tensor(_) | LParen | RParen | BigLParen | BigRParen
        | RBracket | Comma => (0, NA),
    }
}
/// Converts a tokenized infix expression to reverse Polish notation.
///
/// # Failure
///
/// Returns `Err` if the input expression is not well-formed.
pub fn to_rpn(input: &[Token]) -> Result<Vec<Token>, RPNError> {
    use crate::Token::*;
    let mut output = Vec::with_capacity(input.len());
    let mut stack = Vec::with_capacity(input.len());
    for (index, token) in input.iter().enumerate() {
        let token = token.clone();
        match token {
            Number(_) | Var(_) => output.push(token),
            Unary(_) => stack.push((index, token)),
            Binary(_) => {
                // Pop operators of higher precedence (or equal, when this
                // operator is left-associative) before pushing this one.
                let pa1 = prec_assoc(&token);
                while !stack.is_empty() {
                    let pa2 = prec_assoc(&stack.last().unwrap().1);
                    match (pa1, pa2) {
                        ((i, Left), (j, _)) if i <= j => {
                            output.push(stack.pop().unwrap().1);
                        }
                        ((i, Right), (j, _)) if i < j => {
                            output.push(stack.pop().unwrap().1);
                        }
                        _ => {
                            break;
                        }
                    }
                }
                stack.push((index, token))
            }
            LParen => stack.push((index, token)),
            RParen => {
                // Unwind to the matching `(`; if it opened a function call,
                // emit the function with its final argument count.
                let mut found = false;
                while let Some((_, t)) = stack.pop() {
                    match t {
                        LParen => {
                            found = true;
                            break;
                        }
                        Func(name, nargs) => {
                            found = true;
                            output.push(Func(name, Some(nargs.unwrap_or(0) + 1)));
                            break;
                        }
                        _ => output.push(t),
                    }
                }
                if !found {
                    return Err(RPNError::MismatchedRParen(index));
                }
            }
            RBracket => {
                // Closes a tensor literal; emit it with its element count.
                let mut found = false;
                while let Some((_, t)) = stack.pop() {
                    match t {
                        Tensor(size) => {
                            found = true;
                            output.push(Tensor(Some(size.unwrap_or(0) + 1)));
                            break;
                        }
                        _ => output.push(t),
                    }
                }
                if !found {
                    return Err(RPNError::MismatchedRBracket(index));
                }
            }
            Comma => {
                // An argument separator bumps the pending function's or
                // tensor's argument count.
                let mut found = false;
                while let Some((i, t)) = stack.pop() {
                    match t {
                        LParen => {
                            return Err(RPNError::UnexpectedComma(index));
                        }
                        Func(name, nargs) => {
                            found = true;
                            stack.push((i, Func(name, Some(nargs.unwrap_or(0) + 1))));
                            break;
                        }
                        Tensor(size) => {
                            found = true;
                            stack.push((i, Tensor(Some(size.unwrap_or(0) + 1))));
                            break;
                        }
                        _ => output.push(t),
                    }
                }
                if !found {
                    return Err(RPNError::UnexpectedComma(index));
                }
            }
            // zero-element tensors and zero-argument calls are already complete
            Tensor(Some(0)) => output.push(token),
            Tensor(..) => stack.push((index, token)),
            Func(_, Some(0)) => output.push(token),
            Func(..) => stack.push((index, token)),
            _ => {}
        }
    }
    // Drain the remaining operators onto the output.
    while let Some((index, token)) = stack.pop() {
        match token {
            Unary(_) | Binary(_) => output.push(token),
            Func(_, None) => output.push(token),
            Tensor(None) => output.push(token),
            LParen | Func(..) => return Err(RPNError::MismatchedLParen(index)),
            _ => panic!("Unexpected token on stack."),
        }
    }
    // verify rpn: simulate evaluation, tracking the operand-stack depth
    let mut n_operands = 0isize;
    for (index, token) in output.iter().enumerate() {
        match *token {
            Var(_) | Number(_) => n_operands += 1,
            Unary(_) => (),
            Binary(_) => n_operands -= 1,
            Func(_, None) => continue,
            Func(_, Some(n_args)) => n_operands -= n_args as isize - 1,
            Tensor(None) => continue,
            Tensor(Some(size)) => n_operands -= size as isize - 1,
            _ => panic!("Nothing else should be here"),
        }
        if n_operands <= 0 {
            return Err(RPNError::NotEnoughOperands(index));
        }
    }
    if n_operands > 1 {
        return Err(RPNError::TooManyOperands);
    }
    output.shrink_to_fit();
    Ok(output)
}
/// Converts an RPN token sequence back into an infix token sequence,
/// inserting parentheses only where precedence requires them.
///
/// # Failure
///
/// Returns `Err` if `input` is not a well-formed RPN sequence.
pub fn rpn_to_infix(input: &[Token]) -> Result<Vec<Token>, RPNError> {
    use self::Associativity::*;
    use crate::Operation::*;
    use crate::Token::*;
    if input.is_empty() {
        return Ok(vec![]);
    }
    // Each stack entry is (partial infix token list, precedence/associativity
    // of that sub-expression's outermost operator).
    let mut stack = Vec::with_capacity(input.len());
    for (index, token) in input.iter().enumerate() {
        let token = token.clone();
        match token {
            Number(_) | Var(_) => {
                let pa = prec_assoc(&token);
                stack.push((vec![token], pa));
            }
            Tensor(nargs) => {
                // Rebuild `[a, b, ...]` from the top `nargs` sub-expressions.
                let nargs = nargs.unwrap_or(0);
                let pa = prec_assoc(&token);
                let mut infix = vec![RBracket];
                for i in 0..nargs {
                    if stack.is_empty() {
                        return Err(RPNError::NotEnoughOperands(index));
                    }
                    let mut argu = stack.pop().unwrap().0;
                    if i >= 1 {
                        argu.push(Comma);
                    }
                    argu.append(&mut infix);
                    infix = argu;
                }
                infix.insert(0, token);
                stack.push((infix, pa));
            }
            Unary(_) => {
                if stack.is_empty() {
                    return Err(RPNError::NotEnoughOperands(index));
                }
                let (i, assoc) = stack.last().unwrap().1;
                let mut infix1 = stack.pop().unwrap().0;
                let pa = prec_assoc(&token);
                // Parenthesise the operand if it binds less tightly than this
                // unary operator (operands with precedence 0 never need it).
                match assoc {
                    NA => {
                        if i < pa.0 && i != 0 {
                            infix1.insert(0, LParen);
                            infix1.push(RParen);
                        }
                    }
                    _ => {
                        if i <= pa.0 && i != 0 {
                            infix1.insert(0, LParen);
                            infix1.push(RParen);
                        }
                    }
                }
                infix1.insert(0, token);
                stack.push((infix1, pa));
            }
            Binary(op) => {
                let pa = prec_assoc(&token);
                let prec = pa.0;
                if stack.is_empty() {
                    return Err(RPNError::NotEnoughOperands(index));
                }
                let (precr, assocr) = stack.last().unwrap().1; // right operand
                let mut infixr = stack.pop().unwrap().0;
                if stack.is_empty() {
                    return Err(RPNError::NotEnoughOperands(index));
                }
                let (precl, _) = stack.last().unwrap().1; // left operand
                let mut infixl = stack.pop().unwrap().0;
                // let mut laddparen = false;
                let mut raddparen = false;
                match op {
                    Plus | Times => {
                        // for + and *, same-precedence operands need no parentheses
                        if precl < prec && precl != 0 {
                            infixl.insert(0, LParen);
                            infixl.push(RParen);
                            // laddparen = true;
                        }
                        if precr < prec && precr != 0 {
                            infixr.insert(0, LParen);
                            infixr.push(RParen);
                            raddparen = true;
                        }
                    }
                    Pow => {
                        // in string form, powers use ordinary parentheses
                        if precl <= prec && precl != 0 {
                            infixl.insert(0, LParen);
                            infixl.push(RParen);
                            // laddparen = true;
                        }
                        if precr < prec && precr != 0 {
                            infixr.insert(0, LParen);
                            infixr.push(RParen);
                            raddparen = true;
                        }
                    }
                    _ => {
                        if precl < prec && precl != 0 {
                            infixl.insert(0, LParen);
                            infixl.push(RParen);
                        }
                        if precr <= prec && precr != 0 {
                            infixr.insert(0, LParen);
                            infixr.push(RParen);
                            raddparen = true;
                        }
                    }
                }
                // parenthesise a unary operator on the left
                // if !laddparen && matches!(assocl,NA) && precl == 13 {
                //     infixl.insert(0, LParen);
                //     infixl.push(RParen);
                // }
                // Parenthesise a unary operator on the right: unary operators
                // bind tightly, but mathematical convention adds parentheses,
                // e.g. -a+-b is conventionally written -a+(-b).
                if !raddparen && matches!(assocr, NA) && precr == 13 {
                    infixr.insert(0, LParen);
                    infixr.push(RParen);
                }
                infixl.push(token);
                infixl.append(&mut infixr);
                stack.push((infixl, pa));
            }
            Func(_, nargs) => {
                // Rebuild `f(a, b, ...)` from the top `nargs` sub-expressions.
                let nargs = nargs.unwrap_or(0);
                let pa = prec_assoc(&token);
                let mut infix = vec![RParen];
                for i in 0..nargs {
                    if stack.is_empty() {
                        return Err(RPNError::NotEnoughOperands(index));
                    }
                    let mut argu = stack.pop().unwrap().0;
                    if i >= 1 {
                        argu.push(Comma);
                    }
                    argu.append(&mut infix);
                    infix = argu;
                }
                infix.insert(0, token);
                stack.push((infix, pa));
            }
            _ => {}
        }
    }
    if stack.len() != 1 {
        return Err(RPNError::TooManyOperands);
    }
    let output = stack.pop().unwrap().0;
    Ok(output)
}
/// Converts an RPN token sequence to its plain-text infix representation.
///
/// # Errors
///
/// Returns the `RPNError` produced by `rpn_to_infix` if `input` is not a
/// well-formed RPN sequence.
pub fn rpn_to_string(input: &[Token]) -> Result<String, RPNError> {
    let mut output = String::new();
    let infix = rpn_to_infix(input)?;
    for token in infix.iter() {
        match token {
            Token::Binary(op) => match op {
                Operation::Plus => output.push('+'),
                Operation::Minus => output.push('-'),
                Operation::Times => output.push('*'),
                Operation::Div => output.push('/'),
                Operation::Rem => output.push('%'),
                Operation::Pow => output.push('^'),
                Operation::Equal => output.push_str("=="),
                Operation::Unequal => output.push_str("!="),
                Operation::LessThan => output.push('<'),
                Operation::GreatThan => output.push('>'),
                Operation::LtOrEqual => output.push_str("<="),
                Operation::GtOrEqual => output.push_str(">="),
                Operation::And => output.push_str("&&"),
                Operation::Or => output.push_str("||"),
                Operation::BitAnd => output.push('&'),
                Operation::BitOr => output.push('|'),
                Operation::BitXor => output.push_str("^^"),
                Operation::BitShl => output.push_str("<<"),
                Operation::BitShr => output.push_str(">>"),
                Operation::BitAt => output.push('@'),
                _ => output.push_str("Unsupported"),
            },
            Token::Unary(op) => match op {
                Operation::Not => output.push_str("~~"),
                Operation::BitNot => output.push('~'),
                Operation::Fact => output.push('!'),
                Operation::Plus => output.push('+'),
                Operation::Minus => output.push('-'),
                _ => output.push_str("Unsupported"),
            },
            Token::LParen => output.push('('),
            Token::RParen => output.push(')'),
            Token::Comma => output.push(','),
            // `f64::to_string` renders identically to `format!("{}", n)`
            Token::Number(n) => output.push_str(&n.to_string()),
            // fix: `v` is already a `String`; no intermediate allocation needed
            Token::Var(v) => output.push_str(v),
            Token::Func(func, _) => output.push_str(&format!("{}(", func)),
            Token::Tensor(_) => output.push('['),
            Token::RBracket => output.push(']'),
            Token::BigLParen => output.push('{'),
            Token::BigRParen => output.push('}'),
        }
    }
    Ok(output)
}
/// Converts an RPN token sequence into an infix token sequence for LaTeX
/// rendering. Unlike `rpn_to_infix`, power exponents are wrapped in `{}` and
/// every division becomes a fraction (`\frac{..}{..}`).
///
/// # Failure
///
/// Returns `Err` if `input` is not a well-formed RPN sequence.
pub fn rpn_to_infix_latex(input: &[Token]) -> Result<Vec<Token>, RPNError> {
    use self::Associativity::*;
    use crate::Operation::*;
    use crate::Token::*;
    if input.is_empty() {
        return Ok(vec![]);
    }
    // Each stack entry is (partial infix token list, precedence/associativity
    // of that sub-expression's outermost operator).
    let mut stack = Vec::with_capacity(input.len());
    for (index, token) in input.iter().enumerate() {
        let token = token.clone();
        match token {
            Number(_) | Var(_) => {
                let pa = prec_assoc(&token);
                stack.push((vec![token], pa));
            }
            Tensor(nargs) => {
                // Rebuild `[a, b, ...]` from the top `nargs` sub-expressions.
                let nargs = nargs.unwrap_or(0);
                let pa = prec_assoc(&token);
                let mut infix = vec![RBracket];
                for i in 0..nargs {
                    if stack.is_empty() {
                        return Err(RPNError::NotEnoughOperands(index));
                    }
                    let mut argu = stack.pop().unwrap().0;
                    if i >= 1 {
                        argu.push(Comma);
                    }
                    argu.append(&mut infix);
                    infix = argu;
                }
                infix.insert(0, token);
                stack.push((infix, pa));
            }
            Unary(op) => {
                if stack.is_empty() {
                    return Err(RPNError::NotEnoughOperands(index));
                }
                let (i, assoc) = stack.last().unwrap().1;
                let mut infix1 = stack.pop().unwrap().0;
                let pa = prec_assoc(&token);
                // Parenthesise the operand if it binds less tightly than this
                // unary operator (operands with precedence 0 never need it).
                match assoc {
                    NA => {
                        if i < pa.0 && i != 0 {
                            infix1.insert(0, LParen);
                            infix1.push(RParen);
                        }
                    }
                    _ => {
                        if i <= pa.0 && i != 0 {
                            infix1.insert(0, LParen);
                            infix1.push(RParen);
                        }
                    }
                }
                // prefix operators go before the operand, factorial after it
                match op {
                    Plus | Minus | Not | BitNot => infix1.insert(0, token),
                    Fact => infix1.push(token),
                    _ => unimplemented!(),
                }
                stack.push((infix1, pa));
            }
            Binary(op) => {
                let pa = prec_assoc(&token);
                let prec = pa.0;
                if stack.is_empty() {
                    return Err(RPNError::NotEnoughOperands(index));
                }
                let (precr, assocr) = stack.last().unwrap().1; // right operand
                let mut infixr = stack.pop().unwrap().0;
                if stack.is_empty() {
                    return Err(RPNError::NotEnoughOperands(index));
                }
                let (precl, _) = stack.last().unwrap().1; // left operand
                let mut infixl = stack.pop().unwrap().0;
                // let mut laddparen = false;
                let mut raddparen = false;
                match op {
                    Plus | Times => {
                        // for + and *, same-precedence operands need no parentheses
                        if precl < prec && precl != 0 {
                            infixl.insert(0, LParen);
                            infixl.push(RParen);
                            // laddparen = true;
                        }
                        if precr < prec && precr != 0 {
                            infixr.insert(0, LParen);
                            infixr.push(RParen);
                            raddparen = true;
                        }
                    }
                    Div => {
                        // \frac{l}{r}: division needs no parentheses, it becomes a fraction
                        // function-like layout: emit the \frac{ marker first, then
                        // the numerator and denominator groups
                        infixl.insert(0, Binary(Div));
                        infixl.push(BigRParen);
                        infixr.insert(0, BigLParen);
                        infixr.push(BigRParen);
                        infixl.append(&mut infixr);
                        stack.push((infixl, (prec, assocr)));
                        continue;
                    }
                    Pow => {
                        // LaTeX exponents are wrapped in curly braces
                        if precl <= prec && precl != 0 {
                            infixl.insert(0, LParen);
                            infixl.push(RParen);
                            // laddparen = true;
                        }
                        infixr.insert(0, BigLParen);
                        infixr.push(BigRParen);
                        raddparen = true;
                    }
                    _ => {
                        if precl < prec && precl != 0 {
                            infixl.insert(0, LParen);
                            infixl.push(RParen);
                        }
                        if precr <= prec && precr != 0 {
                            infixr.insert(0, LParen);
                            infixr.push(RParen);
                            raddparen = true;
                        }
                    }
                }
                // parenthesise a unary operator on the left
                // if !laddparen && matches!(assocl,NA) && precl == 13 {
                //     infixl.insert(0, LParen);
                //     infixl.push(RParen);
                // }
                // Parenthesise a unary operator on the right: unary operators
                // bind tightly, but mathematical convention adds parentheses,
                // e.g. -a+-b is conventionally written -a+(-b).
                if !raddparen && matches!(assocr, NA) && precr == 13 {
                    infixr.insert(0, LParen);
                    infixr.push(RParen);
                }
                infixl.push(token);
                infixl.append(&mut infixr);
                stack.push((infixl, pa));
            }
            Func(_, nargs) => {
                // Rebuild `f(a, b, ...)` from the top `nargs` sub-expressions.
                let nargs = nargs.unwrap_or(0);
                let pa = prec_assoc(&token);
                let mut infix = vec![RParen];
                for i in 0..nargs {
                    if stack.is_empty() {
                        return Err(RPNError::NotEnoughOperands(index));
                    }
                    let mut argu = stack.pop().unwrap().0;
                    if i >= 1 {
                        argu.push(Comma);
                    }
                    argu.append(&mut infix);
                    infix = argu;
                }
                infix.insert(0, token);
                stack.push((infix, pa));
            }
            _ => {}
        }
    }
    if stack.len() != 1 {
        return Err(RPNError::TooManyOperands);
    }
    let output = stack.pop().unwrap().0;
    Ok(output)
}
pub fn rpn_to_latex(input: &[Token]) -> Result<String, RPNError> {
let mut output = String::new();
let infix = rpn_to_infix_latex(input)?;
for (_, token) in infix.iter().enumerate() {
match token {
Token::Binary(op) => {
match op {
Operation::Plus => output.push('+'),
Operation::Minus => output.push('-'),
Operation::Times => output.push_str("\\times "),
Operation::Div => output.push_str("\\frac{"), // 除法
Operation::Rem => output.push_str("\\mid "),
Operation::Pow => output.push('^'),
Operation::Equal => output.push('='),
Operation::Unequal => output.push_str("\\neq "),
Operation::LessThan => output.push('<'),
Operation::GreatThan => output.push('>'),
Operation::LtOrEqual => output.push_str("\\le "),
Operation::GtOrEqual => output.push_str("\\ge "),
Operation::And => output.push_str("\\&\\&"),
Operation::Or => output.push_str("\\parallel "),
Operation::BitAnd => output.push_str("\\And "),
Operation::BitOr => output.push('|'),
Operation::BitXor => output.push_str("\\oplus "),
Operation::BitShl => output.push_str("<<"),
Operation::BitShr => output.push_str(">>"),
Operation::BitAt => output.push('@'),
_ => output.push_str("Unsupported"),
}
}
Token::Unary(op) => match op {
Operation::Not => output.push_str("~~"), //todo: here is a bug
Operation::BitNot => output.push_str("\\sim "),
Operation::Fact => output.push('!'),
Operation::Plus => output.push('+'),
Operation::Minus => output.push('-'),
_ => output.push_str("Unsupported"),
},
Token::LParen => output.push('('),
Token::RParen => output.push(')'),
Token::BigLParen => output.push('{'),
Token::BigRParen => output.push('}'),
Token::Comma => output.push(','),
Token::Number(n) => output.push_str(&format!("{}", n)),
Token::Var(v) => output.push_str(&v.replace('_', "\\_")),
Token::Func(func, _) => output.push_str(&format!("{}(", func)),
Token::Tensor(_) => output.push('['),
Token::RBracket => output.push(']'),
}
}
Ok(output)
}
\ No newline at end of file
//! Tokenizer that converts a mathematical expression in a string form into a series of `Token`s.
//!
//! The underlying parser is build using the [nom] parser combinator crate.
//!
//! The parser should tokenize only well-formed expressions.
//!
//! [nom]: https://crates.io/crates/nom
//!
use std;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::str::from_utf8;
use nom::branch::alt;
use nom::bytes::complete::tag;
use nom::character::complete::{digit0, digit1, multispace0};
use nom::combinator::{complete, map, map_res, opt};
use nom::error::{Error, ErrorKind};
use nom::sequence::{delimited, preceded, terminated};
use nom::{IResult, Parser};
use crate::{ParseError, Token, Operation};
/// State of the tokenizer's two-state machine: what kind of token is legal
/// at the current position.
#[derive(Debug, Clone, Copy)]
enum TokenizerState {
    // accept any token that is an expression from the left: var, num, (, negpos
    LExpr,
    // accept any token that needs an expression on the left: fact, binop, ), comma
    AfterRExpr,
}
/// What kind of grouping is currently open, so the tokenizer knows whether a
/// comma is legal (function arguments, tensor elements) or not (plain parens).
#[derive(Debug, Clone, Copy)]
enum ParenState {
    /// A plain parenthesised sub-expression.
    Subexpr,
    /// A function-call argument list.
    Func,
    /// A tensor literal `[...]`.
    Tensor,
}
/// Continuing the trend of starting from the simplest piece and building up,
/// we start by creating a parser for the built-in operator functions.
///
/// Multi-character operators must be listed before their single-character
/// prefixes (e.g. `>=` before `>`), otherwise the shorter tag would match first.
fn binop(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    alt((
        // boolean operators
        map(tag(">="), |_| Token::Binary(Operation::GtOrEqual)),
        map(tag("<="), |_| Token::Binary(Operation::LtOrEqual)),
        map(tag("=="), |_| Token::Binary(Operation::Equal)),
        map(tag("!="), |_| Token::Binary(Operation::Unequal)),
        map(tag("&&"), |_| Token::Binary(Operation::And)),
        map(tag("||"), |_| Token::Binary(Operation::Or)),
        // bitwise operators
        map(tag("^^"), |_| Token::Binary(Operation::BitXor)),
        map(tag("<<"), |_| Token::Binary(Operation::BitShl)),
        // fix: this alternative was listed twice; the duplicate was dead code
        map(tag(">>"), |_| Token::Binary(Operation::BitShr)),
        map(tag("&"), |_| Token::Binary(Operation::BitAnd)),
        map(tag("|"), |_| Token::Binary(Operation::BitOr)),
        map(tag("@"), |_| Token::Binary(Operation::BitAt)),
        // arithmetic operators
        map(tag("+"), |_| Token::Binary(Operation::Plus)),
        map(tag("-"), |_| Token::Binary(Operation::Minus)),
        map(tag("*"), |_| Token::Binary(Operation::Times)),
        map(tag("/"), |_| Token::Binary(Operation::Div)),
        map(tag("%"), |_| Token::Binary(Operation::Rem)),
        map(tag("^"), |_| Token::Binary(Operation::Pow)),
        // `alt` accepts at most 21 parsers; nesting another `alt` works around it
        alt((
            // single-character comparisons, after their two-character forms
            map(tag(">"), |_| Token::Binary(Operation::GreatThan)),
            map(tag("<"), |_| Token::Binary(Operation::LessThan)),
        )),
    )).parse(i)
}
//
/// Parses a left parenthesis `(`.
fn lparen(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    map(tag("("), |_| Token::LParen).parse(i)
}
/// Parses a `[`, which opens a tensor literal (element count not yet known).
fn tensor(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    map(tag("["), |_| Token::Tensor(None)).parse(i)
}
/// Parses a right parenthesis `)`.
fn rparen(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    map(tag(")"), |_| Token::RParen).parse(i)
}
/// Parses a right bracket `]`, which closes a tensor literal.
fn rbracket(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    map(tag("]"), |_| Token::RBracket).parse(i)
}
/// Parses the factorial operator `!`, refusing to match when the input is
/// actually the start of the `!=` comparison operator.
fn fact(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    if !i.starts_with(&b"!="[..]) {
        map(tag("!"), |_| Token::Unary(Operation::Fact)).parse(i)
    } else {
        Err(nom::Err::Error(Error {
            input: i,
            code: ErrorKind::Tag,
        }))
    }
}
/// Parses a comma (function-argument / tensor-element separator).
fn comma(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    map(tag(","), |_| Token::Comma).parse(i)
}
/// Parses a unary sign operator (`+` or `-`) in expression-start position.
fn negpos(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    alt((
        map(tag("+"), |_| Token::Unary(Operation::Plus)),
        map(tag("-"), |_| Token::Unary(Operation::Minus)),
    )).parse(i)
}
/// Parses the logical-not operator `~~`.
fn not(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    map(tag("~~"), |_| Token::Unary(Operation::Not)).parse(i)
}
/// Parses the bitwise-not operator `~` (tried after `~~` in `lexpr`).
fn bitnot(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    map(tag("~"), |_| Token::Unary(Operation::BitNot)).parse(i)
}
/// Parses a floating-point literal (`123`, `1.5`, `1.`, `2e-3`, ...), tracking
/// the matched byte length so the original slice can be handed to
/// `str::parse::<f64>`.
fn number(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    // mandatory integer part
    let (left, mut len) = map(digit1, |s: &[u8]| s.len()).parse(i)?;
    // optional fractional part ("." possibly followed by no digits)
    let (left, s) = opt(preceded(tag("."), map(digit0, |s: &[u8]| s.len() + 1))).parse(left)?;
    len += s.unwrap_or(0);
    // optional exponent: e/E with an optional sign
    let (mut left, op) = opt(alt((tag("e"), tag("E")))).parse(left)?;
    if op.is_some() {
        let (l, s) = alt((
            preceded(
                alt((tag("+"), tag("-"))),
                map(digit1, |s: &[u8]| s.len() + 2),
            ),
            map(digit1, |s: &[u8]| s.len() + 1),
        )).parse(left)?;
        len += s;
        left = l;
    }
    let f_bytes = &i[0..len];
    // `len` counts exactly the ASCII bytes consumed above, so the slice is
    // valid UTF-8 and a well-formed float literal; both unwraps cannot fail.
    let f = from_utf8(f_bytes).unwrap().parse::<f64>().unwrap();
    Ok((left, Token::Number(f)))
}
fn ident(input: &[u8]) -> IResult<&[u8], &[u8], Error<&[u8]>> {
// first character must be 'a'...'z' | 'A'...'Z' | '_'
match input.first().cloned() {
Some(b'a'..=b'z') | Some(b'A'..=b'Z') | Some(b'_') | Some(b'$') => {
let n = input
.iter()
.skip(1)
.take_while(|&&c| matches!(c, b'a'..=b'z' | b'A'..=b'Z' | b'_' | b'0'..=b'9'))
.count();
let (parsed, rest) = input.split_at(n + 1);
Ok((rest, parsed))
}
// support chinese variable name
Some(b'\'') | Some(b'\"')=> {
let start = *input.first().unwrap();
let n = input
.iter()
.skip(1)
.take_while(|&&c| c != start)
.count();
let (parsed, rest) = input.split_at(n + 2);
if parsed.len() == 2 {
Err(nom::Err::Error(Error {
input,
code: ErrorKind::Alpha,
}))
} else {
Ok((rest, &parsed[1..parsed.len()-1]))
}
}
_ => Err(nom::Err::Error(Error {
input,
code: ErrorKind::Alpha,
})),
}
}
/// Parses an identifier into a `Token::Var`.
fn var(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    map(map_res(ident, from_utf8), |s: &str| Token::Var(s.into())).parse(i)
}
/// Parses an identifier followed (after optional whitespace) by `(` into a
/// `Token::Func`; the argument count stays `None` until the shunting-yard pass.
fn func(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    map(
        map_res(
            terminated(ident, preceded(multispace0, complete(tag("(")))),
            from_utf8,
        ),
        |s: &str| Token::Func(s.into(), None),
    ).parse(i)
}
/// Parses any token legal at the start of an expression, trimming surrounding
/// whitespace: number, function call, tensor, variable, unary sign, `(`, `~~`,
/// `~` or `!`.
fn lexpr(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    delimited(
        multispace0,
        alt((number, func, tensor, var, negpos, lparen, not, bitnot, fact)),
        multispace0,
    ).parse(i)
}
/// Parses any token legal after a complete expression inside parentheses:
/// a binary operator, `)` or `]`.
fn after_rexpr(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    delimited(
        multispace0,
        alt((binop, rparen, rbracket)),
        multispace0,
    ).parse(i)
}
/// Parses the token after a complete expression at the top level, where no
/// grouping is open: only a binary operator is legal.
fn after_rexpr_no_paren(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    delimited(multispace0, binop, multispace0).parse(i)
}
/// Parses the token after a complete expression inside a function-argument
/// list or tensor literal, where a comma is also legal.
fn after_rexpr_comma(i: &[u8]) -> IResult<&[u8], Token, Error<&[u8]>> {
    delimited(
        multispace0,
        alt((binop, rparen, rbracket, comma)),
        multispace0,
    ).parse(i)
}
/// Tokenizes an expression string into a sequence of `Token`s.
///
/// A two-state machine decides which sub-parsers are legal at each position
/// (`LExpr`: start of an expression; `AfterRExpr`: after a complete one),
/// while `paren_stack` tracks the kind of grouping currently open.
///
/// # Failure
///
/// Returns `Err` when the input is not a well-formed expression.
pub fn tokenize<S: AsRef<str>>(input: S) -> Result<Vec<Token>, ParseError> {
    let mut state = TokenizerState::LExpr;
    // number of function arguments left
    let mut paren_stack = vec![];
    let mut res = vec![];
    let input = input.as_ref().as_bytes();
    let mut s = input;
    while !s.is_empty() {
        let r = match (state, paren_stack.last()) {
            (TokenizerState::LExpr, _) => lexpr(s),
            (TokenizerState::AfterRExpr, None) => after_rexpr_no_paren(s),
            (TokenizerState::AfterRExpr, Some(&ParenState::Subexpr)) => after_rexpr(s),
            (TokenizerState::AfterRExpr, Some(&ParenState::Func)) => after_rexpr_comma(s),
            (TokenizerState::AfterRExpr, Some(&ParenState::Tensor)) => after_rexpr_comma(s),
        };
        match r {
            Ok((rest, t)) => {
                match &t {
                    Token::LParen => {
                        paren_stack.push(ParenState::Subexpr);
                    }
                    Token::Tensor(_) => {
                        paren_stack.push(ParenState::Tensor);
                        // special-case an immediately closed `[]`: emit an
                        // empty tensor token right away
                        if let Ok((rest2, _)) = delimited(multispace0, rbracket, multispace0).parse(rest) {
                            res.push(Token::Tensor(Some(0)));
                            s = rest2;
                            paren_stack.pop().expect("The paren_stack is empty!");
                            state = TokenizerState::AfterRExpr;
                            continue;
                        }
                    }
                    Token::Func(name, _) => {
                        paren_stack.push(ParenState::Func);
                        // special-case a zero-argument call `f()`
                        if let Ok((rest2, _)) = delimited(multispace0, rparen, multispace0).parse(rest) {
                            res.push(Token::Func(name.clone(), Some(0)));
                            s = rest2;
                            paren_stack.pop().expect("The paren_stack is empty!");
                            state = TokenizerState::AfterRExpr;
                            continue;
                        }
                    }
                    Token::RParen => {
                        // `)` is only ever parsed while a grouping is open,
                        // so the pop cannot fail here
                        paren_stack.pop().expect("The paren_stack is empty!");
                    }
                    Token::RBracket => {
                        paren_stack.pop().expect("The bracket_stack is empty!");
                    }
                    Token::Var(_) | Token::Number(_) => {
                        state = TokenizerState::AfterRExpr;
                    }
                    Token::Binary(_) | Token::Comma => {
                        state = TokenizerState::LExpr;
                    }
                    _ => {}
                }
                res.push(t);
                s = rest;
            }
            _ => {
                println!(
                    "Unexpected parse result when parsing `{}` at `{}`: {:?}",
                    String::from_utf8_lossy(input),
                    String::from_utf8_lossy(s),
                    r
                );
                return Err(ParseError::UnexpectedToken(1, s.len()));
            }
        }
    }
    match state {
        // input ended where an operand was still expected
        TokenizerState::LExpr => Err(ParseError::MissingArgument),
        _ if !paren_stack.is_empty() => Err(ParseError::MissingRParen(paren_stack.len() as i32)),
        _ => Ok(res),
    }
}
impl Display for Token {
    /// Render the token as text; multi-character operators are written as
    /// LaTeX commands (e.g. `\times`, `\neq`), everything else literally.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Tokens that carry data are formatted directly; all other tokens
        // map onto a fixed string emitted at the end.
        let fixed: &str = match self {
            Token::Number(n) => return write!(f, "{}", n),
            Token::Var(v) => return write!(f, "{}", v),
            Token::Func(name, _) => return write!(f, "{}(", name),
            Token::Tensor(size) => return write!(f, "Tensor({:?})", size),
            Token::LParen => "(",
            Token::RParen => ")",
            Token::RBracket => "]",
            Token::Comma => ",",
            Token::BigLParen => "{",
            Token::BigRParen => "}",
            Token::Binary(op) => match op {
                Operation::Plus => "+",
                Operation::Minus => "-",
                Operation::Times => "\\times ",
                Operation::Div => "\\div ",
                Operation::Rem => "\\mid ",
                Operation::Pow => "^",
                Operation::Fact => "!",
                Operation::Equal => "==",
                Operation::Unequal => "\\neq ",
                Operation::LessThan => "<",
                Operation::GreatThan => ">",
                Operation::LtOrEqual => "\\leqslant ",
                Operation::GtOrEqual => "\\geqslant ",
                Operation::And => "\\&\\&",
                Operation::Or => "\\parallel ",
                Operation::BitAnd => "\\And ",
                Operation::BitOr => "|",
                Operation::BitXor => "\\oplus ",
                Operation::BitShl => "<<",
                Operation::BitShr => ">>",
                Operation::BitAt => "@",
                _ => "Unsupported",
            },
            Token::Unary(op) => match op {
                Operation::Not => "!",
                Operation::BitNot => "\\sim ",
                Operation::Fact => "!",
                Operation::Plus => "+",
                Operation::Minus => "-",
                _ => "Unsupported",
            },
        };
        f.write_str(fixed)
    }
}
#[cfg(test)]
mod tests {
use nom::error;
use nom::error::ErrorKind::{Alpha, Digit};
use nom::Err::Error;
use crate::ParseError;
use super::*;
// The four arithmetic operators tokenize to Token::Binary.
#[test]
fn test_binop() {
assert_eq!(
binop(b"+"),
Ok((&b""[..], Token::Binary(Operation::Plus)))
);
assert_eq!(
binop(b"-"),
Ok((&b""[..], Token::Binary(Operation::Minus)))
);
assert_eq!(
binop(b"*"),
Ok((&b""[..], Token::Binary(Operation::Times)))
);
assert_eq!(
binop(b"/"),
Ok((&b""[..], Token::Binary(Operation::Div)))
);
}
// Numeric literals: integers, trailing dot, decimals and exponents are
// accepted; leading dot, bare sign/exponent and dangling exponents error.
#[test]
fn test_number() {
assert_eq!(
number(b"32143"),
Ok((&b""[..], Token::Number(32143f64)))
);
assert_eq!(
number(b"2."),
Ok((&b""[..], Token::Number(2.0f64)))
);
assert_eq!(
number(b"32143.25"),
Ok((&b""[..], Token::Number(32143.25f64)))
);
assert_eq!(
number(b"0.125e9"),
Ok((&b""[..], Token::Number(0.125e9f64)))
);
assert_eq!(
number(b"20.5E-3"),
Ok((&b""[..], Token::Number(20.5E-3f64)))
);
assert_eq!(
number(b"123423e+50"),
Ok((&b""[..], Token::Number(123423e+50f64)))
);
assert_eq!(
number(b""),
Err(Error(error::Error {
input: &b""[..],
code: Digit
}))
);
assert_eq!(
number(b".2"),
Err(Error(error::Error {
input: &b".2"[..],
code: Digit
}))
);
assert_eq!(
number(b"+"),
Err(Error(error::Error {
input: &b"+"[..],
code: Digit
}))
);
assert_eq!(
number(b"e"),
Err(Error(error::Error {
input: &b"e"[..],
code: Digit
}))
);
assert_eq!(
number(b"1E"),
Err(Error(error::Error {
input: &b""[..],
code: Digit
}))
);
assert_eq!(
number(b"1e+"),
Err(Error(error::Error {
input: &b"+"[..],
code: Digit
}))
);
}
// Variables: identifier-like names, plus quoted names (single or double
// quotes) which may contain otherwise-illegal characters; an empty input
// or a leading digit is rejected.
#[test]
fn test_var() {
for &s in ["abc", "U0", "_034", "a_be45EA", "aAzZ_"].iter() {
assert_eq!(
var(s.as_bytes()),
Ok((&b""[..], Token::Var(s.into())))
);
}
for &s in ["\'a\'", "\"U0\"", "\"_034\"", "'*'", "\"+\""].iter() {
assert_eq!(
var(s.as_bytes()),
Ok((&b""[..], tokenize(s).unwrap()[0].clone()))
);
}
assert_eq!(
var(b""),
Err(Error(error::Error {
input: &b""[..],
code: Alpha
}))
);
assert_eq!(
var(b"0"),
Err(Error(error::Error {
input: &b"0"[..],
code: Alpha
}))
);
}
// Function names: identifier + '(' with optional whitespace in between;
// the '(' alone or a digit-led name is rejected.
#[test]
fn test_func() {
for &s in ["abc(", "u0(", "_034 (", "A_be45EA ("].iter() {
assert_eq!(
func(s.as_bytes()),
Ok((&b""[..], Token::Func(s[0..s.len() - 1].trim().into(), None)))
);
}
assert_eq!(
func(b""),
Err(Error(error::Error {
input: &b""[..],
code: Alpha
}))
);
assert_eq!(
func(b"("),
Err(Error(error::Error {
input: &b"("[..],
code: Alpha
}))
);
assert_eq!(
func(b"0("),
Err(Error(error::Error {
input: &b"0("[..],
code: Alpha
}))
);
}
// End-to-end tokenizer cases: precedence-free token streams, unary vs
// binary minus, nested calls, and the error variants.
#[test]
fn test_tokenize() {
use super::Operation::*;
use super::Token::*;
assert_eq!(tokenize("a"), Ok(vec![Var("a".into())]));
assert_eq!(
tokenize("2 +(3--2) "),
Ok(vec![
Number(2f64),
Binary(Plus),
LParen,
Number(3f64),
Binary(Minus),
Unary(Minus),
Number(2f64),
RParen,
])
);
assert_eq!(
tokenize("-2^ ab0 *12 - C_0"),
Ok(vec![
Unary(Minus),
Number(2f64),
Binary(Pow),
Var("ab0".into()),
Binary(Times),
Number(12f64),
Binary(Minus),
Var("C_0".into()),
])
);
assert_eq!(
tokenize("-sin(pi * 3)^ cos(2) / Func2(x, f(y), z) * _buildIN(y)"),
Ok(vec![
Unary(Minus),
Func("sin".into(), None),
Var("pi".into()),
Binary(Times),
Number(3f64),
RParen,
Binary(Pow),
Func("cos".into(), None),
Number(2f64),
RParen,
Binary(Div),
Func("Func2".into(), None),
Var("x".into()),
Comma,
Func("f".into(), None),
Var("y".into()),
RParen,
Comma,
Var("z".into()),
RParen,
Binary(Times),
Func("_buildIN".into(), None),
Var("y".into()),
RParen,
])
);
assert_eq!(
tokenize("2 % 3"),
Ok(vec![Number(2f64), Binary(Rem), Number(3f64)])
);
assert_eq!(
tokenize("1 + !3 + 1"),
Ok(vec![
Number(1f64),
Binary(Plus),
Unary(Fact),
Number(3f64),
Binary(Plus),
Number(1f64),
])
);
assert_eq!(tokenize("3!"), Err(ParseError::UnexpectedToken(1, 1)));
assert_eq!(tokenize("()"), Err(ParseError::UnexpectedToken(1, 1)));
assert_eq!(tokenize(""), Err(ParseError::MissingArgument));
assert_eq!(tokenize("2)"), Err(ParseError::UnexpectedToken(1, 1)));
assert_eq!(tokenize("2^"), Err(ParseError::MissingArgument));
assert_eq!(tokenize("(((2)"), Err(ParseError::MissingRParen(2)));
assert_eq!(tokenize("f(2,)"), Err(ParseError::UnexpectedToken(1, 1)));
assert_eq!(tokenize("f(,2)"), Err(ParseError::UnexpectedToken(1, 3)));
}
// Zero-argument calls collapse to Func(name, Some(0)), with or without
// whitespace inside the parentheses, including when nested.
#[test]
fn test_func_with_no_para() {
assert_eq!(
tokenize("f()"),
Ok(vec![Token::Func("f".to_string(), Some(0))])
);
assert_eq!(
tokenize("f( )"),
Ok(vec![Token::Func("f".to_string(), Some(0))])
);
assert!(tokenize("f(f2(1), f3())").is_ok());
assert!(tokenize("f(f2(1), f3(), a)").is_ok());
assert!(tokenize("f(a, b, f2(), f3(), c)").is_ok());
assert!(tokenize("-sin(pi * 3)^ cos(2) / Func2(x, f(), z) * _buildIN()").is_ok());
}
// Smoke test for the Display (LaTeX) rendering; prints only, no asserts.
#[test]
fn test_show_latex() {
//let test_token = tokenize("x1^2-10*x1+x2^2+8<=5*2").unwrap();
//let test_token = tokenize("max((5*1)*x1+3*x2+2*x3+(10-3)*x4+4*x5)").unwrap();
//let test_token = tokenize("1*3*x2+sin(8-2)*x3 - cos(pi)< 7").unwrap();
//let test_token = tokenize("x1%5+3/3*x2+min(2,5)*x3*2e19 && 1").unwrap();
//let test_token = tokenize("2!").unwrap();
let test_token = tokenize("~x1").unwrap();
println!("{:?}", test_token);
for x in test_token {
println!("{}", x);
}
}
// Tensor literals: `[` opens Tensor(None), `]` closes it.
#[test]
fn test_tensor() {
assert_eq!(
tokenize("[3]"),
Ok(vec![Token::Tensor(None), Token::Number(3.), Token::RBracket])
);
assert!(tokenize("[[1,2],[3,4]]").is_ok());
}
}
// The following code must be kept identical to the corresponding code in sparrowzz
use ndarray::{arr1, Array1, Array2, Axis, IxDyn, SliceInfo, SliceInfoElem};
use num_traits::ToPrimitive;
use crate::{FuncEvalError, MyCx, MyF};
use num_complex::Complex64;
/// Linear-algebra tensor functions (eigenvalues, trace, ...).
///
/// Every method has a default implementation that reports the function as
/// unknown, so implementors only need to override the operations they
/// actually support.
pub trait TsLinalgFn {
// Eigenvalue computation over complex values; unsupported by default.
fn ts_eig(_: &[MyCx]) -> Result<MyCx, FuncEvalError> {
Err(FuncEvalError::UnknownFunction)
}
// Matrix trace over real values; unsupported by default.
fn ts_trace(_: &[MyF]) -> Result<MyF, FuncEvalError> {
Err(FuncEvalError::UnknownFunction)
}
// Matrix trace over complex values; unsupported by default.
fn ts_trace_cx(_: &[MyCx]) -> Result<MyCx, FuncEvalError> {
Err(FuncEvalError::UnknownFunction)
}
}
/// Stateless namespace-style carrier for the basic tensor functions
/// implemented in the `impl` block below.
pub struct TsfnBasic {
}
impl TsfnBasic {
/// `get(t, i, j, ...)`: index a tensor with scalar indices and return the
/// element; with a single argument the tensor is returned unchanged.
///
/// Errors: `UnknownFunction` when the receiver is a scalar, `NumberArgs(i)`
/// when index argument `i` is a tensor, `NumberArgs(0)` when the index is
/// out of bounds.
pub fn ts_get(args: &[MyF]) -> Result<MyF, FuncEvalError> {
// NOTE(review): `args[0]` panics on an empty slice — presumably the
// evaluator guarantees at least one argument; confirm at the call site.
match &args[0] {
MyF::F64(_) => Err(FuncEvalError::UnknownFunction),
MyF::Tensor(t) => {
if args.len() == 1 {
Ok(MyF::Tensor(t.clone()))
} else {
// Collect the remaining scalar arguments as a multi-dim index.
let mut index = Vec::with_capacity(args.len() - 1);
for (i, arg) in args.iter().enumerate().skip(1) {
match arg {
MyF::F64(f) => index.push(*f as usize),
MyF::Tensor(_) => return Err(FuncEvalError::NumberArgs(i)),
}
}
match t.get(&*index) {
None => Err(FuncEvalError::NumberArgs(0)),
Some(v) => Ok(MyF::F64(*v)),
}
}
}
}
}
/// Complex counterpart of `ts_get`; indices are taken from the real part
/// of the complex scalar arguments.
pub fn ts_get_cx(args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
match &args[0] {
MyCx::F64(_) => Err(FuncEvalError::UnknownFunction),
MyCx::Tensor(t) => {
if args.len() == 1 {
Ok(MyCx::Tensor(t.clone()))
} else {
// Collect the remaining scalar arguments as a multi-dim index.
let mut index = Vec::with_capacity(args.len() - 1);
for (i, arg) in args.iter().enumerate().skip(1) {
match arg {
MyCx::F64(f) => index.push(f.re as usize),
MyCx::Tensor(_) => return Err(FuncEvalError::NumberArgs(i)),
}
}
match t.get(&*index) {
None => Err(FuncEvalError::NumberArgs(0)),
Some(v) => Ok(MyCx::F64(*v)),
}
}
}
}
}
/// `slice(t, s1, s2, ...)`: slice a tensor, one spec per axis.
///
/// Each spec is either a scalar (select a single index) or a small tensor
/// whose length encodes the slice:
///   0 elems -> insert a new axis, 1 -> `start..`, 2 -> `start..end`,
///   3 -> `start..end` with step; any longer spec is `NumberArgs(i)`.
pub(crate) fn ts_slice(args: &[MyF]) -> Result<MyF, FuncEvalError> {
match &args[0] {
MyF::F64(_) => Err(FuncEvalError::UnknownFunction),
MyF::Tensor(t) => {
if args.len() == 1 {
Ok(MyF::Tensor(t.clone()))
} else {
let mut indices = Vec::with_capacity(args.len() - 1);
for (i, arg) in args.iter().enumerate().skip(1) {
match arg {
MyF::F64(f) => {
let s = SliceInfoElem::Index(*f as isize);
indices.push(s);
}
MyF::Tensor(t) => {
// Spec tensor must be contiguous to view it as a slice.
let v = t.as_slice().ok_or(FuncEvalError::NumberArgs(i))?;
let s = match v.len() {
0 => {
SliceInfoElem::NewAxis
}
1 => {
SliceInfoElem::Slice {
start: v[0] as isize,
end: None,
step: 1,
}
}
2 => {
SliceInfoElem::Slice {
start: v[0] as isize,
end: Some(v[1] as isize),
step: 1,
}
}
3 => {
SliceInfoElem::Slice {
start: v[0] as isize,
end: Some(v[1] as isize),
step: v[2] as isize,
}
}
_ => {
return Err(FuncEvalError::NumberArgs(i));
}
};
indices.push(s);
}
}
}
// Build a dynamic-dimension slice description and apply it.
let iter: SliceInfo<Vec<SliceInfoElem>, IxDyn, IxDyn> = SliceInfo::try_from(indices).map_err(
|_| FuncEvalError::NumberArgs(0),
)?;
Ok(MyF::Tensor(t.slice(iter).into_dyn().to_owned()))
}
}
}
}
/// Complex counterpart of `ts_slice`; slice bounds are taken from the real
/// part of complex values. See `ts_slice` for the spec-length encoding.
pub(crate) fn ts_slice_cx(args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
match &args[0] {
MyCx::F64(_) => Err(FuncEvalError::UnknownFunction),
MyCx::Tensor(t) => {
if args.len() == 1 {
Ok(MyCx::Tensor(t.clone()))
} else {
let mut indices = Vec::with_capacity(args.len() - 1);
for (i, arg) in args.iter().enumerate().skip(1) {
match arg {
MyCx::F64(f) => {
let s = SliceInfoElem::Index(f.re as isize);
indices.push(s);
}
MyCx::Tensor(t) => {
// Spec tensor must be contiguous to view it as a slice.
let v = t.as_slice().ok_or(FuncEvalError::NumberArgs(i))?;
let s = match v.len() {
0 => {
SliceInfoElem::NewAxis
}
1 => {
SliceInfoElem::Slice {
start: v[0].re as isize,
end: None,
step: 1,
}
}
2 => {
SliceInfoElem::Slice {
start: v[0].re as isize,
end: Some(v[1].re as isize),
step: 1,
}
}
3 => {
SliceInfoElem::Slice {
start: v[0].re as isize,
end: Some(v[1].re as isize),
step: v[2].re as isize,
}
}
_ => {
return Err(FuncEvalError::NumberArgs(i));
}
};
indices.push(s);
}
}
}
// Build a dynamic-dimension slice description and apply it.
let iter: SliceInfo<Vec<SliceInfoElem>, IxDyn, IxDyn> = SliceInfo::try_from(indices).map_err(
|_| FuncEvalError::NumberArgs(0),
)?;
Ok(MyCx::Tensor(t.slice(iter).into_dyn().to_owned()))
}
}
}
}
/// Sum a tensor: `sum(t)` sums along axis 0 (or fully, for vectors and
/// n-by-1 column matrices); `sum(t, d)` sums along axis `d`;
/// `sum(t, dims)` sums along every axis listed in the 1-D tensor `dims`.
///
/// Scalars pass through unchanged. Errors with `NumberArgs(1)` when an
/// axis is out of range, not representable as usize, or the axis-list
/// tensor has more than one dimension.
pub fn ts_sum(args: &[MyF]) -> Result<MyF, FuncEvalError> {
    match &args[0] {
        MyF::F64(f) => Ok(MyF::F64(*f)),
        MyF::Tensor(t) => {
            match args.len() {
                1 => {
                    if t.ndim() > 1 {
                        // An n-by-1 column matrix is treated as a vector and fully summed.
                        if t.shape().len() == 2 && t.shape()[1] == 1 {
                            Ok(MyF::F64(t.sum()))
                        } else {
                            Ok(MyF::Tensor(t.sum_axis(Axis(0))))
                        }
                    } else {
                        Ok(MyF::F64(t.sum()))
                    }
                },
                _ => {
                    match &args[1] {
                        MyF::F64(dim_f) => {
                            match dim_f.to_usize() {
                                Some(dim) => {
                                    if dim < t.ndim() {
                                        Ok(MyF::Tensor(t.sum_axis(Axis(dim))))
                                    } else {
                                        Err(FuncEvalError::NumberArgs(1))
                                    }
                                },
                                None => Err(FuncEvalError::NumberArgs(1)),
                            }
                        },
                        MyF::Tensor(vexdim_f) => {
                            match vexdim_f.shape().len() {
                                0 => {
                                    // A 0-d axis tensor behaves like the one-argument form.
                                    if t.ndim() > 1 {
                                        if t.shape().len() == 2 && t.shape()[1] == 1 {
                                            Ok(MyF::F64(t.sum()))
                                        } else {
                                            Ok(MyF::Tensor(t.sum_axis(Axis(0))))
                                        }
                                    } else {
                                        Ok(MyF::F64(t.sum()))
                                    }
                                },
                                1 => {
                                    let mut sum_t = t.clone();
                                    // Each sum_axis removes one axis, so later axis ids shift down
                                    // by the number already summed. Use enumerate rather than a
                                    // hand-maintained counter, matching ts_sum_cx.
                                    // NOTE(review): this assumes `vexdim_f` lists axes in ascending
                                    // order; otherwise `dim - count` can underflow — confirm callers.
                                    for (count, dim_f) in vexdim_f.iter().enumerate() {
                                        match dim_f.to_usize() {
                                            Some(dim) =>
                                                if dim < t.ndim() {
                                                    sum_t = sum_t.sum_axis(Axis(dim - count))
                                                } else {
                                                    return Err(FuncEvalError::NumberArgs(1))
                                                },
                                            None => return Err(FuncEvalError::NumberArgs(1)),
                                        };
                                    }
                                    Ok(MyF::Tensor(sum_t))
                                },
                                _ => Err(FuncEvalError::NumberArgs(1)),
                            }
                        },
                    }
                },
            }
        },
    }
}
/// Complex counterpart of `ts_sum`; axis ids come from the real part of
/// complex scalars. See `ts_sum` for the argument forms and errors.
pub fn ts_sum_cx(args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
match &args[0] {
MyCx::F64(f) => Ok(MyCx::F64(*f)),
MyCx::Tensor(t) => {
match args.len() {
1 => {
if t.ndim() > 1 {
// An n-by-1 column matrix is treated as a vector and fully summed.
if t.shape().len() == 2 && t.shape()[1] == 1 {
Ok(MyCx::F64(t.sum()))
} else {
Ok(MyCx::Tensor(t.sum_axis(Axis(0))))
}
} else {
Ok(MyCx::F64(t.sum()))
}
},
_ => {
match &args[1] {
MyCx::F64(dim_f) => {
match dim_f.re.to_usize() {
Some(dim) => {
if dim < t.ndim() {
Ok(MyCx::Tensor(t.sum_axis(Axis(dim))))
} else {
Err(FuncEvalError::NumberArgs(1))
}
},
None => Err(FuncEvalError::NumberArgs(1)),
}
},
MyCx::Tensor(vexdim_f) => {
match vexdim_f.shape().len() {
0 => {
// A 0-d axis tensor behaves like the one-argument form.
if t.ndim() > 1 {
if t.shape().len() == 2 && t.shape()[1] == 1 {
Ok(MyCx::F64(t.sum()))
} else {
Ok(MyCx::Tensor(t.sum_axis(Axis(0))))
}
} else {
Ok(MyCx::F64(t.sum()))
}
},
1 => {
let mut sum_t = t.clone();
// Each sum_axis removes one axis, so later axis ids shift
// down by the number already summed.
// NOTE(review): assumes axes are listed in ascending order,
// otherwise `dim - count` can underflow — confirm callers.
for (count, dim_f) in vexdim_f.iter().enumerate() {
match dim_f.re.to_usize() {
Some(dim) =>
if dim < t.ndim() {
sum_t = sum_t.sum_axis(Axis(dim - count))
} else {
return Err(FuncEvalError::NumberArgs(1))
},
None => return Err(FuncEvalError::NumberArgs(1)),
};
}
Ok(MyCx::Tensor(sum_t))
},
_ => Err(FuncEvalError::NumberArgs(1)),
}
},
}
},
}
}
}
}
/// `power(x, b)`: raise a scalar or every tensor element to the scalar
/// power `b`. Exactly two arguments; a tensor exponent is `NumberArgs(2)`.
pub fn ts_power(args: &[MyF]) -> Result<MyF, FuncEvalError> {
    match args {
        [] | [_] => Err(FuncEvalError::TooFewArguments),
        [MyF::F64(base), MyF::F64(exp)] => Ok(MyF::F64(base.powf(*exp))),
        [MyF::Tensor(base), MyF::F64(exp)] => Ok(MyF::Tensor(base.mapv(|a| a.powf(*exp)))),
        [_, MyF::Tensor(_)] => Err(FuncEvalError::NumberArgs(2)),
        _ => Err(FuncEvalError::TooManyArguments),
    }
}
/// Complex counterpart of `ts_power`: complex exponentiation via `powc`.
/// Exactly two arguments; a tensor exponent is `NumberArgs(2)`.
pub fn ts_power_cx(args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
    match args {
        [] | [_] => Err(FuncEvalError::TooFewArguments),
        [MyCx::F64(base), MyCx::F64(exp)] => Ok(MyCx::F64(base.powc(*exp))),
        [MyCx::Tensor(base), MyCx::F64(exp)] => Ok(MyCx::Tensor(base.mapv(|a| a.powc(*exp)))),
        [_, MyCx::Tensor(_)] => Err(FuncEvalError::NumberArgs(2)),
        _ => Err(FuncEvalError::TooManyArguments),
    }
}
/// `diag(x)`: for a vector (1-D tensor, or a 1-by-n / n-by-1 matrix) build
/// the square diagonal matrix; for any other matrix extract its diagonal;
/// scalars pass through unchanged.
pub fn ts_diag(args: &[MyF]) -> Result<MyF, FuncEvalError> {
match &args[0] {
MyF::F64(f) => Ok(MyF::F64(*f)),
MyF::Tensor(t) => {
if t.ndim() > 1 {
// Row and column matrices are treated as vectors.
if t.shape().len() == 2 && (t.shape()[0] == 1 || t.shape()[1] == 1) {
Ok(MyF::Tensor(Array2::from_diag(&arr1(t.clone().into_raw_vec_and_offset().0.as_slice())).into_dyn()))
} else {
Ok(MyF::Tensor(t.diag().into_dyn().to_owned()))
}
} else {
Ok(MyF::Tensor(Array2::from_diag(&arr1(t.clone().into_raw_vec_and_offset().0.as_slice())).into_dyn()))
}
}
}
}
/// Complex counterpart of `ts_diag`: vectors (1-D, 1-by-n or n-by-1) become
/// a diagonal matrix; other matrices yield their diagonal; scalars pass
/// through.
///
/// Fix: also treat a 1-by-n ROW matrix as a vector (`t.shape()[0] == 1`),
/// matching the real-valued `ts_diag`; previously only the column case was
/// recognised and a row vector fell through to `diag()`.
pub(crate) fn ts_diag_cx(args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
    match &args[0] {
        MyCx::F64(f) => Ok(MyCx::F64(*f)),
        MyCx::Tensor(t) => {
            if t.ndim() > 1 {
                // Row and column matrices are treated as vectors, as in ts_diag.
                if t.shape().len() == 2 && (t.shape()[0] == 1 || t.shape()[1] == 1) {
                    Ok(MyCx::Tensor(Array2::from_diag(&arr1(t.clone().into_raw_vec_and_offset().0.as_slice())).into_dyn()))
                } else {
                    Ok(MyCx::Tensor(t.diag().into_dyn().to_owned()))
                }
            } else {
                Ok(MyCx::Tensor(Array2::from_diag(&arr1(t.clone().into_raw_vec_and_offset().0.as_slice())).into_dyn()))
            }
        }
    }
}
/// `eye(n)`: the n-by-n identity matrix. `NumberArgs(0)` when the argument
/// is a tensor or less than 1.
pub fn ts_eye(args: &[MyF]) -> Result<MyF, FuncEvalError> {
    match &args[0] {
        MyF::Tensor(_) => Err(FuncEvalError::NumberArgs(0)),
        MyF::F64(n) if *n < 1. => Err(FuncEvalError::NumberArgs(0)),
        MyF::F64(n) => Ok(MyF::Tensor(Array2::eye(n.to_usize().unwrap()).into_dyn())),
    }
}
/// Complex counterpart of `ts_eye`; the size is the real part of the
/// complex scalar. `NumberArgs(0)` for a tensor argument or a size < 1.
pub fn ts_eye_cx(args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
    match &args[0] {
        MyCx::Tensor(_) => Err(FuncEvalError::NumberArgs(0)),
        MyCx::F64(n) if n.re < 1. => Err(FuncEvalError::NumberArgs(0)),
        MyCx::F64(n) => Ok(MyCx::Tensor(Array2::eye(n.re.to_usize().unwrap()).into_dyn())),
    }
}
/// `zeros(m, n)`: an m-by-n matrix of zeros.
///
/// Errors: `NumberArgs` when a dimension is a tensor or less than 1, and
/// `TooFewArguments` when fewer than two arguments are supplied (the
/// previous code panicked on `args[1]` in that case).
pub fn ts_zeros(args: &[MyF]) -> Result<MyF, FuncEvalError> {
    if args.len() < 2 {
        return Err(FuncEvalError::TooFewArguments);
    }
    match (&args[0], &args[1]) {
        (MyF::Tensor(_), _) => Err(FuncEvalError::NumberArgs(0)),
        (MyF::F64(_), MyF::Tensor(_)) => Err(FuncEvalError::NumberArgs(1)),
        (MyF::F64(f1), MyF::F64(f2)) => {
            if *f1 < 1. {
                Err(FuncEvalError::NumberArgs(0))
            } else if *f2 < 1. {
                Err(FuncEvalError::NumberArgs(1))
            } else {
                // NOTE(review): to_usize() still panics for NaN/huge values — pre-existing.
                Ok(MyF::Tensor(Array2::zeros([f1.to_usize().unwrap(), f2.to_usize().unwrap()]).into_dyn()))
            }
        }
    }
}
/// Complex counterpart of `ts_zeros`; dimensions come from the real parts.
///
/// Errors: `NumberArgs` when a dimension is a tensor or less than 1, and
/// `TooFewArguments` when fewer than two arguments are supplied (the
/// previous code panicked on `args[1]` in that case).
pub fn ts_zeros_cx(args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
    if args.len() < 2 {
        return Err(FuncEvalError::TooFewArguments);
    }
    match (&args[0], &args[1]) {
        (MyCx::Tensor(_), _) => Err(FuncEvalError::NumberArgs(0)),
        (MyCx::F64(_), MyCx::Tensor(_)) => Err(FuncEvalError::NumberArgs(1)),
        (MyCx::F64(f1), MyCx::F64(f2)) => {
            if f1.re < 1. {
                Err(FuncEvalError::NumberArgs(0))
            } else if f2.re < 1. {
                Err(FuncEvalError::NumberArgs(1))
            } else {
                Ok(MyCx::Tensor(Array2::zeros([f1.re.to_usize().unwrap(), f2.re.to_usize().unwrap()]).into_dyn()))
            }
        }
    }
}
/// `ones(m, n)`: an m-by-n matrix of ones.
///
/// Errors: `NumberArgs` when a dimension is a tensor or less than 1, and
/// `TooFewArguments` when fewer than two arguments are supplied (the
/// previous code panicked on `args[1]` in that case).
pub fn ts_ones(args: &[MyF]) -> Result<MyF, FuncEvalError> {
    if args.len() < 2 {
        return Err(FuncEvalError::TooFewArguments);
    }
    match (&args[0], &args[1]) {
        (MyF::Tensor(_), _) => Err(FuncEvalError::NumberArgs(0)),
        (MyF::F64(_), MyF::Tensor(_)) => Err(FuncEvalError::NumberArgs(1)),
        (MyF::F64(f1), MyF::F64(f2)) => {
            if *f1 < 1. {
                Err(FuncEvalError::NumberArgs(0))
            } else if *f2 < 1. {
                Err(FuncEvalError::NumberArgs(1))
            } else {
                Ok(MyF::Tensor(Array2::ones([f1.to_usize().unwrap(), f2.to_usize().unwrap()]).into_dyn()))
            }
        }
    }
}
/// Complex counterpart of `ts_ones`; dimensions come from the real parts.
///
/// Errors: `NumberArgs` when a dimension is a tensor or less than 1, and
/// `TooFewArguments` when fewer than two arguments are supplied (the
/// previous code panicked on `args[1]` in that case).
pub fn ts_ones_cx(args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
    if args.len() < 2 {
        return Err(FuncEvalError::TooFewArguments);
    }
    match (&args[0], &args[1]) {
        (MyCx::Tensor(_), _) => Err(FuncEvalError::NumberArgs(0)),
        (MyCx::F64(_), MyCx::Tensor(_)) => Err(FuncEvalError::NumberArgs(1)),
        (MyCx::F64(f1), MyCx::F64(f2)) => {
            if f1.re < 1. {
                Err(FuncEvalError::NumberArgs(0))
            } else if f2.re < 1. {
                Err(FuncEvalError::NumberArgs(1))
            } else {
                Ok(MyCx::Tensor(Array2::ones([f1.re.to_usize().unwrap(), f2.re.to_usize().unwrap()]).into_dyn()))
            }
        }
    }
}
/// `range(start, end, step)`: a 1-D tensor of evenly spaced values, as
/// `Array1::range`. Exactly three scalar arguments are required; argument
/// `k` being a tensor yields `NumberArgs(k)`.
pub fn ts_range(args: &[MyF]) -> Result<MyF, FuncEvalError> {
    if args.len() < 3 {
        return Err(FuncEvalError::TooFewArguments);
    }
    if args.len() > 3 {
        return Err(FuncEvalError::TooManyArguments);
    }
    // Extract start/end/step in order so the first tensor argument wins.
    let mut scalars = [0f64; 3];
    for (k, arg) in args.iter().enumerate() {
        match arg {
            MyF::F64(v) => scalars[k] = *v,
            MyF::Tensor(_) => return Err(FuncEvalError::NumberArgs(k)),
        }
    }
    Ok(MyF::Tensor(Array1::range(scalars[0], scalars[1], scalars[2]).into_dyn()))
}
/// `sparse(i, j, v, m, n)`: build a DENSE m-by-n matrix from triplets,
/// i.e. `matrix[i[k], j[k]] = v[k]` (MATLAB-style `sparse`, materialised
/// densely).
///
/// Errors: `NumberArgs(0..=2)` when i/j/v are scalars, `NumberArgs(3..=4)`
/// when m/n are tensors or < 1, `NumberArgs(0)` when i/j/v lengths differ
/// or an index exceeds the matrix bounds.
pub fn ts_sparse(args: &[MyF]) -> Result<MyF, FuncEvalError> {
if args.len() < 5 {
return Err(FuncEvalError::TooFewArguments)
} else if args.len() > 5 {
return Err(FuncEvalError::TooManyArguments)
}
match &args[0] {
MyF::F64(_) => Err(FuncEvalError::NumberArgs(0)),
MyF::Tensor(i) => {
match &args[1] {
MyF::F64(_) => Err(FuncEvalError::NumberArgs(1)),
MyF::Tensor(j) => {
match &args[2] {
MyF::F64(_) => Err(FuncEvalError::NumberArgs(2)),
MyF::Tensor(v) => {
match &args[3] {
MyF::F64(m) => {
match &args[4] {
MyF::F64(n) => {
if *m < 1. {
Err(FuncEvalError::NumberArgs(3))
} else if *n < 1. {
Err(FuncEvalError::NumberArgs(4))
} else {
// The three triplet vectors must have equal length.
let mut eq_size = i.len() == j.len();
if j.len() != v.len() {
eq_size = false;
}
if eq_size {
let mut matrix = Array2::zeros([m.to_usize().unwrap(), n.to_usize().unwrap()]);
let i_vec = i.to_owned().into_raw_vec_and_offset().0;
let j_vec = j.to_owned().into_raw_vec_and_offset().0;
let v_vec = v.to_owned().into_raw_vec_and_offset().0;
// Scatter each triplet; indices are bounds-checked
// against m/n before writing.
for k in 0..i.len() {
if i_vec[k] >= *m {
return Err(FuncEvalError::NumberArgs(0))
}
if j_vec[k] >= *n {
return Err(FuncEvalError::NumberArgs(1))
}
*matrix.get_mut([i_vec[k].to_usize().unwrap(), j_vec[k].to_usize().unwrap()]).unwrap() = v_vec[k];
}
Ok(MyF::Tensor(matrix.into_dyn()))
} else {
Err(FuncEvalError::NumberArgs(0))
}
}
}
MyF::Tensor(_) => Err(FuncEvalError::NumberArgs(4)),
}
},
MyF::Tensor(_) => Err(FuncEvalError::NumberArgs(3)),
}
}
}
},
}
},
}
}
/// `size(t)`: the tensor's shape as a 1-D tensor of f64 dimension sizes.
/// A scalar argument is `NumberArgs(0)`.
pub fn ts_size(args: &[MyF]) -> Result<MyF, FuncEvalError> {
    match &args[0] {
        MyF::F64(_) => Err(FuncEvalError::NumberArgs(0)),
        MyF::Tensor(t) => {
            let dims: Vec<f64> = t.shape().iter().map(|d| d.to_f64().unwrap()).collect();
            Ok(MyF::Tensor(Array1::from_vec(dims).into_dyn()))
        }
    }
}
/// Complex counterpart of `ts_size`: dimension sizes become complex values
/// with zero imaginary part. A scalar argument is `NumberArgs(0)`.
pub fn ts_size_cx(args: &[MyCx]) -> Result<MyCx, FuncEvalError> {
    match &args[0] {
        MyCx::F64(_) => Err(FuncEvalError::NumberArgs(0)),
        MyCx::Tensor(t) => {
            let dims: Vec<Complex64> = t
                .shape()
                .iter()
                .map(|d| Complex64::new(d.to_f64().unwrap(), 0.))
                .collect();
            Ok(MyCx::Tensor(Array1::from_vec(dims).into_dyn()))
        }
    }
}
}
// The code above must be kept identical to the corresponding code in sparrowzz
\ No newline at end of file
[package]
name = "mems"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true
authors = ["dongshufeng <dongshufeng@zju.edu.cn>"]
[dependencies]
serde = { version = "1.0", features = ["derive"] }
ciborium = "0.2"
arrow-schema = { version = "56.1", features = ["serde"] }
# this project
eig-domain = { path = "../eig-domain" }
[package]
name = "ds-guizhou"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["cdylib"]
[dependencies]
log = "0.4"
ciborium = "0.2"
arrow-schema = { version = "56.1", features = ["serde"] }
mems = { path = "../.." }
csv = "1.3.0"
bytes = "1.10"
chrono = "0.4.38"
chrono-tz = "0.10"
use arrow_schema::{DataType, TimeUnit, Field, Schema};
use bytes::{Buf, BufMut, BytesMut};
use log::info;
use mems::model::{PluginInput, PluginOutput};
use chrono::{Days, Duration, Timelike, Utc};
use chrono_tz::Asia::Shanghai;
// Backing buffer for the serialized plugin output; its pointer/length are
// returned to the wasm host packed into a u64.
// NOTE(review): `static mut` is sound only if the host never calls `run`
// concurrently or reads OUTPUT across calls — confirm the wasm runtime model.
static mut OUTPUT: Vec<u8> = vec![];
/// Wasm plugin entry point: reads a CSV of raw values from the plugin input,
/// attaches 15-minute timestamps (Asia/Shanghai) starting either at the next
/// midnight (>= 24 values, day-ahead profile) or at the next quarter hour,
/// and returns a (datetime, value) CSV plus its Arrow schema.
#[no_mangle]
pub unsafe fn run(ptr: i32, len: u32) -> u64 {
info!("Read plugin input firstly");
// Deserialize the PluginInput from host-provided wasm linear memory.
let input = unsafe {
let slice = std::slice::from_raw_parts(ptr as _, len as _);
let input: PluginInput = ciborium::from_reader(slice).unwrap();
input
};
// Output schema: timezone-aware millisecond timestamps plus the value.
let schema = Schema::new(vec![
Field::new("datetime", DataType::Timestamp(TimeUnit::Millisecond, Some("Asia/Shanghai".into())), false),
Field::new("value", DataType::Float64, false),
]);
let mut csv_str = String::from("datetime,value\n");
let mut rdr = csv::ReaderBuilder::new().has_headers(true).from_reader(&*input.bytes);
let records = rdr.records();
let mut values = vec![];
for record in records {
if let Ok(f) = record {
let s = f.get(0).unwrap().trim();
// NOTE(review): parse().unwrap() panics on malformed input rows — confirm inputs are trusted.
let value = s.parse::<f64>().unwrap();
values.push(value);
}
}
let now = Utc::now().with_timezone(&Shanghai);
let today = now.date_naive();
// >= 24 values: a day-ahead profile starting at the coming midnight
// (today's midnight only if we are still within the first hour);
// otherwise start at the next 15-minute boundary.
let starttime = if values.len() >= 24 {
let startday = if now.hour() < 1 {
today
} else {
today.checked_add_days(Days::new(1)).unwrap()
};
startday.and_hms_opt(0, 0, 0)
.unwrap()
.and_local_timezone(Shanghai)
.unwrap()
} else {
let minutes = now.minute() / 15 * 15;
let time0 = today.and_hms_opt(now.hour(), minutes, 0)
.unwrap()
.and_local_timezone(Shanghai)
.unwrap();
time0 + Duration::minutes(15)
};
// One row per value, 15 minutes apart.
for (i, value) in values.into_iter().enumerate() {
let date = starttime + Duration::minutes((15 * i) as i64);
let date_str = date.format("%Y-%m-%d %H:%M:%S");
csv_str.push_str(&format!("{date_str}, {value}\n"));
}
let csv_bytes = vec![("".to_string(), csv_str.into_bytes())];
let output = PluginOutput {
error_msg: None,
schema: Some(vec![schema]),
csv_bytes,
};
// Serialize into the static buffer and hand (offset, len) back to the host
// packed as a single u64.
#[allow(static_mut_refs)]
ciborium::into_writer(&output, &mut OUTPUT).unwrap();
let offset = OUTPUT.as_ptr() as i32;
let len = OUTPUT.len() as u32;
let mut bytes = BytesMut::with_capacity(8);
bytes.put_i32(offset);
bytes.put_u32(len);
return bytes.get_u64();
}
#[cfg(test)]
mod tests {
use super::*;
// Smoke test for the start-of-day computation used by `run`; prints the
// chosen midnight, no assertions.
#[test]
fn test() {
let now = Utc::now().with_timezone(&Shanghai);
let today = now.date_naive();
let startday = if now.hour() < 1 {
today
} else {
today.checked_add_days(chrono::Days::new(1)).unwrap()
};
let a = startday.and_hms_opt(0, 0, 0)
.unwrap()
.and_local_timezone(Shanghai)
.unwrap();
println!("{:?}", a);
}
}
\ No newline at end of file
# 插件化开发实例之配电网潮流计算软件
## 1. 项目简介
### 1.1 项目背景
## 2. 拓扑分析
### 2.1 静态拓扑分析
#### 输入
1. 电气岛
2. 属性定义
3. 资源定义
#### 输出
- static_topo,如果输出的边是该名称,或者输出的边不是下面两种情况,输出静态拓扑,即下面六列
<table>
<th>source</th>
<th>target</th>
<th>id</th>
<th>type</th>
<th>open</th>
<th>name</th>
<tr>
<td>UInt64</td>
<td>UInt64</td>
<td>UInt64</td>
<td>UInt32</td>
<td>Boolean</td>
<td>Utf8</td>
</tr>
</table>
- terminal_cn_dev,如果输出的边是该名称,则输出下面几列
<table>
<th>terminal</th>
<th>cn</th>
<th>dev</th>
<th>type</th>
<tr>
<td>UInt64</td>
<td>UInt64</td>
<td>UInt64</td>
<td>UInt32</td>
</tr>
</table>
- point_terminal_phase: 如果输出的边是该名称,则输出下面几列
<table>
<th>point</th>
<th>terminal</th>
<th>phase</th>
<tr>
<td>UInt64</td>
<td>UInt64</td>
<td>Utf8</td>
</tr>
</table>
### 2.2 动态拓扑分析
#### 输入
- 电气岛
- 量测
- 静态拓扑:上述输出的三个表格
#### 输出
- dyn_topo: 如果输出的边是该名称,或者不是下面的情况,默认输出下面几列
<table>
<th>cn</th>
<th>tn</th>
<tr>
<td>UInt64</td>
<td>UInt64</td>
</tr>
</table>
- dev_topo: 如果输出的边是该名称,则输出下面几列
<table>
<th>terminal</th>
<th>cn</th>
<th>tn</th>
<th>dev</th>
<tr>
<td>UInt64</td>
<td>UInt64</td>
<td>UInt64</td>
<td>UInt64</td>
</tr>
</table>
## 3. 输入参数准备
### 3.1 设备电气参数计算
#### 输入
- 电气岛
- 配置表格,第1列是config的key,第二列是json格式的矩阵
<table>
<th>config</th>
<th>ohm_per_km</th>
<tr>
<td>Utf8</td>
<td>Utf8</td>
</tr>
</table>
#### 输出
<table>
<th>dev_id</th>
<th>a_re</th>
<th>a_im</th>
<th>b_re</th>
<th>b_im</th>
<th>c_re</th>
<th>c_im</th>
<th>d_re</th>
<th>d_im</th>
<tr>
<td>UInt64</td>
<td>Utf8</td>
<td>Utf8</td>
<td>Utf8</td>
<td>Utf8</td>
<td>Utf8</td>
<td>Utf8</td>
<td>Utf8</td>
<td>Utf8</td>
</tr>
</table>
### 3.2 潮流方程准备脚本
#### shunt_meas
需要输入以下两个表格
- terminal_cn_dev
- point_terminal_phase
输出下面几列
<table>
<th>point</th>
<th>terminal</th>
<th>phase</th>
<tr>
<td>UInt64</td>
<td>UInt64</td>
<td>Utf8</td>
</tr>
</table>
#### tn_input
需要输入以下两个表格
- dev_topo
- shunt_meas
输出下面几列
<table>
<th>tn</th>
<th>phase</th>
<th>unit</th>
<th>value</th>
<tr>
<td>UInt64</td>
<td>Utf8</td>
<td>Utf8</td>
<td>Float64</td>
</tr>
</table>
## 4. 潮流计算
### 4.1 潮流计算模型
\ No newline at end of file
[package]
name = "ds-3phase-pf"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["cdylib"]
[dependencies]
serde_json = "1.0"
ciborium = "0.2"
arrow-schema = { version = "56.1", features = ["serde"] }
csv = "1.3.0"
num-complex = "0.4"
ndarray = "0.16"
nalgebra = "0.34"
ds-common = { path = "../ds-common" }
eig-domain = { path = "../../../../eig-domain" }
mems = { path = "../../.." }
bytes = "1.10"
\ No newline at end of file
#![allow(non_snake_case)]
use std::collections::HashMap;
use arrow_schema::{DataType, Field, Schema};
use bytes::{Buf, BufMut, BytesMut};
use ndarray::Array2;
use ds_common::{DEV_CONDUCTOR_DF_NAME, DEV_TOPO_DF_NAME, DS_PF_NLP_CONS, DS_PF_NLP_OBJ, DYN_TOPO_DF_NAME, TN_INPUT_DF_NAME};
use ds_common::dyn_topo::{read_dev_topo, read_dyn_topo};
use ds_common::tn_input::read_tn_input;
use mems::model::{PluginInput, PluginOutput};
use crate::read::read_dev_ohm;
mod read;
mod nlp;
// Backing buffer for the serialized plugin output; its pointer/length are
// returned to the wasm host packed into a u64.
// NOTE(review): `static mut` is sound only if the host never calls `run`
// concurrently or reads OUTPUT across calls — confirm the wasm runtime model.
static mut OUTPUT: Vec<u8> = vec![];
/// Wasm plugin entry point for the three-phase power-flow NLP builder:
/// parses the incoming dataframes (dynamic topology, device topology,
/// conductor matrices, tn inputs) and emits objective/constraint tables.
///
/// NOTE(review): the parsed tables are currently never read when building
/// the output, and both output tables contain only a header row — this
/// appears to be work in progress; confirm before relying on the output.
#[no_mangle]
pub unsafe fn run(ptr: i32, len: u32) -> u64 {
// Deserialize the PluginInput from host-provided wasm linear memory.
let input = unsafe {
let slice = std::slice::from_raw_parts(ptr as _, len as _);
let input: PluginInput = ciborium::from_reader(slice).unwrap();
input
};
let from = 0;
let mut error = None;
// get from dynamic topology wasm node
// cn, tn
let mut dyn_topo: Vec<Vec<u64>>;
// terminal, cn, tn, dev
let mut dev_topo: Vec<Vec<u64>>;
// dev id, conductor matrix, get from conductor impedance cal wasm node
let mut dev_conductor: HashMap<u64, Vec<Array2<f64>>>;
// tn id with input
let mut input_tns;
// input pos
let mut input_phases;
// input types
let mut input_types;
// input values
let mut input_values;
// NOTE(review): `from` is never advanced past the first dataframe
// (`let from = 0;` is immutable), so every iteration reads from offset 0 —
// looks like a bug to confirm against the upstream version.
for i in 0..input.dfs_len.len() {
let size = input.dfs_len[i] as usize;
let end = from + size;
let mut rdr = csv::ReaderBuilder::new().has_headers(true).from_reader(&input.bytes[from..end]);
let mut records = rdr.records();
// Dispatch the i-th input dataframe to its reader by name.
if input.dfs[i] == DYN_TOPO_DF_NAME {
match read_dyn_topo(&mut records) {
Ok(v) => dyn_topo = v,
Err(s) => {
error = Some(s);
break;
}
}
} else if input.dfs[i] == DEV_TOPO_DF_NAME {
match read_dev_topo(&mut records) {
Ok(v) => dev_topo = v,
Err(s) => {
error = Some(s);
break;
}
}
} else if input.dfs[i] == DEV_CONDUCTOR_DF_NAME {
match read_dev_ohm(&mut records) {
Ok(v) => dev_conductor = v,
Err(s) => {
error = Some(s);
break;
}
}
} else if input.dfs[i] == TN_INPUT_DF_NAME {
match read_tn_input(&mut records) {
Ok(v) => (input_tns, input_phases, input_types, input_values) = v,
Err(s) => {
error = Some(s);
break;
}
}
}
}
// Either propagate the first read error, or emit the (placeholder)
// objective and constraint tables.
let output = if error.is_some() {
PluginOutput {
error_msg: error,
schema: None,
csv_bytes: vec![],
}
} else {
let mut obj_csv_str = String::from("cn,tn\n");
// build schema
let obj_schema = Schema::new(vec![
Field::new("cn", DataType::UInt64, false),
Field::new("tn", DataType::UInt64, false),
]);
let mut cons_csv_str = String::from("cn,tn\n");
// build schema
let cons_schema = Schema::new(vec![
Field::new("cn", DataType::UInt64, false),
Field::new("tn", DataType::UInt64, false),
]);
let csv_bytes = vec![
(DS_PF_NLP_OBJ.to_string(), obj_csv_str.into_bytes()),
(DS_PF_NLP_CONS.to_string(), cons_csv_str.into_bytes()),
];
PluginOutput {
error_msg: None,
schema: Some(vec![obj_schema, cons_schema]),
csv_bytes,
}
};
// Serialize into the static buffer and hand (offset, len) back to the
// host packed as a single u64.
ciborium::into_writer(&output, &mut OUTPUT).unwrap();
let offset = OUTPUT.as_ptr() as i32;
let len = OUTPUT.len() as u32;
let mut bytes = BytesMut::with_capacity(8);
bytes.put_i32(offset);
bytes.put_u32(len);
bytes.get_u64()
}
\ No newline at end of file
use std::collections::HashMap;
use ndarray::{Array, array, Array2, Ix2};
use num_complex::{Complex64, ComplexFloat};
use eig_domain::prop::DataUnit;
use mems::model::dev::MeasPhase;
/// Build the power-flow NLP constraint strings for every terminal node.
///
/// For each tn, a power-balance expression is assembled from the AC lines
/// connecting it to its neighbours (via the device R/X matrices), then the
/// measured inputs pin variables with equality constraints of the form
/// `NAME-VALUE:[0/0]` (a zero-width bound). Variable naming follows
/// `get_pf_nlp_variables`: `P_/Q_/V_/D_{tn}_{phase}`.
///
/// NOTE(review): the bound format `:[0/0]` and the helpers
/// (`get_pq_of_acline`, `extend_exp_pq_of_acline`,
/// `get_node_pq_load_constraints`) are defined elsewhere — semantics not
/// visible here.
pub fn get_pf_nlp_constraints(
tns: &[u64],
// cn, tn
dyn_topo: Vec<Vec<u64>>,
// terminal, cn, tn, dev
dev_topo: Vec<Vec<u64>>,
dev_matrix: HashMap<u64, Vec<Array2<f64>>>,
input_tns: Vec<u64>,
input_phases: Vec<MeasPhase>,
input_types: Vec<DataUnit>,
input_values: Vec<f64>
) -> Option<Vec<String>> {
let mut constraint = Vec::with_capacity(dyn_topo.len());
// Node power-injection expression for every tn.
for tn1 in tns {
let mut power_exp = HashMap::new();
let connect_tn_devs = get_connect_tn_devs(*tn1, &dev_topo);
if connect_tn_devs.len() == 0 {
continue;
}
for tn_dev in connect_tn_devs {
let tn2 = tn_dev[0];
let dev = tn_dev[1];
// matrix[0] is the resistance matrix, matrix[1] the reactance matrix;
// combine them into a 3x3 complex impedance matrix.
let matrix = dev_matrix.get(&dev).unwrap();
let r = matrix[0].clone();
let x = matrix[1].clone();
let r_x = array![
[Complex64::new(r[[0, 0]], x[[0, 0]]), Complex64::new(r[[0, 1]], x[[0, 1]]), Complex64::new(r[[0, 2]], x[[0, 2]])],
[Complex64::new(r[[1, 0]], x[[1, 0]]), Complex64::new(r[[1, 1]], x[[1, 1]]), Complex64::new(r[[1, 2]], x[[1, 2]])],
[Complex64::new(r[[2, 0]], x[[2, 0]]), Complex64::new(r[[2, 1]], x[[2, 1]]), Complex64::new(r[[2, 2]], x[[2, 2]])]
];
let (exps, mode) = get_pq_of_acline(r_x, *tn1, tn2).unwrap();
extend_exp_pq_of_acline(&mut power_exp, exps, mode);
}
get_node_pq_load_constraints(&mut constraint, power_exp, *tn1);
}
// Pin measured inputs: each measurement becomes an equality constraint.
for i in 0..input_tns.len() {
let tn = input_tns[i];
let v = input_values[i];
match input_types[i] {
DataUnit::W => {
// Active power measurement on one phase.
let mut active_exp = String::new();
match input_phases[i] {
MeasPhase::A => active_exp = format!("P_{tn}_A-{v:.4}:[0/0]"),
MeasPhase::B => active_exp = format!("P_{tn}_B-{v:.4}:[0/0]"),
MeasPhase::C => active_exp = format!("P_{tn}_C-{v:.4}:[0/0]"),
_ => continue,
}
constraint.push(active_exp);
}
DataUnit::Var => {
// Reactive power measurement on one phase.
let mut reactive_exp = String::new();
match input_phases[i] {
MeasPhase::A => reactive_exp = format!("Q_{tn}_A-{v:.4}:[0/0]"),
MeasPhase::B => reactive_exp = format!("Q_{tn}_B-{v:.4}:[0/0]"),
MeasPhase::C => reactive_exp = format!("Q_{tn}_C-{v:.4}:[0/0]"),
_ => continue,
}
constraint.push(reactive_exp);
}
DataUnit::V => {
match input_phases[i] {
// A total (line) voltage fixes all three phase magnitudes at
// V/sqrt(3) and the angles at 0 / -120deg / +120deg.
MeasPhase::Total => {
let v_p = v / 3.0f64.sqrt();
constraint.push(format!("V_{tn}_A-{v_p:.4}:[0/0]"));
constraint.push(format!("D_{tn}_A-0:[0/0]"));
constraint.push(format!("V_{tn}_B-{v_p:.4}:[0/0]"));
constraint.push(format!("D_{tn}_B--2/3*pi:[0/0]"));
constraint.push(format!("V_{tn}_C-{v_p:.4}:[0/0]"));
constraint.push(format!("D_{tn}_C-2/3*pi:[0/0]"));
},
MeasPhase::A => constraint.push(format!("V_{tn}_A-{v:.4}:[0/0]")),
MeasPhase::B => constraint.push(format!("V_{tn}_B-{v:.4}:[0/0]")),
MeasPhase::C => constraint.push(format!("V_{tn}_C-{v:.4}:[0/0]")),
_ => continue,
}
}
_ => {}
}
}
Some(constraint)
}
/// Find, for each device that has a terminal on node `tn`, the node on
/// the device's far side.
///
/// `dev_topo` rows are `[terminal, cn, tn, dev]`. The result is a list
/// of `[far_tn, dev]` pairs, in `dev_topo` row order. A node with no
/// connected device yields an empty vec.
pub fn get_connect_tn_devs(tn: u64, dev_topo: &[Vec<u64>]) -> Vec<Vec<u64>> {
    // Devices that have at least one terminal on tn.
    // HashSet membership keeps the second pass O(n) instead of O(n*m).
    let connect_devs: std::collections::HashSet<u64> = dev_topo
        .iter()
        .filter(|topo| topo[2] == tn)
        .map(|topo| topo[3])
        .collect();
    // (far tn, dev) pairs: rows of the same devices on other nodes.
    dev_topo
        .iter()
        .filter(|topo| topo[2] != tn && connect_devs.contains(&topo[3]))
        .map(|topo| vec![topo[2], topo[3]])
        .collect()
}
/// Generate the solver variable declarations for every topology node:
/// per-phase voltage magnitude V, angle D, and injections P and Q, each
/// with its bounds. The returned string keeps a trailing comma.
pub fn get_pf_nlp_variables(tns: &[u64]) -> String {
    let mut out = String::new();
    for tn in tns {
        for ph in ["A", "B", "C"] {
            out.push_str(&format!("V_{tn}_{ph}:[0/99999999],D_{tn}_{ph}:[-3.2/3.2],"));
        }
        for ph in ["A", "B", "C"] {
            out.push_str(&format!("P_{tn}_{ph}:[-99999999/99999999],"));
        }
        for ph in ["A", "B", "C"] {
            out.push_str(&format!("Q_{tn}_{ph}:[-99999999/99999999],"));
        }
    }
    out
}
// Cannot handle Delta-connected loads: a Delta load derives phase currents
// from phase-to-phase voltages and only then line currents, so the
// per-node three-phase power-balance equations built here do not apply.
/// Build the P/Q branch-flow expression strings for an AC line between
/// nodes `tn1` and `tn2` from its 3x3 complex impedance matrix `r_x`.
///
/// A phase whose diagonal entry is exactly zero is treated as absent.
/// Returns `(expressions, mode)` where `mode` is a phase bitmask
/// (1 = A, 2 = B, 4 = C) and `expressions` holds (P, Q) string pairs for
/// each present phase in A, B, C order. Returns `None` when no phase is
/// present (all diagonal entries zero).
fn get_pq_of_acline(r_x: Array<Complex64, Ix2>, tn1: u64, tn2: u64) -> Option<(Vec<String>, u32)> {
    // Phase-presence bitmask
    let mut mode:u32 = 0;
    if r_x[[0, 0]] != Complex64::new(0.0, 0.0) {
        mode += 1;
    }
    if r_x[[1, 1]] != Complex64::new(0.0, 0.0) {
        mode += 2;
    }
    if r_x[[2, 2]] != Complex64::new(0.0, 0.0) {
        mode += 4;
    }
    let mut result = Vec::new();
    // Invert the (sub)matrix of present phases to get the admittance,
    // then emit the classic polar power-flow P/Q expressions.
    match mode {
        // single phase: A
        1 => {
            let gb = r_x[[0, 0]].inv();
            let g = gb.re();
            let b = gb.im();
            // P: V1a*(V1a*g-V2a*(g*cos(t1a-t2a)+b*sin(t1a-t2a)))
            // Q: V1a*(-V1a*b-V2a*(g*sin(t1a-t2a)-b*cos(t1a-t2a)))
            //  = -V1a*(V1a*b+V2a*(g*sin(t1a-t2a)-b*cos(t1a-t2a)))
            //P_A
            result.push(
                format!("V_{tn1}_A*(V_{tn1}_A*{g:.4}-V_{tn2}_A*({g:.4}*cos(D_{tn1}_A-D_{tn2}_A)+{b:.4}*sin(D_{tn1}_A-D_{tn2}_A)))")
            );
            //Q_A
            result.push(
                format!("-V_{tn1}_A*(V_{tn1}_A*{b:.4}+V_{tn2}_A*({g:.4}*sin(D_{tn1}_A-D_{tn2}_A)-{b:.4}*cos(D_{tn1}_A-D_{tn2}_A)))"),
            );
        }
        // single phase: B
        2 => {
            let gb = r_x[[1, 1]].inv();
            let g = gb.re();
            let b = gb.im();
            //P_B
            result.push(
                format!("V_{tn1}_B*(V_{tn1}_B*{g:.4}-V_{tn2}_B*({g:.4}*cos(D_{tn1}_B-D_{tn2}_B)+{b:.4}*sin(D_{tn1}_B-D_{tn2}_B)))"),
            );
            //Q_B
            result.push(
                format!("-V_{tn1}_B*(V_{tn1}_B*{b:.4}+V_{tn2}_B*({g:.4}*sin(D_{tn1}_B-D_{tn2}_B)-{b:.4}*cos(D_{tn1}_B-D_{tn2}_B)))"),
            );
        }
        // single phase: C
        4 => {
            let gb = r_x[[2, 2]].inv();
            let g = gb.re();
            let b = gb.im();
            //P_C
            result.push(
                format!("V_{tn1}_C*(V_{tn1}_C*{g:.4}-V_{tn2}_C*({g:.4}*cos(D_{tn1}_C-D_{tn2}_C)+{b:.4}*sin(D_{tn1}_C-D_{tn2}_C)))"),
            );
            //Q_C
            result.push(
                format!("-V_{tn1}_C*(V_{tn1}_C*{b:.4}+V_{tn2}_C*({g:.4}*sin(D_{tn1}_C-D_{tn2}_C)-{b:.4}*cos(D_{tn1}_C-D_{tn2}_C)))"),
            );
        }
        // AB
        3 => {
            let rx = nalgebra::Matrix2::new(
                r_x[[0, 0]], r_x[[0, 1]],
                r_x[[1, 0]], r_x[[1, 1]]);
            let gb = rx.try_inverse().unwrap();
            let (g_aa, b_aa) = (gb.m11.re, gb.m11.im);
            let (g_ab, b_ab) = (gb.m12.re, gb.m12.im);
            let (g_ba, b_ba) = (gb.m21.re, gb.m21.im);
            let (g_bb, b_bb) = (gb.m22.re, gb.m22.im);
            //P_A
            result.push(
                format!(
                    "V_{tn1}_A*V_{tn1}_A*{g_aa:.4}\
                    -V_{tn1}_A*V_{tn2}_A*({g_aa:.4}*cos(D_{tn1}_A-D_{tn2}_A)+{b_aa:.4}*sin(D_{tn1}_A-D_{tn2}_A))\
                    +V_{tn1}_A*V_{tn1}_B*({g_ab:.4}*cos(D_{tn1}_A-D_{tn1}_B)+{b_ab:.4}*sin(D_{tn1}_A-D_{tn1}_B))\
                    -V_{tn1}_A*V_{tn2}_B*({g_ab:.4}*cos(D_{tn1}_A-D_{tn2}_B)+{b_ab:.4}*sin(D_{tn1}_A-D_{tn2}_B))"),
            );
            //Q_A
            result.push(
                format!(
                    "-V_{tn1}_A*V_{tn1}_A*{b_aa:.4}\
                    +V_{tn1}_A*V_{tn2}_A*({b_aa:.4}*cos(D_{tn1}_A-D_{tn2}_A)-{g_aa:.4}*sin(D_{tn1}_A-D_{tn2}_A))\
                    +V_{tn1}_A*V_{tn1}_B*({g_ab:.4}*sin(D_{tn1}_A-D_{tn1}_B)-{b_ab:.4}*cos(D_{tn1}_A-D_{tn1}_B))\
                    +V_{tn1}_A*V_{tn2}_B*({b_ab:.4}*cos(D_{tn1}_A-D_{tn2}_B)-{g_ab:.4}*sin(D_{tn1}_A-D_{tn2}_B))"),
            );
            //P_B
            result.push(
                format!(
                    "V_{tn1}_B*V_{tn1}_B*{g_bb:.4}\
                    -V_{tn1}_B*V_{tn2}_B*({g_bb:.4}*cos(D_{tn1}_B-D_{tn2}_B)+{b_bb:.4}*sin(D_{tn1}_B-D_{tn2}_B))\
                    +V_{tn1}_B*V_{tn1}_A*({g_ba:.4}*cos(D_{tn1}_B-D_{tn1}_A)+{b_ba:.4}*sin(D_{tn1}_B-D_{tn1}_A))\
                    -V_{tn1}_B*V_{tn2}_A*({g_ba:.4}*cos(D_{tn1}_B-D_{tn2}_A)+{b_ba:.4}*sin(D_{tn1}_B-D_{tn2}_A))"),
            );
            //Q_B
            result.push(
                format!(
                    "-V_{tn1}_B*V_{tn1}_B*{b_bb:.4}\
                    +V_{tn1}_B*V_{tn2}_B*({b_bb:.4}*cos(D_{tn1}_B-D_{tn2}_B)-{g_bb:.4}*sin(D_{tn1}_B-D_{tn2}_B))\
                    +V_{tn1}_B*V_{tn1}_A*({g_ba:.4}*sin(D_{tn1}_B-D_{tn1}_A)-{b_ba:.4}*cos(D_{tn1}_B-D_{tn1}_A))\
                    +V_{tn1}_B*V_{tn2}_A*({b_ba:.4}*cos(D_{tn1}_B-D_{tn2}_A)-{g_ba:.4}*sin(D_{tn1}_B-D_{tn2}_A))"),
            );
        }
        // AC
        5 => {
            let rx = nalgebra::Matrix2::new(
                r_x[[0, 0]], r_x[[0, 2]],
                r_x[[2, 0]], r_x[[2, 2]]);
            let gb = rx.try_inverse().unwrap();
            let (g_aa, b_aa) = (gb.m11.re, gb.m11.im);
            let (g_ac, b_ac) = (gb.m12.re, gb.m12.im);
            let (g_ca, b_ca) = (gb.m21.re, gb.m21.im);
            let (g_cc, b_cc) = (gb.m22.re, gb.m22.im);
            //P_A
            result.push(
                format!(
                    "V_{tn1}_A*V_{tn1}_A*{g_aa:.4}\
                    -V_{tn1}_A*V_{tn2}_A*({g_aa:.4}*cos(D_{tn1}_A-D_{tn2}_A)+{b_aa:.4}*sin(D_{tn1}_A-D_{tn2}_A))\
                    +V_{tn1}_A*V_{tn1}_C*({g_ac:.4}*cos(D_{tn1}_A-D_{tn1}_C)+{b_ac:.4}*sin(D_{tn1}_A-D_{tn1}_C))\
                    -V_{tn1}_A*V_{tn2}_C*({g_ac:.4}*cos(D_{tn1}_A-D_{tn2}_C)+{b_ac:.4}*sin(D_{tn1}_A-D_{tn2}_C))"),
            );
            //Q_A
            result.push(
                format!(
                    "-V_{tn1}_A*V_{tn1}_A*{b_aa:.4}\
                    +V_{tn1}_A*V_{tn2}_A*({b_aa:.4}*cos(D_{tn1}_A-D_{tn2}_A)-{g_aa:.4}*sin(D_{tn1}_A-D_{tn2}_A))\
                    +V_{tn1}_A*V_{tn1}_C*({g_ac:.4}*sin(D_{tn1}_A-D_{tn1}_C)-{b_ac:.4}*cos(D_{tn1}_A-D_{tn1}_C))\
                    +V_{tn1}_A*V_{tn2}_C*({b_ac:.4}*cos(D_{tn1}_A-D_{tn2}_C)-{g_ac:.4}*sin(D_{tn1}_A-D_{tn2}_C))"),
            );
            //P_C
            result.push(
                format!(
                    "V_{tn1}_C*V_{tn1}_C*{g_cc:.4}\
                    -V_{tn1}_C*V_{tn2}_C*({g_cc:.4}*cos(D_{tn1}_C-D_{tn2}_C)+{b_cc:.4}*sin(D_{tn1}_C-D_{tn2}_C))\
                    +V_{tn1}_C*V_{tn1}_A*({g_ca:.4}*cos(D_{tn1}_C-D_{tn1}_A)+{b_ca:.4}*sin(D_{tn1}_C-D_{tn1}_A))\
                    -V_{tn1}_C*V_{tn2}_A*({g_ca:.4}*cos(D_{tn1}_C-D_{tn2}_A)+{b_ca:.4}*sin(D_{tn1}_C-D_{tn2}_A))"),
            );
            //Q_C
            result.push(
                format!(
                    "-V_{tn1}_C*V_{tn1}_C*{b_cc:.4}\
                    +V_{tn1}_C*V_{tn2}_C*({b_cc:.4}*cos(D_{tn1}_C-D_{tn2}_C)-{g_cc:.4}*sin(D_{tn1}_C-D_{tn2}_C))\
                    +V_{tn1}_C*V_{tn1}_A*({g_ca:.4}*sin(D_{tn1}_C-D_{tn1}_A)-{b_ca:.4}*cos(D_{tn1}_C-D_{tn1}_A))\
                    +V_{tn1}_C*V_{tn2}_A*({b_ca:.4}*cos(D_{tn1}_C-D_{tn2}_A)-{g_ca:.4}*sin(D_{tn1}_C-D_{tn2}_A))"),
            );
        }
        // BC
        6 => {
            let rx = nalgebra::Matrix2::new(
                r_x[[1, 1]], r_x[[1, 2]],
                r_x[[2, 1]], r_x[[2, 2]]);
            let gb = rx.try_inverse().unwrap();
            let (g_bb, b_bb) = (gb.m11.re, gb.m11.im);
            let (g_bc, b_bc) = (gb.m12.re, gb.m12.im);
            let (g_cb, b_cb) = (gb.m21.re, gb.m21.im);
            let (g_cc, b_cc) = (gb.m22.re, gb.m22.im);
            //P_B
            result.push(
                format!(
                    "V_{tn1}_B*V_{tn1}_B*{g_bb:.4}\
                    -V_{tn1}_B*V_{tn2}_B*({g_bb:.4}*cos(D_{tn1}_B-D_{tn2}_B)+{b_bb:.4}*sin(D_{tn1}_B-D_{tn2}_B))\
                    +V_{tn1}_B*V_{tn1}_C*({g_bc:.4}*cos(D_{tn1}_B-D_{tn1}_C)+{b_bc:.4}*sin(D_{tn1}_B-D_{tn1}_C))\
                    -V_{tn1}_B*V_{tn2}_C*({g_bc:.4}*cos(D_{tn1}_B-D_{tn2}_C)+{b_bc:.4}*sin(D_{tn1}_B-D_{tn2}_C))"),
            );
            //Q_B
            result.push(
                format!(
                    "-V_{tn1}_B*V_{tn1}_B*{b_bb:.4}\
                    +V_{tn1}_B*V_{tn2}_B*({b_bb:.4}*cos(D_{tn1}_B-D_{tn2}_B)-{g_bb:.4}*sin(D_{tn1}_B-D_{tn2}_B))\
                    +V_{tn1}_B*V_{tn1}_C*({g_bc:.4}*sin(D_{tn1}_B-D_{tn1}_C)-{b_bc:.4}*cos(D_{tn1}_B-D_{tn1}_C))\
                    +V_{tn1}_B*V_{tn2}_C*({b_bc:.4}*cos(D_{tn1}_B-D_{tn2}_C)-{g_bc:.4}*sin(D_{tn1}_B-D_{tn2}_C))"),
            );
            //P_C
            result.push(
                format!(
                    "V_{tn1}_C*V_{tn1}_C*{g_cc:.4}\
                    -V_{tn1}_C*V_{tn2}_C*({g_cc:.4}*cos(D_{tn1}_C-D_{tn2}_C)+{b_cc:.4}*sin(D_{tn1}_C-D_{tn2}_C))\
                    +V_{tn1}_C*V_{tn1}_B*({g_cb:.4}*cos(D_{tn1}_C-D_{tn1}_B)+{b_cb:.4}*sin(D_{tn1}_C-D_{tn1}_B))\
                    -V_{tn1}_C*V_{tn2}_B*({g_cb:.4}*cos(D_{tn1}_C-D_{tn2}_B)+{b_cb:.4}*sin(D_{tn1}_C-D_{tn2}_B))"),
            );
            //Q_C
            result.push(
                format!(
                    "-V_{tn1}_C*V_{tn1}_C*{b_cc:.4}\
                    +V_{tn1}_C*V_{tn2}_C*({b_cc:.4}*cos(D_{tn1}_C-D_{tn2}_C)-{g_cc:.4}*sin(D_{tn1}_C-D_{tn2}_C))\
                    +V_{tn1}_C*V_{tn1}_B*({g_cb:.4}*sin(D_{tn1}_C-D_{tn1}_B)-{b_cb:.4}*cos(D_{tn1}_C-D_{tn1}_B))\
                    +V_{tn1}_C*V_{tn2}_B*({b_cb:.4}*cos(D_{tn1}_C-D_{tn2}_B)-{g_cb:.4}*sin(D_{tn1}_C-D_{tn2}_B))"),
            );
        }
        // ABC
        7 => {
            let rx = nalgebra::Matrix3::new(
                r_x[[0, 0]], r_x[[0, 1]], r_x[[0, 2]],
                r_x[[1, 0]], r_x[[1, 1]], r_x[[1, 2]],
                r_x[[2, 0]], r_x[[2, 1]], r_x[[2, 2]]);
            let gb = rx.try_inverse().unwrap();
            let (g_aa, b_aa) = (gb.m11.re, gb.m11.im);
            let (g_ab, b_ab) = (gb.m12.re, gb.m12.im);
            let (g_ac, b_ac) = (gb.m13.re, gb.m13.im);
            let (g_ba, b_ba) = (gb.m21.re, gb.m21.im);
            let (g_bb, b_bb) = (gb.m22.re, gb.m22.im);
            let (g_bc, b_bc) = (gb.m23.re, gb.m23.im);
            let (g_ca, b_ca) = (gb.m31.re, gb.m31.im);
            let (g_cb, b_cb) = (gb.m32.re, gb.m32.im);
            let (g_cc, b_cc) = (gb.m33.re, gb.m33.im);
            //P_A
            result.push(
                format!(
                    "V_{tn1}_A*V_{tn1}_A*{g_aa:.4}\
                    -V_{tn1}_A*V_{tn2}_A*({g_aa:.4}*cos(D_{tn1}_A-D_{tn2}_A)+{b_aa:.4}*sin(D_{tn1}_A-D_{tn2}_A))\
                    +V_{tn1}_A*V_{tn1}_B*({g_ab:.4}*cos(D_{tn1}_A-D_{tn1}_B)+{b_ab:.4}*sin(D_{tn1}_A-D_{tn1}_B))\
                    -V_{tn1}_A*V_{tn2}_B*({g_ab:.4}*cos(D_{tn1}_A-D_{tn2}_B)+{b_ab:.4}*sin(D_{tn1}_A-D_{tn2}_B))\
                    +V_{tn1}_A*V_{tn1}_C*({g_ac:.4}*cos(D_{tn1}_A-D_{tn1}_C)+{b_ac:.4}*sin(D_{tn1}_A-D_{tn1}_C))\
                    -V_{tn1}_A*V_{tn2}_C*({g_ac:.4}*cos(D_{tn1}_A-D_{tn2}_C)+{b_ac:.4}*sin(D_{tn1}_A-D_{tn2}_C))"),
            );
            //Q_A
            result.push(
                format!(
                    "-V_{tn1}_A*V_{tn1}_A*{b_aa:.4}\
                    +V_{tn1}_A*V_{tn2}_A*({b_aa:.4}*cos(D_{tn1}_A-D_{tn2}_A)-{g_aa:.4}*sin(D_{tn1}_A-D_{tn2}_A))\
                    +V_{tn1}_A*V_{tn1}_B*({g_ab:.4}*sin(D_{tn1}_A-D_{tn1}_B)-{b_ab:.4}*cos(D_{tn1}_A-D_{tn1}_B))\
                    +V_{tn1}_A*V_{tn2}_B*({b_ab:.4}*cos(D_{tn1}_A-D_{tn2}_B)-{g_ab:.4}*sin(D_{tn1}_A-D_{tn2}_B))\
                    +V_{tn1}_A*V_{tn1}_C*({g_ac:.4}*sin(D_{tn1}_A-D_{tn1}_C)-{b_ac:.4}*cos(D_{tn1}_A-D_{tn1}_C))\
                    +V_{tn1}_A*V_{tn2}_C*({b_ac:.4}*cos(D_{tn1}_A-D_{tn2}_C)-{g_ac:.4}*sin(D_{tn1}_A-D_{tn2}_C))"),
            );
            //P_B
            result.push(
                format!(
                    "V_{tn1}_B*V_{tn1}_B*{g_bb:.4}\
                    -V_{tn1}_B*V_{tn2}_B*({g_bb:.4}*cos(D_{tn1}_B-D_{tn2}_B)+{b_bb:.4}*sin(D_{tn1}_B-D_{tn2}_B))\
                    +V_{tn1}_B*V_{tn1}_A*({g_ba:.4}*cos(D_{tn1}_B-D_{tn1}_A)+{b_ba:.4}*sin(D_{tn1}_B-D_{tn1}_A))\
                    -V_{tn1}_B*V_{tn2}_A*({g_ba:.4}*cos(D_{tn1}_B-D_{tn2}_A)+{b_ba:.4}*sin(D_{tn1}_B-D_{tn2}_A))\
                    +V_{tn1}_B*V_{tn1}_C*({g_bc:.4}*cos(D_{tn1}_B-D_{tn1}_C)+{b_bc:.4}*sin(D_{tn1}_B-D_{tn1}_C))\
                    -V_{tn1}_B*V_{tn2}_C*({g_bc:.4}*cos(D_{tn1}_B-D_{tn2}_C)+{b_bc:.4}*sin(D_{tn1}_B-D_{tn2}_C))"),
            );
            //Q_B
            result.push(
                format!(
                    "-V_{tn1}_B*V_{tn1}_B*{b_bb:.4}\
                    +V_{tn1}_B*V_{tn2}_B*({b_bb:.4}*cos(D_{tn1}_B-D_{tn2}_B)-{g_bb:.4}*sin(D_{tn1}_B-D_{tn2}_B))\
                    +V_{tn1}_B*V_{tn1}_A*({g_ba:.4}*sin(D_{tn1}_B-D_{tn1}_A)-{b_ba:.4}*cos(D_{tn1}_B-D_{tn1}_A))\
                    +V_{tn1}_B*V_{tn2}_A*({b_ba:.4}*cos(D_{tn1}_B-D_{tn2}_A)-{g_ba:.4}*sin(D_{tn1}_B-D_{tn2}_A))\
                    +V_{tn1}_B*V_{tn1}_C*({g_bc:.4}*sin(D_{tn1}_B-D_{tn1}_C)-{b_bc:.4}*cos(D_{tn1}_B-D_{tn1}_C))\
                    +V_{tn1}_B*V_{tn2}_C*({b_bc:.4}*cos(D_{tn1}_B-D_{tn2}_C)-{g_bc:.4}*sin(D_{tn1}_B-D_{tn2}_C))"),
            );
            //P_C
            result.push(
                format!(
                    "V_{tn1}_C*V_{tn1}_C*{g_cc:.4}\
                    -V_{tn1}_C*V_{tn2}_C*({g_cc:.4}*cos(D_{tn1}_C-D_{tn2}_C)+{b_cc:.4}*sin(D_{tn1}_C-D_{tn2}_C))\
                    +V_{tn1}_C*V_{tn1}_A*({g_ca:.4}*cos(D_{tn1}_C-D_{tn1}_A)+{b_ca:.4}*sin(D_{tn1}_C-D_{tn1}_A))\
                    -V_{tn1}_C*V_{tn2}_A*({g_ca:.4}*cos(D_{tn1}_C-D_{tn2}_A)+{b_ca:.4}*sin(D_{tn1}_C-D_{tn2}_A))\
                    +V_{tn1}_C*V_{tn1}_B*({g_cb:.4}*cos(D_{tn1}_C-D_{tn1}_B)+{b_cb:.4}*sin(D_{tn1}_C-D_{tn1}_B))\
                    -V_{tn1}_C*V_{tn2}_B*({g_cb:.4}*cos(D_{tn1}_C-D_{tn2}_B)+{b_cb:.4}*sin(D_{tn1}_C-D_{tn2}_B))"),
            );
            //Q_C
            result.push(
                format!(
                    "-V_{tn1}_C*V_{tn1}_C*{b_cc:.4}\
                    +V_{tn1}_C*V_{tn2}_C*({b_cc:.4}*cos(D_{tn1}_C-D_{tn2}_C)-{g_cc:.4}*sin(D_{tn1}_C-D_{tn2}_C))\
                    +V_{tn1}_C*V_{tn1}_A*({g_ca:.4}*sin(D_{tn1}_C-D_{tn1}_A)-{b_ca:.4}*cos(D_{tn1}_C-D_{tn1}_A))\
                    +V_{tn1}_C*V_{tn2}_A*({b_ca:.4}*cos(D_{tn1}_C-D_{tn2}_A)-{g_ca:.4}*sin(D_{tn1}_C-D_{tn2}_A))\
                    +V_{tn1}_C*V_{tn1}_B*({g_cb:.4}*sin(D_{tn1}_C-D_{tn1}_B)-{b_cb:.4}*cos(D_{tn1}_C-D_{tn1}_B))\
                    +V_{tn1}_C*V_{tn2}_B*({b_cb:.4}*cos(D_{tn1}_C-D_{tn2}_B)-{g_cb:.4}*sin(D_{tn1}_C-D_{tn2}_B))"),
            );
        }
        // mode == 0: no phase present
        _ => { return None; }
    };
    // (removed a dead `let re = result.clone();` — the clone was never used)
    Some((result, mode))
}
/// Merge per-line P/Q expressions into the accumulated per-phase
/// expression map keyed by "P_A".."Q_C".
///
/// `mode` is the phase bitmask produced by `get_pq_of_acline`
/// (1 = A, 2 = B, 4 = C); it determines which keys the entries of
/// `exps` correspond to, as (P, Q) pairs per present phase in A, B, C
/// order. An unknown mode is ignored. Expressions for the same key are
/// joined with '+'.
pub fn extend_exp_pq_of_acline(power_exp: &mut HashMap<&str, String>, exps: Vec<String>, mode: u32) {
    let key = match mode {
        1 => vec!["P_A", "Q_A"],
        2 => vec!["P_B", "Q_B"],
        4 => vec!["P_C", "Q_C"],
        3 => vec!["P_A", "Q_A", "P_B", "Q_B"],
        5 => vec!["P_A", "Q_A", "P_C", "Q_C"],
        6 => vec!["P_B", "Q_B", "P_C", "Q_C"],
        7 => vec!["P_A", "Q_A", "P_B", "Q_B", "P_C", "Q_C"],
        _ => return,
    };
    // Single entry-API lookup per key; move each expression into the map
    // instead of re-formatting it (the old `format!("{}", exps[i])`
    // allocated a needless copy).
    for (k, exp) in key.into_iter().zip(exps) {
        power_exp
            .entry(k)
            .and_modify(|existing| {
                existing.push('+');
                existing.push_str(&exp);
            })
            .or_insert(exp);
    }
}
/// Turn the accumulated per-phase branch-flow expressions of node `tn`
/// into power-balance equality constraints of the form
/// "<P|Q>_<tn>_<phase>+<expr>:[0/0]", appended to `constraint` in
/// P_A, Q_A, P_B, Q_B, P_C, Q_C order.
pub fn get_node_pq_load_constraints(constraint: &mut Vec<String>, power_exp: HashMap<&str, String>, tn: u64) {
    for key in ["P_A", "Q_A", "P_B", "Q_B", "P_C", "Q_C"] {
        if let Some(exp) = power_exp.get(key) {
            // "P_A" -> ("P", "_A")
            let (var, phase) = key.split_at(1);
            constraint.push(format!("{var}_{tn}{phase}+{exp}:[0/0]"));
        }
    }
}
// test
#[cfg(test)]
mod test {
    use ndarray::array;
    use super::*;
    #[test]
    fn test_get_pq_of_acline() {
        // Original matrix:
        // 0.3465+1.0179j 0.1560+0.5017j 0.1580+0.4236j
        // 0.1560+0.5017j 0.3375+1.0478j 0.1535+0.3849j
        // 0.1580+0.4236j 0.1535+0.3849j 0.3414+1.0348j
        // Its inverse:
        // 0.4338 - 1.2502i   -0.1840 + 0.4622i   -0.1008 + 0.3455i
        // -0.1840 + 0.4622i   0.3798 - 1.1847i   -0.0478 + 0.2639i
        // -0.1008 + 0.3455i   -0.0478 + 0.2639i   0.3359 - 1.1176i
        let arr = array![ [Complex64::new(0.3465,1.0179), Complex64::new(0.1560,0.5017), Complex64::new(0.1580,0.4236)],
            [Complex64::new(0.1560,0.5017), Complex64::new(0.3375,1.0478), Complex64::new(0.1535,0.3849)],
            [Complex64::new(0.1580,0.4236), Complex64::new(0.1535,0.3849), Complex64::new(0.3414,1.0348)]];
        // NOTE(review): `q` here is the phase bitmask returned by
        // get_pq_of_acline (7 for a full matrix), not a Q expression;
        // it is unused in this test.
        let (p, q) = get_pq_of_acline(arr,1,2).unwrap();
        // NOTE(review): the expected string below contains unbalanced
        // parentheses and "V_1_A*(V_1_B*(" groupings that do not match
        // the "V_1_A*V_1_B*(" format produced by get_pq_of_acline —
        // this assertion looks stale; confirm the test still passes.
        assert_eq!(p[0],
                   "V_1_A*V_1_A*0.4338\
            -V_1_A*V_2_A*(0.4338*cos(D_1_A-D_2_A)+-1.2502*sin(D_1_A-D_2_A))\
            +V_1_A*(V_1_B*(-0.1840*cos(D_1_A-D_1_B)+0.4622*sin(D_1_A-D_1_B))\
            -V_1_A*(V_2_B*(-0.1840*cos(D_1_A-D_2_B)+0.4622*sin(D_1_A-D_2_B))\
            +V_1_A*(V_1_C*(-0.1008*cos(D_1_A-D_1_C)+0.3455*sin(D_1_A-D_1_C)))\
            -V_1_A*(V_2_C*(-0.1008*cos(D_1_A-D_2_C)+0.3455*sin(D_1_A-D_2_C))"
        );
        // Single-phase (C only) line: mode should be 4.
        let arr = array![ [Complex64::new(0.0,0.0), Complex64::new(0.0,0.0), Complex64::new(0.0,0.0)],
            [Complex64::new(0.0,0.0), Complex64::new(0.0,0.0), Complex64::new(0.0,0.0)],
            [Complex64::new(0.0,0.0), Complex64::new(0.0,0.0), Complex64::new(0.3414,1.0348)]];
        // inverse of the single diagonal entry: 0.2875 - 0.8715i
        let (p, q) = get_pq_of_acline(arr,1,2).unwrap();
        assert_eq!(p[0], "V_1_C*(V_1_C*0.2875-V_2_C*(0.2875*cos(D_1_C-D_2_C)+-0.8715*sin(D_1_C-D_2_C)))");
    }
    // Test network layout:
    // |source1|.@1@.|line2|.@2@.|load4|
    //          1    2     3    4
    #[test]
    fn test_get_pf_nlp_constraints() {
        let tns = vec![1, 2];
        let dyn_topo = vec![vec![1, 1], vec![2, 2]];
        let dev_topo = vec![vec![1, 1, 1, 1], vec![2, 1, 1, 2], vec![3, 2, 2, 2], vec![4, 2, 2, 4]];
        let mut dev_matrix = HashMap::new();
        // IEEE13 config601 R/X matrices scaled by line length 2000/5280 mi
        dev_matrix.insert(2, vec![array![[0.3465, 0.1560, 0.1580], [0.1560, 0.3375, 0.1535], [0.1580, 0.1535, 0.3414]]*2000.0/5280.0,
                                  array![[1.0179, 0.5017, 0.4236], [0.5017, 1.0478, 0.3849], [0.4236, 0.3849, 1.0348]]*2000.0/5280.0 ]);
        // dev_matrix.insert(2, vec![array![[1.3425, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.3414]],
        //                           array![[0.5124, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0348]]]);
        // dev_matrix.insert(2, vec![array![[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.3414]],
        //                           array![[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0348]]]);
        // Source bus voltage plus per-phase P/Q load measurements at bus 2
        let input_tns = vec![1, 2, 2, 2, 2, 2, 2];
        let input_phases = vec![MeasPhase::Total, MeasPhase::A, MeasPhase::A, MeasPhase::B, MeasPhase::B, MeasPhase::C, MeasPhase::C];
        let input_types = vec![DataUnit::V, DataUnit::W, DataUnit::Var, DataUnit::W, DataUnit::Var, DataUnit::W, DataUnit::Var];
        let input_values = vec![12470.0, 1275000.0, 790174.0, 1800000.0, 871779.8, 2375000.0, 780624.7];
        let constraints = get_pf_nlp_constraints(
            &tns, dyn_topo, dev_topo, dev_matrix, input_tns, input_phases, input_types, input_values
        );
        assert!(constraints.is_some());
        let constraints = constraints.unwrap();
        // 12 balance constraints (2 buses x 6 quantities) + 12 measurement pins
        assert_eq!(constraints.len(), 24);
        for c in constraints {
            println!("{}", c);
        }
        println!("{}", get_pf_nlp_variables(&tns));
    }
}
use std::collections::HashMap;
use csv::StringRecordsIter;
use ndarray::Array2;
use eig_domain::prop::DataUnit;
// One device occupies exactly MAT_SIZE = 18 values: a 3x3 R matrix
// followed by a 3x3 X matrix, both flattened row-major.
const MAT_SIZE: usize = 18;
/// Read the per-device impedance CSV into a map `dev_id -> [R, X]`.
///
/// Each record's first column is the device id; the remaining columns are
/// matrix entries. Consecutive records with the same id accumulate into
/// one device's 18 values; a change of id flushes the previous device.
/// Returns an error string tagged with row/col on any parse failure or
/// when a device does not supply exactly 18 values.
///
/// NOTE(review): dev_id == 0 is used as the "no device yet" sentinel, so
/// a real device with id 0 would be silently dropped — confirm device
/// ids start at 1.
pub(crate) fn read_dev_ohm(records: &mut StringRecordsIter<&[u8]>)
    -> Result<HashMap<u64, Vec<Array2<f64>>>, String> {
    let mut map = HashMap::new();
    let mut dev_id = 0u64;
    let mut matrix: Vec<f64> = Vec::with_capacity(MAT_SIZE);
    let mut row = 0;
    loop {
        match records.next() {
            Some(Ok(record)) => {
                let mut col = 0;
                for str in record.iter() {
                    if col == 0 {
                        // First column: device id. An id change flushes the
                        // previously accumulated device.
                        if let Ok(id) = str.parse() {
                            if dev_id != id {
                                if dev_id != 0 {
                                    if matrix.len() != MAT_SIZE {
                                        return Err(format!("matrix len must be {MAT_SIZE}"));
                                    } else {
                                        let v = create_rx(&matrix);
                                        map.insert(dev_id, v);
                                    }
                                }
                                dev_id = id;
                                matrix.clear();
                            }
                        } else {
                            return Err(format!("Wrong dev matrix, row {row} col {col}"));
                        }
                    } else {
                        // Remaining columns: matrix entries, appended in order.
                        if let Ok(f) = str.parse() {
                            matrix.push(f);
                        } else {
                            return Err(format!("Wrong dev matrix, row {row} col {col}"));
                        }
                    }
                    col += 1;
                }
            }
            Some(Err(e)) => {
                return Err(format!("Wrong dev matrix, err: {:?}", e));
            }
            None => {
                break;
            }
        }
        row += 1;
    }
    // Flush the final device (the loop only flushes on id change).
    if dev_id != 0 {
        if matrix.len() != MAT_SIZE {
            return Err(format!("matrix len must be {MAT_SIZE}"));
        } else {
            let v = create_rx(&matrix);
            map.insert(dev_id, v);
        }
    }
    Ok(map)
}
/// Split a flat 18-element slice into the 3x3 resistance and reactance
/// matrices, returned as `[R, X]`.
fn create_rx(matrix: &[f64]) -> Vec<Array2<f64>> {
    let (r_part, x_part) = matrix.split_at(9);
    vec![
        Array2::from_shape_vec((3, 3), r_part.to_vec()).unwrap(),
        Array2::from_shape_vec((3, 3), x_part.to_vec()).unwrap(),
    ]
}
/// Round-trips `DataUnit` values through serde_json and pins the exact
/// serialized form.
#[test]
fn test_unit_parse() {
    let units = vec![DataUnit::kVar, DataUnit::kW];
    let json = serde_json::to_string(&units).unwrap();
    assert_eq!(r#"["kVar","kW"]"#, json);
    let parsed: Vec<DataUnit> = serde_json::from_str(r#"["kVar", "kW"]"#).unwrap();
    assert_eq!(parsed, units);
}
\ No newline at end of file
// NOTE(review): appears to be a placeholder initializing the dataframe
// inputs of the plugin (names match the *_DF_NAME constants in lib.rs);
// the consuming language/tool is unclear from this file — confirm
// before editing.
dy_topo = [];
dev_topo = [];
terminal_cn_dev = [];
tn_input = [];
shunt_meas = [];
// general matrix
gm = [];
\ No newline at end of file
% Test a single line segment of the IEEE 13-node distribution feeder
clc;
mpopt = mpoption('verbose',2);
mpc = loadcase('t_case3p_a_1');
run_pf(mpc,mpopt,'mpx',mp.xt_3p);
% Manual calculation below, cross-checking the MATPOWER solution
% Config-601 per-mile R and X matrices (ohm/mi)
r = [ 0.3465 0.1560 0.1580 ; 0.1560 0.3375 0.1535 ; 0.1580 0.1535 0.3414 ];
x= [1.0179 0.5017 0.4236; 0.5017 1.0478 0.3849; 0.4236 0.3849 1.0348];
len = 2000/5280;
% series admittance of the whole line: inv(Z)/len = inv(Z*len)
y = inv(r+1j*x)/len;
% bus voltages (kV magnitude, degrees) taken from the MATPOWER output
v1 = [complexd(7.1996,0.00) ; complexd(7.1996,-120.00) ; complexd(7.1996,120.00)].*1000;
v2 = [complexd(7.1769,-0.08) ; complexd(7.1396,-120.37) ; complexd(7.1500,119.25)].*1000;
% line currents from the voltage drop
i = y*(v2-v1);
abs(i)
rad2deg(angle(i))
pd = mpc.load3p(1,4:6); % per-phase active load (kW)
pf = mpc.load3p(1,7:9); % per-phase power factor
qd = pd .* tan(acos(pf)); % derived reactive load (kVAr)
function mpc = t_case3p_a_1
% t_case3p_a - Four bus, unbalanced 3-phase test case.
%
% This data comes from ``4Bus-YY-UnB.DSS``, a modified version (with unbalanced
% load) of ``4Bus-YY-Bal.DSS`` [1], the OpenDSS 4 bus IEEE test case with
% grounded-wye to grounded-wye transformer.
%
% NOTE(review): trimmed here to a 2-bus / single-line variant — the
% original buses 3-4, the transformer and the second line are commented out.
%
% [1] https://sourceforge.net/p/electricdss/code/HEAD/tree/trunk/Distrib/IEEETestCases/4Bus-YY-Bal/4Bus-YY-Bal.DSS
%% MATPOWER Case Format : Version 2
mpc.version = '2';
%%----- Power Flow Data -----%%
%% system MVA base
mpc.baseMVA = 100;
% Single-phase (positive-sequence) data intentionally empty; only the
% 3-phase model below is populated.
mpc.bus = [];
mpc.gen = [];
mpc.branch = [];
mpc.gencost = [];
%%----- 3 Phase Model Data -----%%
%% system data
mpc.freq = 60; %% frequency, Hz
mpc.basekVA = 1000; %% system kVA base
%% bus data
% busid type basekV Vm1 Vm2 Vm3 Va1 Va2 Va3
mpc.bus3p = [
    1 3 12.47 1 1 1 0 -120 120;
    2 1 12.47 1 1 1 0 -120 120;
    % 3 1 4.16 1 1 1 0 -120 120;
    % 4 1 4.16 1 1 1 0 -120 120;
];
%% branch data
% brid fbus tbus status lcid len
mpc.line3p = [
    1 1 2 1 1 2000/5280;
    % 2 3 4 1 1 2500/5280;
];
%% transformer
% xfid fbus tbus status R X basekVA basekV
mpc.xfmr3p = [
    % 1 2 3 1 0.01 0.06 6000 12.47;
];
%% load
% ldid ldbus status Pd1 Pd2 Pd3 ldpf1 ldpf2 ldpf3
mpc.load3p = [
    1 2 1 1275 1800 2375 0.85 0.9 0.95;
    % 1 4 1 1275 1800 2375 0.85 0.9 0.95;
];
%% gen
% genid gbus status Vg1 Vg2 Vg3 Pg1 Pg2 Pg3 Qg1 Qg2 Qg3
mpc.gen3p = [
    1 1 1 1 1 1 2000 2000 2000 0 0 0;
];
%% line construction
% lcid R11 R21 R31 R22 R32 R33 X11 X21 X31 X22 X32 X33 C11 C21 C31 C22 C32 C33
% IEEE13 config-601 overhead line impedance (ohm/mi), lower-triangular order
mpc.lc = [
    1 0.3465 0.1560 0.1580 0.3375 0.1535 0.3414 1.0179 0.5017 0.4236 1.0478 0.3849 1.0348 0 0 0 0 0 0
    %1 0.457541 0.15594 0.153474 0.466617 0.157996 0.461462 1.078 0.501648 0.384909 1.04813 0.423624 1.06502 15.0671 -4.86241 -1.85323 15.875 -3.09098 14.3254
];
在nlp.rs中写了函数test_get_pf_nlp_constraints()进行了测试,模型为一条线路,选择ieee13节点配电网模型,config601线路。
电压等级为12.47kV,具体参数见t_case3p_a_1.m文件。
用matpower进行了测试,结果如下:
```matlab
MATPOWER Version 8.0, 17-May-2024
Power Flow -- AC-polar-power formulation
it max residual max x
---- -------------- --------------
0 2.375e+00 -
1 1.985e-02 1.293e-02
2 3.719e-06 1.653e-04
3 1.430e-13 3.221e-08
Newton's method converged in 3 iterations.
PF successful
PF succeeded in 0.04 seconds (0.03 setup + 0.00 solve)
================================================================================
| System Summary |
================================================================================
elements on off total
--------------------- ------- ------- -------
3-ph Buses 2 - 2
3-ph Generators 1 - 1
3-ph Loads 1 - 1
3-ph Lines 1 - 1
Total 3-ph generation 5468.4 kW 2502.3 kVAr
Total 3-ph load 5450.0 kW 2442.6 kVAr
Total 3-ph line loss 18.4 kW 59.7 kVAr
================================================================================
| 3-ph Bus Data |
================================================================================
3-ph Phase A Voltage Phase B Voltage Phase C Voltage
Bus ID Status (kV) (deg) (kV) (deg) (kV) (deg)
-------- ------ ------- ------- ------- ------- ------- -------
1 1 7.1996 0.00 7.1996 -120.00 7.1996 120.00
2 1 7.1769 -0.08 7.1396 -120.37 7.1500 119.25
================================================================================
| 3-ph Generator Data |
================================================================================
3-ph 3-ph Phase A Power Phase B Power Phase C Power
Gen ID Bus ID Status (kW) (KVAr) (kW) (kVAr) (kW) (kVAr)
-------- -------- ------ ------- ------ ------- ------ ------- ------
1 1 1 1277.88 794.49 1809.44 890.72 2381.04 817.09
================================================================================
| 3-ph Load Data |
================================================================================
3-ph 3-ph Phase A Power Phase B Power Phase C Power
Load ID Bus ID Status (kW) (PF) (kW) (PF) (kW) (PF)
-------- -------- ------ ------- ------ ------- ------ ------- ------
1 2 1 1275.00 0.8500 1800.00 0.9000 2375.00 0.9500
================================================================================
| 3-ph Line Data |
================================================================================
--> Current Injections at "From" Bus
3-ph 3-ph Bus 3-ph Bus Phase A Current Phase B Current Phase C Current
Line ID From ID To ID Status (A) (deg) (A) (deg) (A) (deg)
-------- -------- -------- ------ ------ ------ ------ ------ ------ ------
1 1 2 1 209.00 -31.9 280.13 -146.2 349.65 101.1
<-- Current Injections at "To" Bus
3-ph 3-ph Bus 3-ph Bus Phase A Current Phase B Current Phase C Current
Line ID From ID To ID Status (A) (deg) (A) (deg) (A) (deg)
-------- -------- -------- ------ ------ ------ ------ ------ ------ ------
1 1 2 1 209.00 148.1 280.13 33.8 349.65 -78.9
--> Power Injections at "From" Bus
3-ph 3-ph Bus 3-ph Bus Phase A Power Phase B Power Phase C Power
Line ID From ID To ID Status (kW) (kVAr) (kW) (kVAr) (kW) (kVAr)
-------- -------- -------- ------ ------ ------ ------ ------ ------ ------
1 1 2 1 1277.9 794.5 1809.4 890.7 2381.0 817.1
<-- Power Injections at "To" Bus
3-ph 3-ph Bus 3-ph Bus Phase A Power Phase B Power Phase C Power
Line ID From ID To ID Status (kW) (kVAr) (kW) (kVAr) (kW) (kVAr)
-------- -------- -------- ------ ------ ------ ------ ------ ------ ------
1 1 2 1 -1275.0 -790.2 -1800.0 -871.8 -2375.0 -780.6
```
函数test_get_pf_nlp_constraints()输出结果为:
```text
P_1_A+V_1_A*V_1_A*1.1451-V_1_A*V_2_A*(1.1451*cos(D_1_A-D_2_A)+-3.3006*sin(D_1_A-D_2_A))+V_1_A*V_1_B*(-0.4859*cos(D_1_A-D_1_B)+1.2203*sin(D_1_A-D_1_B))-V_1_A*V_2_B*(-0.4859*cos(D_1_A-D_2_B)+1.2203*sin(D_1_A-D_2_B))+V_1_A*V_1_C*(-0.2661*cos(D_1_A-D_1_C)+0.9122*sin(D_1_A-D_1_C))-V_1_A*V_2_C*(-0.2661*cos(D_1_A-D_2_C)+0.9122*sin(D_1_A-D_2_C)):[0/0]
Q_1_A+-V_1_A*V_1_A*-3.3006+V_1_A*V_2_A*(-3.3006*cos(D_1_A-D_2_A)-1.1451*sin(D_1_A-D_2_A))+V_1_A*V_1_B*(-0.4859*sin(D_1_A-D_1_B)-1.2203*cos(D_1_A-D_1_B))+V_1_A*V_2_B*(1.2203*cos(D_1_A-D_2_B)--0.4859*sin(D_1_A-D_2_B))+V_1_A*V_1_C*(-0.2661*sin(D_1_A-D_1_C)-0.9122*cos(D_1_A-D_1_C))+V_1_A*V_2_C*(0.9122*cos(D_1_A-D_2_C)--0.2661*sin(D_1_A-D_2_C)):[0/0]
P_1_B+V_1_B*V_1_B*1.0027-V_1_B*V_2_B*(1.0027*cos(D_1_B-D_2_B)+-3.1276*sin(D_1_B-D_2_B))+V_1_B*V_1_A*(-0.4859*cos(D_1_B-D_1_A)+1.2203*sin(D_1_B-D_1_A))-V_1_B*V_2_A*(-0.4859*cos(D_1_B-D_2_A)+1.2203*sin(D_1_B-D_2_A))+V_1_B*V_1_C*(-0.1263*cos(D_1_B-D_1_C)+0.6967*sin(D_1_B-D_1_C))-V_1_B*V_2_C*(-0.1263*cos(D_1_B-D_2_C)+0.6967*sin(D_1_B-D_2_C)):[0/0]
Q_1_B+-V_1_B*V_1_B*-3.1276+V_1_B*V_2_B*(-3.1276*cos(D_1_B-D_2_B)-1.0027*sin(D_1_B-D_2_B))+V_1_B*V_1_A*(-0.4859*sin(D_1_B-D_1_A)-1.2203*cos(D_1_B-D_1_A))+V_1_B*V_2_A*(1.2203*cos(D_1_B-D_2_A)--0.4859*sin(D_1_B-D_2_A))+V_1_B*V_1_C*(-0.1263*sin(D_1_B-D_1_C)-0.6967*cos(D_1_B-D_1_C))+V_1_B*V_2_C*(0.6967*cos(D_1_B-D_2_C)--0.1263*sin(D_1_B-D_2_C)):[0/0]
P_1_C+V_1_C*V_1_C*0.8867-V_1_C*V_2_C*(0.8867*cos(D_1_C-D_2_C)+-2.9506*sin(D_1_C-D_2_C))+V_1_C*V_1_A*(-0.2661*cos(D_1_C-D_1_A)+0.9122*sin(D_1_C-D_1_A))-V_1_C*V_2_A*(-0.2661*cos(D_1_C-D_2_A)+0.9122*sin(D_1_C-D_2_A))+V_1_C*V_1_B*(-0.1263*cos(D_1_C-D_1_B)+0.6967*sin(D_1_C-D_1_B))-V_1_C*V_2_B*(-0.1263*cos(D_1_C-D_2_B)+0.6967*sin(D_1_C-D_2_B)):[0/0]
Q_1_C+-V_1_C*V_1_C*-2.9506+V_1_C*V_2_C*(-2.9506*cos(D_1_C-D_2_C)-0.8867*sin(D_1_C-D_2_C))+V_1_C*V_1_A*(-0.2661*sin(D_1_C-D_1_A)-0.9122*cos(D_1_C-D_1_A))+V_1_C*V_2_A*(0.9122*cos(D_1_C-D_2_A)--0.2661*sin(D_1_C-D_2_A))+V_1_C*V_1_B*(-0.1263*sin(D_1_C-D_1_B)-0.6967*cos(D_1_C-D_1_B))+V_1_C*V_2_B*(0.6967*cos(D_1_C-D_2_B)--0.1263*sin(D_1_C-D_2_B)):[0/0]
P_2_A+V_2_A*V_2_A*1.1451-V_2_A*V_1_A*(1.1451*cos(D_2_A-D_1_A)+-3.3006*sin(D_2_A-D_1_A))+V_2_A*V_2_B*(-0.4859*cos(D_2_A-D_2_B)+1.2203*sin(D_2_A-D_2_B))-V_2_A*V_1_B*(-0.4859*cos(D_2_A-D_1_B)+1.2203*sin(D_2_A-D_1_B))+V_2_A*V_2_C*(-0.2661*cos(D_2_A-D_2_C)+0.9122*sin(D_2_A-D_2_C))-V_2_A*V_1_C*(-0.2661*cos(D_2_A-D_1_C)+0.9122*sin(D_2_A-D_1_C)):[0/0]
Q_2_A+-V_2_A*V_2_A*-3.3006+V_2_A*V_1_A*(-3.3006*cos(D_2_A-D_1_A)-1.1451*sin(D_2_A-D_1_A))+V_2_A*V_2_B*(-0.4859*sin(D_2_A-D_2_B)-1.2203*cos(D_2_A-D_2_B))+V_2_A*V_1_B*(1.2203*cos(D_2_A-D_1_B)--0.4859*sin(D_2_A-D_1_B))+V_2_A*V_2_C*(-0.2661*sin(D_2_A-D_2_C)-0.9122*cos(D_2_A-D_2_C))+V_2_A*V_1_C*(0.9122*cos(D_2_A-D_1_C)--0.2661*sin(D_2_A-D_1_C)):[0/0]
P_2_B+V_2_B*V_2_B*1.0027-V_2_B*V_1_B*(1.0027*cos(D_2_B-D_1_B)+-3.1276*sin(D_2_B-D_1_B))+V_2_B*V_2_A*(-0.4859*cos(D_2_B-D_2_A)+1.2203*sin(D_2_B-D_2_A))-V_2_B*V_1_A*(-0.4859*cos(D_2_B-D_1_A)+1.2203*sin(D_2_B-D_1_A))+V_2_B*V_2_C*(-0.1263*cos(D_2_B-D_2_C)+0.6967*sin(D_2_B-D_2_C))-V_2_B*V_1_C*(-0.1263*cos(D_2_B-D_1_C)+0.6967*sin(D_2_B-D_1_C)):[0/0]
Q_2_B+-V_2_B*V_2_B*-3.1276+V_2_B*V_1_B*(-3.1276*cos(D_2_B-D_1_B)-1.0027*sin(D_2_B-D_1_B))+V_2_B*V_2_A*(-0.4859*sin(D_2_B-D_2_A)-1.2203*cos(D_2_B-D_2_A))+V_2_B*V_1_A*(1.2203*cos(D_2_B-D_1_A)--0.4859*sin(D_2_B-D_1_A))+V_2_B*V_2_C*(-0.1263*sin(D_2_B-D_2_C)-0.6967*cos(D_2_B-D_2_C))+V_2_B*V_1_C*(0.6967*cos(D_2_B-D_1_C)--0.1263*sin(D_2_B-D_1_C)):[0/0]
P_2_C+V_2_C*V_2_C*0.8867-V_2_C*V_1_C*(0.8867*cos(D_2_C-D_1_C)+-2.9506*sin(D_2_C-D_1_C))+V_2_C*V_2_A*(-0.2661*cos(D_2_C-D_2_A)+0.9122*sin(D_2_C-D_2_A))-V_2_C*V_1_A*(-0.2661*cos(D_2_C-D_1_A)+0.9122*sin(D_2_C-D_1_A))+V_2_C*V_2_B*(-0.1263*cos(D_2_C-D_2_B)+0.6967*sin(D_2_C-D_2_B))-V_2_C*V_1_B*(-0.1263*cos(D_2_C-D_1_B)+0.6967*sin(D_2_C-D_1_B)):[0/0]
Q_2_C+-V_2_C*V_2_C*-2.9506+V_2_C*V_1_C*(-2.9506*cos(D_2_C-D_1_C)-0.8867*sin(D_2_C-D_1_C))+V_2_C*V_2_A*(-0.2661*sin(D_2_C-D_2_A)-0.9122*cos(D_2_C-D_2_A))+V_2_C*V_1_A*(0.9122*cos(D_2_C-D_1_A)--0.2661*sin(D_2_C-D_1_A))+V_2_C*V_2_B*(-0.1263*sin(D_2_C-D_2_B)-0.6967*cos(D_2_C-D_2_B))+V_2_C*V_1_B*(0.6967*cos(D_2_C-D_1_B)--0.1263*sin(D_2_C-D_1_B)):[0/0]
V_1_A-7199.5579:[0/0]
D_1_A-0:[0/0]
V_1_B-7199.5579:[0/0]
D_1_B--2/3*pi:[0/0]
V_1_C-7199.5579:[0/0]
D_1_C-2/3*pi:[0/0]
P_2_A-1275000.0000:[0/0]
Q_2_A-790174.0000:[0/0]
P_2_B-1800000.0000:[0/0]
Q_2_B-871779.8000:[0/0]
P_2_C-2375000.0000:[0/0]
Q_2_C-780624.7000:[0/0]
V_1_A:[0/99999999],D_1_A:[-3.2/3.2],V_1_B:[0/99999999],D_1_B:[-3.2/3.2],V_1_C:[0/99999999],D_1_C:[-3.2/3.2],P_1_A:[-99999999/99999999],P_1_B:[-99999999/99999999],P_1_C:[-99999999/99999999],Q_1_A:[-99999999/99999999],Q_1_B:[-99999999/99999999],Q_1_C:[-99999999/99999999],V_2_A:[0/99999999],D_2_A:[-3.2/3.2],V_2_B:[0/99999999],D_2_B:[-3.2/3.2],V_2_C:[0/99999999],D_2_C:[-3.2/3.2],P_2_A:[-99999999/99999999],P_2_B:[-99999999/99999999],P_2_C:[-99999999/99999999],Q_2_A:[-99999999/99999999],Q_2_B:[-99999999/99999999],Q_2_C:[-99999999/99999999]
```
输入至easysolve,求解得到:
```text
V_1_A : 7199.5579
D_1_A : 0
V_1_B : 7199.5579
D_1_B : -2.0943951023931953
V_1_C : 7199.5579
D_1_C : 2.0943951023931953
P_1_A : -1277881.0174413954
P_1_B : -1809439.2939235603
P_1_C : -2381044.287784358
Q_1_A : -794494.3206242201
Q_1_B : -890721.1548199678
Q_1_C : -817091.6107552513
V_2_A : 7176.944412608694
D_2_A : -0.001431890501404535
V_2_B : 7139.609540861016
D_2_B : -2.1008076662494224
V_2_C : 7149.960093956684
D_2_C : 2.0813825033502016
P_2_A : 1275000
P_2_B : 1800000
P_2_C : 2375000
Q_2_A : 790174
Q_2_B : 871779.8
Q_2_C : 780624.7
```
二者结果相同。
\ No newline at end of file
[package]
name = "ds-common"
version = "0.1.0"
edition = "2021"
[dependencies]
serde_json = "1.0"
csv = "1.3"
eig-domain = { path = "../../../../eig-domain" }
mems = { path = "../../.." }
\ No newline at end of file
use csv::StringRecordsIter;
/// Parse the dynamic-topology CSV into rows of `[cn, tn]`.
///
/// Each record must provide at least two u64 columns; any extra columns
/// are ignored. Returns a row/column-tagged error message on bad input.
pub fn read_dyn_topo(records: &mut StringRecordsIter<&[u8]>)
    -> Result<Vec<Vec<u64>>, String> {
    const COLS: usize = 2;
    let mut dyn_topo: Vec<Vec<u64>> = Vec::new();
    let mut row = 0;
    while let Some(next) = records.next() {
        let record = match next {
            Ok(r) => r,
            Err(e) => return Err(format!("Wrong dynamic topology input, err: {:?}", e)),
        };
        let mut parsed = vec![0u64; COLS];
        let mut col = 0;
        for field in record.iter() {
            match field.parse() {
                Ok(id) => parsed[col] = id,
                Err(_) => return Err(format!("Wrong dynamic topology input, row {row} col {col}")),
            }
            col += 1;
            if col == COLS {
                break; // ignore any extra columns
            }
        }
        if col != COLS {
            return Err(format!("Wrong dynamic topology input, expected col 2, actual {col}"));
        }
        dyn_topo.push(parsed);
        row += 1;
    }
    Ok(dyn_topo)
}
/// Parse the device-topology CSV into rows of `[terminal, cn, tn, dev]`.
///
/// Each record must provide at least four u64 columns; any extra columns
/// are ignored. Returns a row/column-tagged error message on bad input.
pub fn read_dev_topo(records: &mut StringRecordsIter<&[u8]>)
    -> Result<Vec<Vec<u64>>, String> {
    let mut dev_topo = Vec::new();
    // Read the csv row by row
    let mut row = 0;
    loop {
        match records.next() {
            Some(Ok(record)) => {
                let mut col = 0;
                dev_topo.push(vec![0u64; 4]);
                for str in record.iter() {
                    if let Ok(id) = str.parse() {
                        dev_topo[row][col] = id;
                    } else {
                        // Message aligned with the other error strings of
                        // this reader (was "Wrong device topology, row …").
                        return Err(format!("Wrong device topology input, row {row} col {col}"));
                    }
                    col += 1;
                    if col == 4 {
                        break; // ignore any extra columns
                    }
                }
                if col != 4 {
                    return Err(format!("Wrong device topology input, expected col 4, actual {col}"));
                }
            }
            Some(Err(e)) => {
                return Err(format!("Wrong device topology input, err: {:?}", e));
            }
            None => {
                break;
            }
        }
        row += 1;
    }
    Ok(dev_topo)
}
pub mod dyn_topo;
pub mod static_topo;
pub mod tn_input;
// Names of the dataframes exchanged between the distribution-system plugins.
// static topo
pub const STATIC_TOPO_DF_NAME: &str = "static_topo";
pub const TERMINAL_DF_NAME: &str = "terminal_cn_dev";
pub const POINT_DF_NAME: &str = "point_terminal_phase";
// dynamic topo
pub const DYN_TOPO_DF_NAME: &str = "dyn_topo";
pub const DEV_TOPO_DF_NAME: &str = "dev_topo";
// impedance matrix
pub const DEV_CONDUCTOR_DF_NAME: &str = "dev_ohm";
// pf input
pub const SHUNT_MEAS_DF_NAME: &str = "shunt_meas";
pub const TN_INPUT_DF_NAME: &str = "tn_input";
// pf nlp: names of the 3-phase power-flow NLP objective / constraints
pub const DS_PF_NLP_OBJ: &str = "3phase_pf_obj";
pub const DS_PF_NLP_CONS: &str = "3phase_pf_cons";
\ No newline at end of file
use std::collections::HashMap;
use csv::StringRecordsIter;
use mems::model::dev::{MeasPhase, PsRsrType};
/// Read the point/terminal dataframe: each CSV row is (point, terminal, phase).
/// Returns one `Vec<u64>` of `[point, terminal]` per row; when `meas_phase` is
/// supplied, the third column is additionally parsed into it (one entry per
/// row, in row order, so indices align with the returned vector).
pub fn read_point_terminal(records: &mut StringRecordsIter<&[u8]>,
                           mut meas_phase: Option<&mut Vec<MeasPhase>>) -> Result<Vec<Vec<u64>>, String> {
    let mut points = Vec::new();
    // Walk the CSV row by row.
    let mut row = 0;
    loop {
        match records.next() {
            Some(Ok(record)) => {
                points.push(vec![0u64; 2]);
                let mut col = 0;
                for str in record.iter() {
                    if col < 2 {
                        // First two columns are numeric ids.
                        if let Ok(id) = str.parse() {
                            points[row][col] = id;
                        } else {
                            return Err(format!("Wrong point input, row {row} col {col}"));
                        }
                    } else if meas_phase.is_some() {
                        // Third column is the measurement phase.
                        meas_phase.as_mut().unwrap().push(MeasPhase::from(str))
                    }
                    col += 1;
                    // Columns past the third are ignored.
                    if col == 3 {
                        break;
                    }
                }
                if col != 3 {
                    return Err(format!("Wrong point input, expected col at least 3, actual {col}"));
                }
            }
            Some(Err(e)) => {
                return Err(format!("Wrong point input, err: {:?}", e));
            }
            None => {
                break;
            }
        }
        row += 1;
    }
    Ok(points)
}
/// Read the terminal/cn/dev dataframe: each CSV row is
/// (terminal, cn, dev, type). Returns one `Vec<u64>` of
/// `[terminal, cn, dev]` per row; when `dev_type` is supplied, the fourth
/// column is parsed as `u16` and recorded against the row's dev id.
pub fn read_terminal_cn_dev(records: &mut StringRecordsIter<&[u8]>, mut dev_type: Option<&mut HashMap<u64, u16>>)
    -> Result<Vec<Vec<u64>>, String> {
    let mut terminals: Vec<Vec<u64>> = Vec::new();
    // Walk the CSV row by row.
    let mut row = 0;
    loop {
        match records.next() {
            Some(Ok(record)) => {
                terminals.push(vec![0u64; 3]);
                let mut col = 0;
                for str in record.iter() {
                    if col < 3 {
                        // First three columns are numeric ids.
                        if let Ok(id) = str.parse() {
                            terminals[row][col] = id;
                        } else {
                            return Err(format!("Wrong terminal input, row {row} col {col}: {str}"));
                        }
                    } else if col == 3 && dev_type.is_some() {
                        // Fourth column is the device type; keyed on the dev id
                        // just parsed into terminals[row][2].
                        if let Ok(type_u16) = str.parse::<u16>() {
                            dev_type.as_mut().unwrap().insert(terminals[row][2], type_u16);
                        } else {
                            return Err(format!("Wrong terminal input, row {row} col {col}: {str}"));
                        }
                    }
                    col += 1;
                    // Columns past the fourth are ignored.
                    if col == 4 {
                        break;
                    }
                }
                if col != 4 {
                    return Err(format!("Wrong terminal input, expected col 4, actual {col}"));
                }
            }
            Some(Err(e)) => {
                return Err(format!("Wrong terminal input, err: {:?}", e));
            }
            None => {
                break;
            }
        }
        row += 1;
    }
    Ok(terminals)
}
/// Read the static-topology dataframe: each CSV row is
/// (source, target, id, type, open[, name…]). Returns one `Vec<u64>` of
/// `[source, target, id]` per row; when `normal_open` is supplied and the
/// row's type column equals `PsRsrType::Switch`, the fifth column is parsed
/// as `bool` and recorded against the row's device id.
pub fn read_static_topo(records: &mut StringRecordsIter<&[u8]>,
                        mut normal_open: Option<&mut HashMap<u64, bool>>)
    -> Result<Vec<Vec<u64>>, String> {
    let mut edges = Vec::new();
    let mut row = 0;
    // (sic: "swich") numeric discriminant used to recognise switch rows.
    let swich_type = PsRsrType::Switch as u16;
    // Walk the CSV row by row.
    loop {
        match records.next() {
            Some(Ok(record)) => {
                edges.push(vec![0u64; 3]);
                let mut col = 0;
                let mut is_switch = false;
                for str in record.iter() {
                    if col < 3 {
                        // First three columns are numeric ids.
                        if let Ok(id) = str.parse() {
                            edges[row][col] = id;
                        } else {
                            return Err(format!("Wrong static topology input, row {row} col {col}: {str}"));
                        }
                    } else if col == 3 {
                        // Fourth column: device type; remember whether this row is a switch.
                        if let Ok(type_u16) = str.parse::<u16>() {
                            is_switch = type_u16 == swich_type;
                        } else {
                            return Err(format!("Wrong static topology input, row {row} col {col}: {str}"));
                        }
                    } else if col == 4 && is_switch && normal_open.is_some() {
                        // Fifth column: normal-open flag, only meaningful for switches.
                        if let Ok(b) = str.parse::<bool>() {
                            normal_open.as_mut().unwrap().insert(edges[row][2], b);
                        } else {
                            return Err(format!("Wrong static topology input, row {row} col {col}: {str}"));
                        }
                    }
                    col += 1;
                    // Columns past the fifth (e.g. name) are ignored here.
                    if col == 5 {
                        break;
                    }
                }
                if col != 5 {
                    return Err(format!("Wrong static topology input, expected col at least 5, actual {col}"));
                }
            }
            Some(Err(e)) => {
                return Err(format!("Wrong static topology input, err: {:?}", e));
            }
            None => {
                break;
            }
        }
        row += 1;
    }
    Ok(edges)
}
\ No newline at end of file
use std::collections::HashMap;
use csv::StringRecordsIter;
use eig_domain::prop::DataUnit;
use mems::model::dev::MeasPhase;
/// Read the shunt-measure dataframe: each CSV row is (point, terminal, phase).
/// Returns a map from point id to `(terminal id, measurement phase)`.
pub fn read_shunt_measures(records: &mut StringRecordsIter<&[u8]>)
    -> Result<HashMap<u64, (u64, MeasPhase)>, String> {
    let mut meas = HashMap::new();
    // Walk the CSV row by row.
    let mut row = 0;
    loop {
        match records.next() {
            Some(Ok(record)) => {
                let mut col = 0;
                let mut point = 0u64;
                let mut terminal = 0u64;
                for str in record.iter() {
                    if col == 0 {
                        // Column 0: point id.
                        if let Ok(id) = str.parse() {
                            point = id;
                        } else {
                            return Err(format!("Wrong shunt measure input, row {row} col {col}"));
                        }
                    } else if col == 1 {
                        // Column 1: terminal id.
                        if let Ok(id) = str.parse() {
                            terminal = id;
                        } else {
                            return Err(format!("Wrong shunt measure input, row {row} col {col}"));
                        }
                    } else if col == 2 {
                        // Column 2: phase; the row is committed to the map here.
                        meas.insert(point, (terminal, MeasPhase::from(str)));
                    }
                    col += 1;
                    // Columns past the third are ignored.
                    if col == 3 {
                        break;
                    }
                }
                if col != 3 {
                    return Err(format!("Wrong shunt measure input, expected col at least 3, actual {col}"));
                }
            }
            Some(Err(e)) => {
                return Err(format!("Wrong shunt measure input, err: {:?}", e));
            }
            None => {
                break;
            }
        }
        row += 1;
    }
    Ok(meas)
}
/// Read the tn-input dataframe: each CSV row is (tn, phase, unit, value),
/// where phase / unit / value are JSON-encoded. Returns four parallel vectors
/// in the order (tn ids, phases, units, values) — note the returned tuple
/// order matches the signature: phase comes before unit.
pub fn read_tn_input(records: &mut StringRecordsIter<&[u8]>)
    -> Result<(Vec<u64>, Vec<MeasPhase>, Vec<DataUnit>, Vec<f64>), String> {
    let mut tn = Vec::new();
    let mut input_type = Vec::new();
    let mut input_phase = Vec::new();
    let mut value = Vec::new();
    // Walk the CSV row by row.
    let mut row = 0;
    loop {
        match records.next() {
            Some(Ok(record)) => {
                let mut col = 0;
                for str in record.iter() {
                    if col == 0 {
                        // Column 0: tn id (plain integer).
                        if let Ok(v) = str.parse() {
                            tn.push(v);
                        } else {
                            return Err(format!("Wrong bus input, row {row} col {col}"));
                        }
                    } else if col == 1 {
                        // Column 1: measurement phase (JSON).
                        if let Ok(v) = serde_json::from_str(str) {
                            input_phase.push(v);
                        } else {
                            return Err(format!("Wrong bus input, row {row} col {col}"));
                        }
                    } else if col == 2 {
                        // Column 2: data unit (JSON).
                        if let Ok(v) = serde_json::from_str(str) {
                            input_type.push(v);
                        } else {
                            return Err(format!("Wrong bus input, row {row} col {col}"));
                        }
                    } else if col == 3 {
                        // Column 3: measured value (JSON number).
                        if let Ok(v) = serde_json::from_str(str) {
                            value.push(v);
                        } else {
                            return Err(format!("Wrong bus input, row {row} col {col}"));
                        }
                    }
                    col += 1;
                    if col == 4 {
                        break;
                    }
                }
                if col != 4 {
                    return Err(format!("Wrong bus input, expected col 4, actual {col}"));
                }
            }
            Some(Err(e)) => {
                return Err(format!("Wrong bus input, err: {:?}", e));
            }
            None => {
                break;
            }
        }
        row += 1;
    }
    Ok((tn, input_phase, input_type, value))
}
\ No newline at end of file
[package]
name = "ds-dev-ohm-cal"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["cdylib"]
[dependencies]
log = "0.4"
serde_json = "1.0"
ciborium = "0.2"
arrow-schema = { version = "56.1", features = ["serde"] }
eig-domain = { path = "../../../../eig-domain" }
mems = { path = "../../.." }
csv = "1.3.0"
ndarray = "0.16"
bytes = "1.10"
use std::collections::HashMap;
use arrow_schema::{DataType, Field, Schema};
use bytes::{Buf, BufMut, BytesMut};
use csv::StringRecordsIter;
use log::{info, warn};
use ndarray::{Array2, ArrayBase, Ix2, OwnedRepr};
use eig_domain::prop::PropValue;
use mems::model::{get_csv_str, get_df_from_in_plugin, get_island_from_plugin_input, PluginInput, PluginOutput};
use mems::model::dev::PsRsrType;
static mut OUTPUT: Vec<u8> = vec![];
#[no_mangle]
pub unsafe fn run(ptr: i32, len: u32) -> u64 {
info!("Read plugin input firstly");
// 从内存中获取字符串
let input = unsafe {
let slice = std::slice::from_raw_parts(ptr as _, len as _);
let input: PluginInput = ciborium::from_reader(slice).unwrap();
input
};
let mut error = None;
let r = get_island_from_plugin_input(&input);
if let Err(s) = &r {
error = Some(s.clone());
}
let r2 = get_df_from_in_plugin(&input);
if let Err(s) = &r2 {
error = Some(s.clone());
}
let mut config= HashMap::with_capacity(0);
let mut csv_str = String::from("dev_id,ohm\n");
if error.is_none() {
let (island, prop_defs, defines) = r.unwrap();
let from = r2.unwrap();
info!("input dataframe num from edges is {}", input.dfs.len());
for i in 0..input.dfs_len.len() {
let size = input.dfs_len[i] as usize;
let end = from + size;
let mut rdr = csv::ReaderBuilder::new().has_headers(true).from_reader(&input.bytes[from..end]);
let mut records = rdr.records();
match read_config( &mut records) {
Ok(v) => config = v,
Err(s) => error = Some(s),
}
break;
}
if error.is_none() {
let mut prop_defines = HashMap::with_capacity(prop_defs.len());
for def in prop_defs.into_iter() {
prop_defines.insert(def.id, def);
}
for (_, rsr) in &island.resources {
if let Some(def) = defines.get(&rsr.define_id) {
if def.rsr_type == PsRsrType::ACline {
let dev_id = rsr.id;
let line_conf = rsr.get_prop_value("model", &island.prop_groups, &prop_defines);
let length = rsr.get_prop_value("length", &island.prop_groups, &prop_defines);
if let PropValue::Str(s) = line_conf {
if let Some((mat_re, mat_im)) = config.get(&s) {
if let Some(f) = length.get_f64() {
let ratio = f / 1000.0;
let u_re = vec![1., 0., 0., 0., 1., 0., 0., 0., 1.];
let u_im = vec![0., 0., 0., 0., 0., 0., 0., 0., 0.];
let (z_re, _) = (mat_re * ratio).into_raw_vec_and_offset();
let (z_im, _) = (mat_im * ratio).into_raw_vec_and_offset();
let u_re_json = get_csv_str(&serde_json::to_string(&u_re).unwrap());
let u_im_json = get_csv_str(&serde_json::to_string(&u_im).unwrap());
let z_re_json = get_csv_str(&serde_json::to_string(&z_re).unwrap());
let z_im_json = get_csv_str(&serde_json::to_string(&z_im).unwrap());
csv_str.push_str(&format!("{dev_id},{u_re_json},{u_im_json},{z_re_json},{z_im_json},{u_im_json},{u_im_json},{z_re_json},{z_im_json}\n"));
} else {
warn!("Length is not set for acline {}", rsr.name);
continue;
}
} else {
warn!("!!Failed to find matrix for line_conf: {s}");
}
}
}
// todo: add other types: transformer\regulator
}
}
}
}
let output = if error.is_none() {
// build schema
let schema = Schema::new(vec![
Field::new("dev_id", DataType::UInt64, false),
Field::new("a_re", DataType::Utf8, false),
Field::new("a_im", DataType::Utf8, false),
Field::new("b_re", DataType::Utf8, false),
Field::new("b_im", DataType::Utf8, false),
Field::new("c_re", DataType::Utf8, false),
Field::new("c_im", DataType::Utf8, false),
Field::new("d_re", DataType::Utf8, false),
Field::new("d_im", DataType::Utf8, false),
]);
let csv_bytes = vec![("".to_string(), csv_str.into_bytes())];
PluginOutput {
error_msg: None,
schema: Some(vec![schema]),
csv_bytes,
}
} else {
PluginOutput {
error_msg: error,
schema: None,
csv_bytes: vec![],
}
};
ciborium::into_writer(&output, &mut OUTPUT).unwrap();
let offset = OUTPUT.as_ptr() as i32;
let len = OUTPUT.len() as u32;
let mut bytes = BytesMut::with_capacity(8);
bytes.put_i32(offset);
bytes.put_u32(len);
bytes.get_u64()
}
/// Parse the conductor-configuration dataframe: each row is
/// (name, ohm_per_km), where ohm_per_km is a JSON array of 18 floats —
/// the row-major 3x3 real part followed by the 3x3 imaginary part.
/// Returns a map from config name to the (real, imaginary) matrices.
fn read_config(records: &mut StringRecordsIter<&[u8]>)
    -> Result<HashMap<String, (ArrayBase<OwnedRepr<f64>, Ix2>, ArrayBase<OwnedRepr<f64>, Ix2>)>, String>{
    let mut config: HashMap<String, (ArrayBase<OwnedRepr<f64>, Ix2>, ArrayBase<OwnedRepr<f64>, Ix2>)> = HashMap::new();
    // Walk the CSV row by row.
    loop {
        match records.next() {
            Some(Ok(record)) => {
                let mut col = 0;
                let mut name = "".to_string();
                let mut ohm_per_km = "".to_string();
                for str in record.iter() {
                    if col == 0 {
                        name = str.to_string();
                    } else {
                        ohm_per_km = str.to_string();
                    }
                    col += 1;
                    // Columns past the second are ignored.
                    if col == 2 {
                        break;
                    }
                }
                if col != 2 {
                    return Err(format!("Wrong config input, expected col more than 2, actual {col}"));
                }
                match serde_json::from_str::<[f64; 18]>(&ohm_per_km) {
                    Ok(ohm) => {
                        // First 9 values: real part; last 9: imaginary part.
                        let mat_re = Array2::from_shape_vec((3, 3), ohm[0..9].to_vec()).unwrap();
                        let mat_im = Array2::from_shape_vec((3, 3), ohm[9..18].to_vec()).unwrap();
                        config.insert(name, (mat_re, mat_im));
                    }
                    Err(e) => {
                        return Err(format!("Failed to parse matrix from {ohm_per_km}, err: {:?}", e));
                    }
                }
            }
            Some(Err(e)) => {
                return Err(format!("{:?}", e));
            }
            None => {
                break;
            }
        }
    }
    Ok(config)
}
\ No newline at end of file
[package]
name = "ds-dyn-topo"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["cdylib"]
[dependencies]
log = "0.4"
csv = "1.3.0"
ciborium = "0.2"
arrow-schema = { version = "56.1", features = ["serde"] }
eig-domain = { path = "../../../../eig-domain" }
mems = { path = "../../.." }
ds-common = { path = "../ds-common" }
bytes = "1.10"
\ No newline at end of file
use std::collections::HashMap;
use arrow_schema::{DataType, Field, Schema};
use bytes::{Buf, BufMut, BytesMut};
use ds_common::{DEV_TOPO_DF_NAME, DYN_TOPO_DF_NAME, POINT_DF_NAME, STATIC_TOPO_DF_NAME, TERMINAL_DF_NAME};
use ds_common::static_topo::{read_point_terminal, read_static_topo, read_terminal_cn_dev};
use eig_domain::prop::DataUnit;
use mems::model::{get_df_from_in_plugin, get_meas_from_plugin_input, ModelType, PluginInput, PluginOutput};
static mut OUTPUT: Vec<u8> = vec![];
/// Plugin entry point: computes the dynamic topology (cn -> tn numbering)
/// from the static topology plus switch-position measurements, and emits
/// either the dyn_topo or the dev_topo dataframe depending on the requested
/// outgoing edge names. Returns the packed (offset, length) of the CBOR
/// encoded `PluginOutput`.
#[no_mangle]
pub unsafe fn run(ptr: i32, len: u32) -> u64 {
    // Deserialize the PluginInput handed to us in wasm linear memory.
    let input = unsafe {
        let slice = std::slice::from_raw_parts(ptr as _, len as _);
        let input: PluginInput = ciborium::from_reader(slice).unwrap();
        input
    };
    let mut error = None;
    let r1 = get_meas_from_plugin_input(&input);
    if let Err(s) = &r1 {
        error = Some(s.clone());
    }
    let r2 = get_df_from_in_plugin(&input);
    // source, target, dev
    let mut edges: Vec<Vec<u64>> = vec![];
    // switch id to normal open
    let mut normal_open: HashMap<u64, bool> = HashMap::new();
    // terminal, cn, dev
    let mut terminals: Vec<Vec<u64>> = vec![];
    // point, terminal
    let mut points: Vec<Vec<u64>> = vec![];
    if error.is_none() {
        if let Err(s) = &r2 {
            error = Some(s.clone());
        } else {
            let mut from = r2.unwrap();
            // Parse each incoming dataframe, dispatching on its name.
            for i in 0..input.dfs_len.len() {
                let size = input.dfs_len[i] as usize;
                let end = from + size;
                let mut rdr = csv::ReaderBuilder::new().has_headers(true).from_reader(&input.bytes[from..end]);
                let mut records = rdr.records();
                // Read the incoming static-topology dataframe.
                if input.dfs[i] == STATIC_TOPO_DF_NAME {
                    match read_static_topo(&mut records, Some(&mut normal_open)) {
                        Ok(v) => edges = v,
                        Err(s) => error = Some(s),
                    }
                } else if input.dfs[i] == TERMINAL_DF_NAME {
                    match read_terminal_cn_dev(&mut records, None) {
                        Ok(v) => terminals = v,
                        Err(s) => error = Some(s),
                    }
                } else if input.dfs[i] == POINT_DF_NAME {
                    match read_point_terminal(&mut records, None) {
                        Ok(v) => points = v,
                        Err(s) => error = Some(s),
                    }
                }
                from += size;
            }
        }
    }
    let output = if error.is_none() {
        let (meas, units) = r1.unwrap();
        // Index the parsed tables: point -> terminal, terminal -> cn/dev.
        let mut point_map: HashMap<u64, u64> = HashMap::with_capacity(points.len());
        let mut terminal_cn: HashMap<u64, u64> = HashMap::with_capacity(points.len());
        let mut terminal_dev: HashMap<u64, u64> = HashMap::with_capacity(points.len());
        for ids in points {
            point_map.insert(ids[0], ids[1]);
        }
        for ids in terminals {
            terminal_cn.insert(ids[0], ids[1]);
            terminal_dev.insert(ids[0], ids[2]);
        }
        // Map each switch reported closed by a measurement to one of its CNs.
        let mut closed_switch_to_cn: HashMap<u64, u64> = HashMap::with_capacity(terminal_cn.len() / 2);
        // Process the on/off (switch-position) measurements.
        for m in meas {
            if let Some(unit) = units.get(&m.point_id) {
                if DataUnit::OnOrOff == *unit {
                    if let Some(terminal_id) = point_map.get(&m.point_id) {
                        if let Some(cn_id) = terminal_cn.get(terminal_id) {
                            if let Some(dev_id) = terminal_dev.get(terminal_id) {
                                // Non-zero value means the switch is closed.
                                if m.get_value2() > 0 {
                                    closed_switch_to_cn.insert(*dev_id, *cn_id);
                                }
                            }
                        }
                    }
                }
            }
        }
        // build tns
        // NOTE(review): this is a single forward pass, not a union-find; if a
        // closed switch joins two CN groups that were each already assigned
        // distinct tn numbers earlier in the edge list, the groups are not
        // merged. Confirm whether edge ordering guarantees this cannot occur.
        let mut cn_tn: HashMap<u64, usize> = HashMap::with_capacity(terminal_cn.len() / 2);
        let mut not_dealed = Vec::new();
        for v in edges {
            let cn1 = v[0];
            let cn2 = v[1];
            let dev_id = v[2];
            // switch with measure
            if let Some(cn) = closed_switch_to_cn.get(&dev_id) {
                if *cn == cn1 {
                    if let Some(tn) = cn_tn.get(cn) {
                        cn_tn.insert(cn2, *tn);
                    } else {
                        // Allocate a fresh tn number for both endpoints.
                        let tn = cn_tn.len() + 1;
                        cn_tn.insert(cn1, tn);
                        cn_tn.insert(cn2, tn);
                    }
                } else if *cn == cn2 {
                    if let Some(tn) = cn_tn.get(cn) {
                        cn_tn.insert(cn1, *tn);
                    } else {
                        let tn = cn_tn.len() + 1;
                        cn_tn.insert(cn1, tn);
                        cn_tn.insert(cn2, tn);
                    }
                }
            }
            // this is a closed switch with no measure
            else if let Some(false) = normal_open.get(&dev_id) {
                if let Some(tn) = cn_tn.get(&cn1) {
                    cn_tn.insert(cn2, *tn);
                } else if let Some(tn) = cn_tn.get(&cn2) {
                    cn_tn.insert(cn1, *tn);
                } else {
                    let tn = cn_tn.len() + 1;
                    cn_tn.insert(cn1, tn);
                    cn_tn.insert(cn2, tn);
                }
            }
            // this is open switch or not switch
            else {
                if !cn_tn.contains_key(&cn1) {
                    not_dealed.push(cn1);
                }
                if !cn_tn.contains_key(&cn2) {
                    not_dealed.push(cn2);
                }
            }
        }
        // Any CN still unnumbered gets its own tn.
        for cn in not_dealed {
            if !cn_tn.contains_key(&cn) {
                let tn = cn_tn.len() + 1;
                cn_tn.insert(cn, tn);
            }
        }
        // get outgoing edges
        let mut outgoing = vec![];
        for model_input in &input.model {
            match model_input {
                ModelType::Outgoing(edge_name) => {
                    outgoing = edge_name.clone();
                }
                _ => {}
            }
        }
        let mut csv_bytes = Vec::with_capacity(2);
        let mut schema = Vec::with_capacity(2);
        // Default, or explicitly requested, output dataframe.
        if outgoing.is_empty() || outgoing.contains(&DYN_TOPO_DF_NAME.to_string()) ||
            (!outgoing.contains(&DYN_TOPO_DF_NAME.to_string()) && !outgoing.contains(&DEV_TOPO_DF_NAME.to_string())) {
            // build topology
            let mut topo_csv = String::from("cn,tn\n");
            for (cn, tn) in &cn_tn {
                topo_csv.push_str(&format!("{cn},{tn}\n"));
            }
            // build topology schema
            let topo_schema = Schema::new(vec![
                Field::new("cn", DataType::UInt64, false),
                Field::new("tn", DataType::UInt64, false),
            ]);
            csv_bytes.push((DYN_TOPO_DF_NAME.to_string(), topo_csv.into_bytes()));
            schema.push(topo_schema);
        } else if outgoing.contains(&DEV_TOPO_DF_NAME.to_string()) {
            // build dev connection
            let mut dev_csv = String::from("terminal,cn,tn,dev\n");
            for (terminal, dev) in terminal_dev {
                // Closed switches were collapsed into their tn; skip them.
                if closed_switch_to_cn.contains_key(&dev) {
                    continue;
                }
                if let Some(cn) = terminal_cn.get(&terminal) {
                    if let Some(tn) = cn_tn.get(cn) {
                        dev_csv.push_str(&format!("{terminal},{cn},{tn},{dev}\n"));
                    }
                }
            }
            // build dev connection schema
            let dev_schema = Schema::new(vec![
                Field::new("terminal", DataType::UInt64, false),
                Field::new("cn", DataType::UInt64, false),
                Field::new("tn", DataType::UInt64, false),
                Field::new("dev", DataType::UInt64, false),
            ]);
            csv_bytes.push((DEV_TOPO_DF_NAME.to_string(), dev_csv.into_bytes()));
            schema.push(dev_schema);
        }
        PluginOutput {
            error_msg: None,
            schema: Some(schema),
            csv_bytes,
        }
    } else {
        PluginOutput {
            error_msg: error,
            schema: None,
            csv_bytes: vec![],
        }
    };
    // Serialize the output and return its (offset, length) packed into a u64.
    ciborium::into_writer(&output, &mut OUTPUT).unwrap();
    let offset = OUTPUT.as_ptr() as i32;
    let len = OUTPUT.len() as u32;
    let mut bytes = BytesMut::with_capacity(8);
    bytes.put_i32(offset);
    bytes.put_u32(len);
    bytes.get_u64()
}
\ No newline at end of file
[package]
name = "ds-tn-input"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["cdylib"]
[dependencies]
log = "0.4"
csv = "1.3.0"
ciborium = "0.2"
arrow-schema = { version = "56.1", features = ["serde"] }
ds-common = { path = "../ds-common" }
eig-domain = { path = "../../../../eig-domain" }
mems = { path = "../../.." }
bytes = "1.10"
\ No newline at end of file
use std::collections::{BTreeMap, HashMap, HashSet};
use std::fs;
use std::io::Write;
use std::path::PathBuf;
use arrow_schema::{DataType, Field, Schema};
use bytes::{Buf, BufMut, BytesMut};
use ds_common::{DEV_TOPO_DF_NAME, POINT_DF_NAME, SHUNT_MEAS_DF_NAME, TERMINAL_DF_NAME, TN_INPUT_DF_NAME};
use ds_common::dyn_topo::read_dev_topo;
use ds_common::static_topo::{read_point_terminal, read_terminal_cn_dev};
use ds_common::tn_input::read_shunt_measures;
use eig_domain::{prop::DataUnit, MeasureValue};
use mems::model::{get_df_from_in_plugin, get_meas_from_plugin_input, PluginInput, PluginOutput};
use mems::model::dev::{MeasPhase, PsRsrType};
static mut OUTPUT: Vec<u8> = vec![];
/// Plugin entry point with two modes, selected by which dataframes arrive:
/// with static-topology inputs it emits the shunt_meas dataframe (points on
/// terminals of shunt devices); otherwise it aggregates measurements per tn
/// from dev_topo + shunt_meas and emits the tn_input dataframe. Returns the
/// packed (offset, length) of the CBOR-encoded `PluginOutput`.
#[no_mangle]
pub unsafe fn run(ptr: i32, len: u32) -> u64 {
    // Deserialize the PluginInput handed to us in wasm linear memory.
    let input = unsafe {
        let slice = std::slice::from_raw_parts(ptr as _, len as _);
        let input: PluginInput = ciborium::from_reader(slice).unwrap();
        input
    };
    let r2 = get_df_from_in_plugin(&input);
    let mut error = None;
    // static topo
    // point, terminal
    let mut points: Vec<Vec<u64>> = vec![];
    let mut meas_phase: Vec<MeasPhase> = vec![];
    // terminal, cn, dev
    let mut terminals: Vec<Vec<u64>> = vec![];
    // key is point id, value is (terminal id, measure phase)
    let mut point_of_shunt_dev: HashMap<u64, (u64, MeasPhase)> = HashMap::with_capacity(0);
    let mut terminal_of_shunt_dev: HashSet<u64> = HashSet::with_capacity(0);
    // dev id to device type
    let mut dev_type: HashMap<u64, u16> = HashMap::new();
    // dynamic topo: terminal, cn, tn, dev
    let mut dyn_dev_topo: Vec<Vec<u64>> = vec![];
    let mut with_static = false;
    if let Err(s) = &r2 {
        error = Some(s.clone());
    } else {
        let mut from = r2.unwrap();
        for i in 0..input.dfs_len.len() {
            let size = input.dfs_len[i] as usize;
            let end = from + size;
            let mut rdr = csv::ReaderBuilder::new().has_headers(true).from_reader(&input.bytes[from..end]);
            let mut records = rdr.records();
            // Process the dataframe delivered on the i-th incoming edge.
            if input.dfs[i] == TERMINAL_DF_NAME {
                with_static = true;
                match read_terminal_cn_dev(&mut records, Some(&mut dev_type)) {
                    Ok(v) => terminals = v,
                    Err(s) => error = Some(s),
                }
            } else if input.dfs[i] == POINT_DF_NAME {
                match read_point_terminal(&mut records, Some(&mut meas_phase)) {
                    Ok(v) => points = v,
                    Err(s) => error = Some(s),
                }
            } else if input.dfs[i] == DEV_TOPO_DF_NAME {
                match read_dev_topo(&mut records) {
                    Ok(v) => dyn_dev_topo = v,
                    Err(s) => {
                        error = Some(s);
                        break;
                    }
                }
            } else if input.dfs[i] == SHUNT_MEAS_DF_NAME {
                match read_shunt_measures(&mut records) {
                    Ok(v) => {
                        terminal_of_shunt_dev = HashSet::with_capacity(v.len());
                        for (terminal, _) in v.values() {
                            terminal_of_shunt_dev.insert(*terminal);
                        }
                        point_of_shunt_dev = v;
                    }
                    Err(s) => error = Some(s),
                }
            }
            from += size;
        }
    }
    let output = if error.is_some() {
        PluginOutput {
            error_msg: error,
            schema: None,
            csv_bytes: vec![],
        }
    } else {
        // With static-topology inputs we emit shunt_meas, built from the
        // terminal_cn_dev and point_terminal_phase dataframes.
        if with_static {
            let mut csv_str = String::from("point,terminal,phase\n");
            let type1 = PsRsrType::SyncGenerator as u16;
            let type2 = PsRsrType::Load as u16;
            let type3 = PsRsrType::ShuntCompensator as u16;
            let shunt_types = [type1, type2, type3];
            let mut terminal_with_shunt_dev = HashSet::new();
            for v in terminals {
                let terminal = v[0];
                let dev_id = v[2];
                if let Some(dev_type) = dev_type.get(&dev_id) {
                    // Fixed: `dev_type` here is already a `&u16`, so it is
                    // passed directly; `&dev_type` would be `&&u16` and fail
                    // to type-check against `[u16]::contains(&u16)`.
                    if shunt_types.contains(dev_type) {
                        terminal_with_shunt_dev.insert(terminal);
                    }
                }
            }
            let mut point_terminal = HashMap::with_capacity(points.len());
            // points[i] and meas_phase[i] are parallel (filled row-by-row).
            for i in 0..points.len() {
                let point_id = points[i][0];
                let terminal = points[i][1];
                if terminal_with_shunt_dev.contains(&terminal) {
                    let phase = meas_phase[i].to_string();
                    csv_str.push_str(&format!("{point_id},{terminal},{phase}\n"));
                    point_terminal.insert(point_id, (terminal, meas_phase[i].clone()));
                }
            }
            // build schema
            let schema = Schema::new(vec![
                Field::new("point", DataType::UInt64, false),
                Field::new("terminal", DataType::UInt64, false),
                Field::new("phase", DataType::Utf8, false),
            ]);
            // Also dump the dataframe to a debug file (best effort).
            let mut base = PathBuf::from("/");
            base.push("shunt_meas.csv");
            let mut file = fs::OpenOptions::new()
                .create(true)
                .write(true)
                .truncate(true)
                .open(&base)
                .expect("Could not create file");
            if let Err(e) = file.write_all(csv_str.as_bytes()) {
                log::warn!("!!Failed to write file, err: {:?}", e);
            } else {
                let _ = file.sync_all();
            }
            // write graph
            let csv_bytes = vec![(SHUNT_MEAS_DF_NAME.to_string(), csv_str.into_bytes())];
            PluginOutput {
                error_msg: None,
                schema: Some(vec![schema]),
                csv_bytes,
            }
        } else {
            // Otherwise emit tn_input, built from measurements and dev_topo.
            let r1 = get_meas_from_plugin_input(&input);
            if let Err(s) = &r1 {
                error = Some(s.clone());
            }
            if error.is_some() {
                PluginOutput {
                    error_msg: error,
                    schema: None,
                    csv_bytes: vec![],
                }
            } else {
                let (meas, units) = r1.unwrap();
                let len = terminal_of_shunt_dev.len();
                let mut terminal_tn = HashMap::with_capacity(len);
                let mut tn_measure: BTreeMap<u64, Vec<(DataUnit, MeasPhase, f64)>> = BTreeMap::new();
                let mut tn_voltage_count: HashMap<String, usize> = HashMap::with_capacity(len);
                // Map each shunt-device terminal to its tn.
                for v in dyn_dev_topo {
                    let terminal = v[0];
                    let tn = v[2];
                    if terminal_of_shunt_dev.contains(&terminal) {
                        terminal_tn.insert(terminal, tn);
                        tn_measure.insert(tn, vec![]);
                    }
                }
                // Accumulate measurements per tn, normalising units to A/V/W/Var.
                for m in meas {
                    if let Some((terminal, phase)) = point_of_shunt_dev.get(&m.point_id) {
                        if let Some(tn) = terminal_tn.get(terminal) {
                            let v = tn_measure.get_mut(tn).unwrap();
                            if let Some(unit) = units.get(&m.point_id) {
                                match unit {
                                    DataUnit::A => create_measure(&m, phase, 1., v, DataUnit::A),
                                    DataUnit::V => {
                                        create_measure(&m, phase, 1., v, DataUnit::V);
                                        // Count voltage samples per (tn, phase) so
                                        // they can be averaged afterwards.
                                        let key = format!("{tn}_{phase}");
                                        if let Some(count) = tn_voltage_count.get_mut(&key) {
                                            *count += 1;
                                        } else {
                                            tn_voltage_count.insert(key, 1);
                                        }
                                    }
                                    DataUnit::kV => {
                                        create_measure(&m, phase, 1000., v, DataUnit::V);
                                        let key = format!("{tn}_{phase}");
                                        if let Some(count) = tn_voltage_count.get_mut(&key) {
                                            *count += 1;
                                        } else {
                                            tn_voltage_count.insert(key, 1);
                                        }
                                    }
                                    DataUnit::W => create_measure(&m, phase, 1., v, DataUnit::W),
                                    DataUnit::kW => create_measure(&m, phase, 1000., v, DataUnit::W),
                                    DataUnit::MW => create_measure(&m, phase, 1000000., v, DataUnit::W),
                                    DataUnit::Var => create_measure(&m, phase, 1., v, DataUnit::Var),
                                    DataUnit::kVar => create_measure(&m, phase, 1000., v, DataUnit::Var),
                                    DataUnit::MVar => create_measure(&m, phase, 1000000., v, DataUnit::Var),
                                    _ => {}
                                }
                            }
                        }
                    }
                }
                // Voltages were summed in create_measure; divide by the sample
                // count to get the average (single samples are left alone).
                for (tn_phase, count) in tn_voltage_count {
                    if count == 1 {
                        continue;
                    }
                    let s: Vec<&str> = tn_phase.split("_").collect();
                    let tn: u64 = s[0].parse().unwrap();
                    let p = MeasPhase::from(s[1]);
                    if let Some(v) = tn_measure.get_mut(&tn) {
                        for (unit, phase, f) in v {
                            if DataUnit::V.eq(unit) {
                                if p.eq(phase) {
                                    *f = *f / (count as f64);
                                    break;
                                }
                            }
                        }
                    }
                }
                let mut csv_str = String::from("tn,phase,unit,value\n");
                for (tn, measure) in tn_measure {
                    for (unit, phase, f) in measure {
                        csv_str.push_str(&format!("{tn},{phase},{unit},{f}\n"))
                    }
                }
                let schema = Schema::new(vec![
                    Field::new("tn", DataType::UInt64, false),
                    Field::new("phase", DataType::Utf8, false),
                    Field::new("unit", DataType::Utf8, false),
                    Field::new("value", DataType::Float64, false),
                ]);
                // Also dump the dataframe to a debug file (best effort).
                let mut base = PathBuf::from("/");
                base.push("pf_input.csv");
                let mut file = fs::OpenOptions::new()
                    .create(true)
                    .write(true)
                    .truncate(true)
                    .open(&base)
                    .expect("Could not create file");
                if let Err(e) = file.write_all(csv_str.as_bytes()) {
                    log::warn!("!!Failed to write file, err: {:?}", e);
                } else {
                    let _ = file.sync_all();
                }
                let csv_bytes = vec![(TN_INPUT_DF_NAME.to_string(), csv_str.into_bytes())];
                PluginOutput {
                    error_msg: None,
                    schema: Some(vec![schema]),
                    csv_bytes,
                }
            }
        }
    };
    // Serialize the output and return its (offset, length) packed into a u64.
    ciborium::into_writer(&output, &mut OUTPUT).unwrap();
    let offset = OUTPUT.as_ptr() as i32;
    let len = OUTPUT.len() as u32;
    let mut bytes = BytesMut::with_capacity(8);
    bytes.put_i32(offset);
    bytes.put_u32(len);
    bytes.get_u64()
}
/// Fold a measurement value (scaled by `ratio` to normalise its unit) into
/// `v`: if an entry with the same unit and phase already exists its value is
/// accumulated, otherwise a new entry is appended.
unsafe fn create_measure(m: &MeasureValue, phase: &MeasPhase, ratio: f64,
                         v: &mut Vec<(DataUnit, MeasPhase, f64)>, new_unit: DataUnit) {
    let scaled = m.get_value() * ratio;
    match v.iter_mut().find(|(u, p, _)| new_unit.eq(u) && phase.eq(p)) {
        Some((_, _, f)) => *f += scaled,
        None => v.push((new_unit, phase.clone(), scaled)),
    }
}
\ No newline at end of file
[package]
name = "ds-static-topo"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["cdylib"]
[dependencies]
log = "0.4"
ciborium = "0.2"
arrow-schema = { version = "56.1", features = ["serde"] }
petgraph = "0.8"
mems = { path = "../../.." }
bytes = "1.10"
\ No newline at end of file
use std::collections::HashMap;
use arrow_schema::{DataType, Field, Schema};
use bytes::{Buf, BufMut, BytesMut};
use petgraph::graph::UnGraph;
use mems::model::{get_csv_str, get_island_from_plugin_input, ModelType, PluginInput, PluginOutput};
use mems::model::dev::{CN, Island, PropDefine, PsRsrType, RsrDefine};
// use std::fs;
// use std::io::Write;
// use std::path::PathBuf;
const NORMAL_OPEN: &str = "normalOpen";
const STATIC_TOPO_DF_NAME: &str = "static_topo";
const TERMINAL_DF_NAME: &str = "terminal_cn_dev";
const POINT_DF_NAME: &str = "point_terminal_phase";
static mut OUTPUT: Vec<u8> = vec![];
/// Plugin entry point: builds one of three dataframes from the electrical
/// island model — static_topo (default), terminal_cn_dev, or
/// point_terminal_phase — depending on the requested outgoing edge names.
/// Returns the packed (offset, length) of the CBOR-encoded `PluginOutput`.
#[no_mangle]
pub unsafe fn run(ptr: i32, len: u32) -> u64 {
    // Deserialize the PluginInput handed to us in wasm linear memory.
    let input = unsafe {
        let slice = std::slice::from_raw_parts(ptr as _, len as _);
        let input: PluginInput = ciborium::from_reader(slice).unwrap();
        input
    };
    let mut error = None;
    // Fetch the electrical island from the plugin input.
    let r = get_island_from_plugin_input(&input);
    if let Err(s) = &r {
        error = Some(s.clone());
    }
    let output = if error.is_none() {
        // Island, property definitions, and resource definitions.
        let (island, prop_defs, defines) = r.unwrap();
        let mut outgoing = vec![];
        // Collect the requested outgoing dataframe names.
        for model_input in &input.model {
            match model_input {
                ModelType::Outgoing(edge_name) => {
                    outgoing = edge_name.clone();
                }
                _ => {}
            }
        }
        let mut csv_bytes = vec![];
        let mut schema = vec![];
        // create file
        // let mut base = PathBuf::from("/");
        // base.push("static_graph.csv");
        // let mut file = fs::OpenOptions::new()
        //     .create(true)
        //     .write(true)
        //     .truncate(true)
        //     .open(&base)
        //     .expect("Could not create file");
        // write graph
        // Decide which dataframe to build from the requested outgoing names.
        if outgoing.is_empty() || outgoing.contains(&STATIC_TOPO_DF_NAME.to_string()) {
            create_static_topo(&island, &prop_defs, &defines, &mut csv_bytes, &mut schema);
        }
        else if outgoing.contains(&TERMINAL_DF_NAME.to_string()) {
            let mut terminal_csv_str = String::from("terminal,cn,dev,type\n");
            let mut terminal_to_cn = HashMap::with_capacity(2 * island.cns.len());
            // First map every terminal to its connectivity node (CN).
            for cn in &island.cns {
                for terminal in &cn.terminals {
                    terminal_to_cn.insert(*terminal, cn.id);
                }
            }
            // One output row per (device, terminal) pair; unknown device
            // definitions get type 0.
            for (id, dev) in &island.resources {
                for terminal in &dev.terminals {
                    let terminal_id = terminal.id;
                    if let Some(cn_id) = terminal_to_cn.get(&terminal_id) {
                        let dev_type = if let Some(def) = defines.get(&dev.define_id) {
                            def.rsr_type as u16
                        } else {
                            0u16
                        };
                        terminal_csv_str.push_str(format!("{terminal_id},{cn_id},{id},{dev_type}\n").as_str());
                    }
                }
            }
            csv_bytes.push((TERMINAL_DF_NAME.to_string(), terminal_csv_str.into_bytes()));
            schema.push(Schema::new(vec![
                Field::new("terminal", DataType::UInt64, false),
                Field::new("cn", DataType::UInt64, false),
                Field::new("dev", DataType::UInt64, false),
                Field::new("type", DataType::UInt32, false),
            ]));
        }
        // if let Err(e) = file.write_all(csv_str.as_bytes()) {
        //     log::warn!("!!Failed to write file, err: {:?}", e);
        // } else {
        //     let _ = file.sync_all();
        // }
        else if outgoing.contains(&POINT_DF_NAME.to_string()) {
            let mut point_csv_str = String::from("point,terminal,phase\n");
            // One row per measurement definition in the island.
            for (_, defines) in &island.measures {
                for def in defines {
                    let point_id = def.point_id;
                    let terminal_id = def.terminal_id;
                    let phase = def.phase.to_string();
                    point_csv_str.push_str(&format!("{point_id},{terminal_id},{phase}\n"))
                }
            }
            csv_bytes.push((POINT_DF_NAME.to_string(), point_csv_str.into_bytes()));
            schema.push(Schema::new(vec![
                Field::new("point", DataType::UInt64, false),
                Field::new("terminal", DataType::UInt64, false),
                Field::new("phase", DataType::Utf8, false),
            ]));
        }
        PluginOutput {
            error_msg: None,
            schema: Some(schema),
            csv_bytes,
        }
    } else {
        PluginOutput {
            error_msg: error,
            schema: None,
            csv_bytes: vec![],
        }
    };
    // Serialize the output and return its (offset, length) packed into a u64.
    ciborium::into_writer(&output, &mut OUTPUT).unwrap();
    let offset = OUTPUT.as_ptr() as i32;
    let len = OUTPUT.len() as u32;
    let mut bytes = BytesMut::with_capacity(8);
    bytes.put_i32(offset);
    bytes.put_u32(len);
    bytes.get_u64()
}
/// Build the static_topo dataframe: one row per edge of the CN graph, where
/// edges are the island's two-terminal devices. Columns are
/// (source cn, target cn, dev id, dev type, normal-open flag, dev name);
/// the normal-open column is only populated for switches.
fn create_static_topo(island: &Island, prop_defs: &[PropDefine], defines: &HashMap<u64, RsrDefine>, csv_bytes: &mut Vec<(String, Vec<u8>)>, schema: &mut Vec<Schema>) {
    let mut topo_csv_str = String::from("source,target,id,type,open,name\n");
    // build node_switch_model
    let mut ori_graph: UnGraph<CN, u64> = UnGraph::new_undirected();
    let mut terminal_to_idx = HashMap::with_capacity(2 * island.cns.len());
    // First add a graph node per CN and index its terminals.
    for cn in &island.cns {
        let index = ori_graph.add_node(cn.clone());
        for terminal in &cn.terminals {
            terminal_to_idx.insert(*terminal, index);
        }
    }
    // Add one edge per two-terminal device, weighted by the device id.
    for (id, dev) in &island.resources {
        if dev.terminals.len() != 2 {
            continue;
        }
        if let Some(cn1) = terminal_to_idx.get(&dev.terminals[0].id) {
            if let Some(cn2) = terminal_to_idx.get(&dev.terminals[1].id) {
                ori_graph.add_edge(*cn1, *cn2, *id);
            }
        }
    }
    // Index property definitions by id for the prop lookups below.
    let mut prop_defines = HashMap::with_capacity(prop_defs.len());
    for def in prop_defs.into_iter() {
        prop_defines.insert(def.id, def);
    }
    for edge in ori_graph.raw_edges() {
        let s = edge.source();
        let t = edge.target();
        let cn1 = ori_graph.node_weight(s);
        let cn2 = ori_graph.node_weight(t);
        if cn1.is_none() || cn2.is_none() {
            // NOTE(review): on this (should-be-impossible) failure the CSV is
            // reset to a 5-column header without "open" and all previously
            // collected rows are discarded, while the schema below still
            // declares 6 columns — confirm this degraded output is intended.
            log::warn!("!!Failed to find nodes for edge {}", edge.weight);
            topo_csv_str = String::from("source,target,id,type,name\n");
            break;
        }
        let id1 = cn1.unwrap().id;
        let id2 = cn2.unwrap().id;
        let dev_id = edge.weight;
        let mut dev_name = "".to_string();
        let mut dev_type = 0u16;
        let mut normal_open = "".to_string();
        if let Some(rsr) = island.resources.get(&dev_id) {
            dev_name = get_csv_str(&rsr.name);
            if let Some(def) = defines.get(&rsr.define_id) {
                dev_type = def.rsr_type as u16;
                // Only switches carry the normal-open property.
                if def.rsr_type == PsRsrType::Switch {
                    let v = rsr.get_prop_value2(NORMAL_OPEN, &island.prop_groups, &prop_defines);
                    if let Some(b) = v.get_bool() {
                        normal_open = b.to_string();
                    }
                }
            }
        }
        topo_csv_str.push_str(format!("{id1},{id2},{dev_id},{dev_type},{normal_open},{dev_name}\n").as_str());
    }
    csv_bytes.push((STATIC_TOPO_DF_NAME.to_string(), topo_csv_str.into_bytes()));
    schema.push(Schema::new(vec![
        Field::new("source", DataType::UInt64, false),
        Field::new("target", DataType::UInt64, false),
        Field::new("id", DataType::UInt64, false),
        // if using uint16, will get: unsupported data type when reading CSV: u16
        Field::new("type", DataType::UInt32, false),
        Field::new("open", DataType::Boolean, true),
        Field::new("name", DataType::Utf8, true),
    ]));
}
\ No newline at end of file
[package]
name = "iesplan"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib"]
[dependencies]
serde = "1"
serde_cbor = "0.11"
wasm-bindgen = "0.2"
yew = { version = "0.21", features = ["csr"] }
gloo-timers = "0.3"
#getrandom = { version = "0.3", features = ["wasm_js"] }
csv = "1.3"
yew-bulma = { git = "https://github.com/shufengdong/yew-bulma.git" }
eig-domain = { path = "../../../eig-domain" }
# web-sys and js-sys
js-sys = "0.3"
[dependencies.web-sys]
version = "0.3"
features = ["FormData", "File"]
[profile.release]
# Abort on panic: omits unwinding code, shrinking the binary.
panic = 'abort'
# Single codegen unit: better whole-crate optimization, slower builds.
codegen-units = 1
# NOTE: opt-level = 0 disables optimization entirely (debug-quality code);
# to actually optimize for size as intended here, use 's' or 'z'.
opt-level = 0
# size-optimized alternative:
# opt-level = 's'
# Whole-program link-time optimization.
lto = true
#[package.metadata.wasm-pack.profile.release]
#wasm-opt = false
\ No newline at end of file
0,计算模式选择,,
4000002,计算模式,Radio,经济性最优:1.0;环保性最优:2;能效性最优:3
10,冷负荷曲线设置,,
4000028,过渡季,TextField,
4000029,夏季,TextField,
4000030,冬季,TextField,
11,其他参数,,
4000043,年利率,TextField,
4000044,天然气价,TextField,
2,设备选择,,
4000006,燃气轮机,Checkbox,
4000007,燃气锅炉,Checkbox,
4000008,热泵,Checkbox,
4000009,吸收式制冷机,Checkbox,
4000010,电制冷机,Checkbox,
4000011,电储能,Checkbox,
4000012,蓄热罐,Checkbox,
4000013,蓄冷罐,Checkbox,
3,机组最大数量,,
4000014,燃气轮机,TextField,
4000015,燃气锅炉,TextField,
4000016,热泵,TextField,
4000017,吸收式制冷机,TextField,
4000018,电制冷机,TextField,
4000019,电储能,TextField,
4000020,蓄热罐,TextField,
4000021,蓄冷罐,TextField,
4,电负荷曲线设置,,
4000022,过渡季,TextField,
4000023,夏季,TextField,
4000024,冬季,TextField,
5,风电曲线设置,,
4000031,过渡季,TextField,
4000032,夏季,TextField,
4000033,冬季,TextField,
6,热负荷曲线设置,,
4000025,过渡季,TextField,
4000026,夏季,TextField,
4000027,冬季,TextField,
7,光伏曲线设置,,
4000034,过渡季,TextField,
4000035,夏季,TextField,
4000036,冬季,TextField,
8,电价曲线设置,,
4000040,过渡季,TextField,
4000041,夏季,TextField,
4000042,冬季,TextField,
9,环境平均温度,,
4000037,过渡季,TextField,
4000038,夏季,TextField,
4000039,冬季,TextField,
use crate::startpage::StartPage;
use eig_domain::excel::get_first_sheet_merged_cells;
use eig_domain::{csv_str, csv_string, csv_u64, csv_usize, SetPointValue};
use serde::{Deserialize, Serialize};
use wasm_bindgen::prelude::*;
use web_sys::{Element, Headers};
use yew_bulma::layout::tiles::Tiles;
pub mod startpage;
mod paracard;
#[wasm_bindgen]
pub fn create_view(e: Element) {
    // Mount the Yew application with `e` as its root DOM element.
    let renderer = yew::Renderer::<StartPage>::with_root(e);
    renderer.render();
}
// JS interop: implemented in /mems-view-bin.js, gives the WASM module access
// to request headers and the current user's id.
#[wasm_bindgen(raw_module = "/mems-view-bin.js")]
extern "C" {
    /// Headers (presumably including the auth token — defined in JS) to attach to API requests.
    pub fn get_headers() -> Headers;
    /// Id of the currently logged-in user.
    pub fn get_user_id() -> u16;
}
/// Batch of set-point commands sent to the control endpoint.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PointControl3 {
    // One command per point to set.
    pub commands: Vec<SetPointValue>,
}
/// Query parameters selecting either a single id or a comma-separated id list.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct QueryWithId {
    // Single id; takes precedence over `ids` when both are set.
    pub id: Option<u64>,
    // Comma-separated list of ids.
    pub ids: Option<String>,
}
impl QueryWithId {
    /// Render this query as a URL query string: `?id=..` when a single id is
    /// set (preferred), otherwise `?ids=..`, otherwise an empty string.
    pub fn query_str(&self) -> String {
        match (self.id, &self.ids) {
            (Some(id), _) => format!("?id={}", id),
            (None, Some(ids)) => format!("?ids={ids}"),
            (None, None) => String::new(),
        }
    }
}
/// Kind of input widget used to edit a parameter.
#[derive(Clone, Debug, PartialEq)]
pub enum ParaType {
    // Boolean toggles (rendered as checkbox / switch).
    Checkbox,
    Switch,
    // Choice widgets: list of (label, value) options.
    Radio(Vec<(String, f64)>),
    Select(Vec<(String, f64)>),
    // min, max, step
    Slider(f64, f64, f64),
    TextField,
}
/// One parameter card: a named group of points with labels and widget types,
/// loaded from a cardN.csv file (see `create_parameters`).
#[derive(Clone, Debug, PartialEq)]
pub struct Parameters {
    // Card id; also the slot index in the tiles layout.
    id: usize,
    // Card title shown in the header.
    name: String,
    // Per-parameter display labels (parallel to `points`).
    labels: Vec<String>,
    // Per-parameter point ids.
    points: Vec<u64>,
    // Per-parameter widget kinds (parallel to `points`).
    para_types: Vec<ParaType>,
}
/// Parse a parameter-card definition from headerless CSV bytes.
///
/// Row 0 holds `(card id, card name)`; each following row holds
/// `(point id, label, widget type[, widget options])`. Panics on malformed
/// input (the CSVs are compiled into the binary via `include_bytes!`).
pub fn create_parameters(content: &[u8]) -> Parameters {
    let mut reader = csv::ReaderBuilder::new()
        .has_headers(false)
        .from_reader(content);
    let mut rows = reader.records();
    // First row: card id and display name.
    let head = rows.next().unwrap().unwrap();
    let id = csv_usize(&head, 0).unwrap();
    let name = csv_string(&head, 1).unwrap();
    let mut labels = Vec::new();
    let mut points = Vec::new();
    let mut para_types = Vec::new();
    for rec in rows {
        let row = rec.unwrap();
        points.push(csv_u64(&row, 0).unwrap());
        labels.push(csv_string(&row, 1).unwrap());
        // Widget type is matched case-insensitively.
        let kind = csv_str(&row, 2).unwrap().to_uppercase();
        para_types.push(match kind.as_str() {
            "CHECKBOX" => ParaType::Checkbox,
            "SWITCH" => ParaType::Switch,
            "TEXTFIELD" => ParaType::TextField,
            "SLIDER" => {
                // Options column is "min;max;step".
                let v = csv_str(&row, 3).unwrap();
                let parts: Vec<&str> = v.split(";").collect();
                ParaType::Slider(
                    parts[0].parse().unwrap(),
                    parts[1].parse().unwrap(),
                    parts[2].parse().unwrap(),
                )
            }
            "SELECT" => ParaType::Select(parse_options(csv_str(&row, 3).unwrap())),
            "RADIO" => ParaType::Radio(parse_options(csv_str(&row, 3).unwrap())),
            // Anything unrecognised falls back to a plain text field.
            _ => ParaType::TextField,
        });
    }
    Parameters { id, name, labels, points, para_types }
}
/// Parse a ';'-separated options string into `(label, value)` pairs.
/// Each item is either `label:value` or a bare numeric `value` (empty label).
/// Panics when the value part does not parse as f64.
fn parse_options(v: &str) -> Vec<(String, f64)> {
    v.split(";")
        .map(|item| {
            let parts: Vec<&str> = item.split(":").collect();
            match parts.as_slice() {
                [label, value] => ((*label).to_string(), value.parse::<f64>().unwrap()),
                _ => (String::new(), item.parse::<f64>().unwrap()),
            }
        })
        .collect()
}
/// Build a Bulma tiles layout from the first worksheet of an xlsx file.
///
/// Merged cell ranges become row/col spans; the cell value (if present) is
/// used as the tile height in pixels, defaulting to 100.
pub fn build_tiles(xlsx_bytes: Vec<u8>) -> Option<Tiles> {
    let (rows, cols, merge_map, values) = get_first_sheet_merged_cells(xlsx_bytes)?;
    let mut class_str = Vec::new();
    let mut style_str = Vec::new();
    // Marks cells already swallowed by an earlier merged region.
    let mut is_dealt = vec![false; (rows * cols) as usize];
    for r in 0..rows {
        for c in 0..cols {
            let index = (r * cols + c) as usize;
            if is_dealt[index] {
                continue;
            }
            let mut classes = "kanban-div cell".to_string();
            if let Some(&(end_row, end_col)) = merge_map.get(&(r, c)) {
                // A merged region starts here: stretch the tile across it.
                let row_span = end_row - r + 1;
                let col_span = end_col - c + 1;
                if row_span > 1 {
                    classes.push_str(&format!(" is-row-span-{row_span}"));
                }
                if col_span > 1 {
                    classes.push_str(&format!(" is-col-span-{col_span}"));
                }
                // Mark every covered cell (including this one) as handled.
                for rr in r..=end_row {
                    for cc in c..=end_col {
                        is_dealt[(rr * cols + cc) as usize] = true;
                    }
                }
            }
            class_str.push(classes);
            // Cell value is the tile height in pixels; default 100.
            let height = values.get(&index).cloned().unwrap_or("100".to_string());
            style_str.push(format!("height:{height}px"));
        }
    }
    Some(Tiles { id: "".to_string(), class_str, style_str, with_box: true })
}
\ No newline at end of file
use eig_domain::{MeasureValue, SetPointValue};
use std::collections::HashMap;
use wasm_bindgen::JsCast;
use web_sys::InputEvent;
use yew::prelude::*;
use yew_bulma::calendar::get_timestamp;
use yew_bulma::*;
use crate::{get_headers, get_user_id, ParaType, Parameters, PointControl3, QueryWithId};
/// Messages driving the `ParaCard` component.
pub enum Msg {
    // Re-query current parameter values from the server.
    Refresh,
    // Parameter values arrived from the server.
    ParaLoaded(Vec<MeasureValue>),
    // User toggled the checkbox/switch at parameter index.
    SetBool(usize, bool),
    // User confirmed the text field at parameter index.
    SetString(usize),
    // User picked an option (value rendered as string) at parameter index.
    SetOption(usize, String),
    // Set-point succeeded for these point ids; re-query them.
    SetParaSuccess(Vec<u64>),
    // No-op.
    None,
}
/// Yew properties for `ParaCard`: the parameter group to render.
#[derive(Clone, Debug, PartialEq, Properties)]
pub struct Props {
    pub paras: Parameters,
}
/// Card widget rendering one group of writable parameters (points).
pub struct ParaCard {
    // parameter index -> current boolean value (checkbox/switch kinds)
    bools: HashMap<usize, bool>,
    // parameter index -> current numeric value (all other kinds)
    floats: HashMap<usize, f64>,
    // point id -> parameter index, for routing loaded measure values
    pos: HashMap<u64, usize>,
}
impl Component for ParaCard {
    type Message = Msg;
    type Properties = Props;
    /// Initialise per-parameter state (bool map for checkbox/switch, float map
    /// for everything else) and fire the initial server query.
    fn create(ctx: &Context<Self>) -> Self {
        let mut bools = HashMap::new();
        let mut floats = HashMap::new();
        let mut pos = HashMap::new();
        for index in 0..ctx.props().paras.points.len() {
            let input_type = &ctx.props().paras.para_types[index];
            if ParaType::Checkbox.eq(input_type)
                || ParaType::Switch.eq(input_type) {
                bools.insert(index, false);
            } else {
                floats.insert(index, 0.0);
            }
            // Map point id -> parameter index for routing loaded values.
            pos.insert(ctx.props().paras.points[index], index);
        }
        Self::query_para(ctx, &ctx.props().paras.points);
        Self { bools, floats, pos }
    }
    fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
        match msg {
            Msg::Refresh => {
                Self::query_para(ctx, &ctx.props().paras.points);
            }
            Msg::ParaLoaded(values) => {
                // Route each loaded value to its widget's state slot; a value
                // in `bools` is true when the measure is strictly positive.
                for v in values {
                    if let Some(index) = self.pos.get(&v.point_id) {
                        if let Some(b) = self.bools.get_mut(index) {
                            *b = v.get_value() > 0.0;
                        } else if let Some(f) = self.floats.get_mut(index) {
                            *f = v.get_value();
                        }
                    }
                }
                // Only loading new values triggers a re-render.
                return true;
            }
            Msg::SetBool(i, b) => {
                let point_id = &ctx.props().paras.points[i];
                let value = if b {
                    "1.0"
                } else {
                    "0.0"
                };
                self.do_set_point(ctx, value, point_id);
            }
            Msg::SetString(i) => {
                // Read the text field's current value from the DOM by name.
                let point_id = &ctx.props().paras.points[i];
                let name = format!("tf_{}", point_id);
                let value = get_input_value_by_name(&name);
                self.do_set_point(ctx, &value, point_id);
            }
            Msg::SetOption(i, value) => {
                // "None" is the placeholder option — ignore it.
                if value == "None" {
                    return false;
                }
                let point_id = &ctx.props().paras.points[i];
                self.do_set_point(ctx, &value, point_id);
            }
            Msg::SetParaSuccess(points) => {
                // Re-query only the points that were just written.
                Self::query_para(ctx, &points);
            }
            Msg::None => {}
        }
        false
    }
    /// Render the card: header with the group name, one input row per point.
    fn view(&self, ctx: &Context<Self>) -> Html {
        let paras = &ctx.props().paras;
        let input_html = (0..paras.points.len()).map(|i| {
            self.create_input(ctx, i)
        }).collect::<Html>();
        html! {
            <Card>
                <CardHeader>
                    <p class="card-header-title">
                        {paras.name.clone()}
                    </p>
                </CardHeader>
                <CardContent>
                    {input_html}
                </CardContent>
            </Card>
        }
    }
}
impl ParaCard {
    /// Render the input widget for parameter `i` according to its `ParaType`.
    fn create_input(&self, ctx: &Context<Self>, i: usize) -> Html {
        let paras = &ctx.props().paras;
        let point_id = &paras.points[i];
        let input_type = &paras.para_types[i];
        let link = ctx.link();
        let label = if let Some(label) = paras.labels.get(i) {
            label.clone()
        } else {
            "".to_string()
        };
        match input_type {
            ParaType::Checkbox => {
                let checked = self.bools.get(&i).cloned().unwrap_or(false);
                html! {
                    <Field horizontal={true} label={label}>
                        <Checkbox checked={checked}
                            update={link.callback(move |b| Msg::SetBool(i, b))}>
                        </Checkbox>
                    </Field>
                }
            }
            ParaType::Switch => {
                let checked = self.bools.get(&i).cloned().unwrap_or(false);
                html! {
                    <Field horizontal={true} label={label}>
                        <input class={classes!("mui-switch", "mui-switch-animbg")} type="checkbox"
                            checked={checked}
                            onclick={link.callback(move |_| Msg::SetBool(i, !checked))} />
                    </Field>
                }
            }
            ParaType::Slider(lower, upper, step) => {
                // Default the slider position to its lower bound until loaded.
                let current_v = self.floats.get(&i).cloned().unwrap_or(*lower).to_string();
                let oninput = link.callback(move |e: InputEvent| {
                    let target = e.target().unwrap();
                    let input = target.dyn_into::<web_sys::HtmlInputElement>().unwrap();
                    Msg::SetOption(i, input.value())
                });
                html! {
                    <Field horizontal={true} label={label}>
                        <input class={"slider is-fullwidth"} type={"range"} orient={"horizontal"}
                            oninput={oninput} step={step.to_string()} min={lower.to_string()}
                            max={upper.to_string()} value={current_v}
                        />
                    </Field>
                }
            }
            ParaType::Select(options) => {
                let current_f = self.floats.get(&i).cloned().unwrap_or(0.0);
                // NOTE: the inner closure parameter `i` shadows the parameter
                // index here; the Select `update` callback below is outside
                // this closure and still captures the outer `i`.
                let content = (0..options.len()).map(|i| {
                    let (name, f) = &options[i];
                    let to_show = if name.is_empty() {
                        f.to_string()
                    } else {
                        name.clone()
                    };
                    html! {
                        <option value={f.to_string()} selected={current_f == *f}>
                            {to_show}
                        </option>
                    }
                }).collect::<Html>();
                html! {
                    <Field horizontal={true} label={label}>
                        <Select update={link.callback(move |s| Msg::SetOption(i, s))} >
                            {content}
                            <option value={"None"}>{"no_selection"}</option>
                        </Select>
                    </Field>
                }
            }
            ParaType::Radio(options) => {
                let current_f = self.floats.get(&i).cloned().unwrap_or(0.0);
                let content = (0..options.len()).map(|j| {
                    let (name, f) = &options[j];
                    let to_show = if name.is_empty() {
                        f.to_string()
                    } else {
                        name.clone()
                    };
                    // checked_value matches value only for the selected radio.
                    let checked_value = if current_f == *f {
                        f.to_string()
                    } else {
                        current_f.to_string()
                    };
                    let value = f.to_string();
                    html! {
                        <Radio update={link.callback(move |_| Msg::SetOption(i, value.clone()))}
                            checked_value={checked_value} value={f.to_string()}>
                            <span>{to_show}</span>
                        </Radio>
                    }
                }).collect::<Html>();
                html! {
                    <Field horizontal={true} label={label}>
                        <div class="radios">{content}</div>
                    </Field>
                }
            }
            ParaType::TextField => {
                // Name encodes the point id so SetString can find it in the DOM.
                let name = format!("tf_{}", point_id);
                let f = self.floats.get(&i).cloned().unwrap_or(0.0).to_string();
                html! {
                    <Field classes={classes!("has-addons")} horizontal={true} label={label}>
                        <Control classes={classes!("is-expanded")}>
                            <Input placeholder={"eg: 10"} width={"12"} name={name} value={f}
                                onenterdown={link.callback(move |_| Msg::SetString(i))} />
                        </Control>
                        <Control>
                            <Button classes={classes!("is-outlined")}
                                onclick={link.callback(move |_| Msg::SetString(i))}>
                                <Icon awesome_icon={"fa fa-check"} />
                            </Button>
                        </Control>
                    </Field>
                }
            }
        }
    }
    /// Parse `value` as a set-point expression and send it for `point_id`;
    /// alerts without sending when parsing fails.
    fn do_set_point(&mut self, ctx: &Context<Self>, value: &str, point_id: &u64) {
        if let Ok(expr) = value.parse() {
            let user_id = get_user_id();
            let v = SetPointValue {
                point_id: *point_id,
                sender_id: user_id as u64,
                command: expr,
                timestamp: get_timestamp(),
            };
            Self::set_point(ctx, PointControl3 { commands: vec![v] });
        } else {
            alert("Wrong input");
        }
    }
    /// Fetch current values (CBOR-encoded) for `points` and feed them back as
    /// `Msg::ParaLoaded`; alerts on token/permission/other failures.
    fn query_para(ctx: &Context<Self>, points: &[u64]) {
        let ids: Vec<String> = points.iter().map(|s| s.to_string()).collect();
        let ids = ids.join(",").to_string();
        let query = QueryWithId {
            id: None,
            ids: Some(ids),
        };
        let url = format!("/api/v1/pscpu/points/values_cbor/0{}", query.query_str());
        ctx.link().send_future(async move {
            match async_ws_get(&url, &get_headers()).await {
                Ok(bytes) => {
                    if let Ok(values) = serde_cbor::from_slice::<Vec<MeasureValue>>(&bytes) {
                        return Msg::ParaLoaded(values);
                    } else {
                        alert("Fail");
                    }
                }
                Err(err) => {
                    if err.to_string().eq(HEADER_TOKEN_INVALID) {
                        alert(&format!("Invalid header token for url: {url}"));
                    } else if err.to_string().eq(HEADER_PERMISSION_DENIED) {
                        alert(&format!("Permission denied for url: {}", url));
                    } else {
                        alert(&format!("Failed to load parameter values, err: {:?}", err));
                    }
                }
            }
            Msg::None
        });
    }
    /// POST the set-point command batch (CBOR body); on success triggers a
    /// re-query of the affected points via `Msg::SetParaSuccess`.
    fn set_point(ctx: &Context<Self>, cmd: PointControl3) {
        let url = "/api/v1/controls_cbor/points_by_expr";
        let points: Vec<u64> = cmd.commands.iter().map(|c| c.point_id).collect();
        ctx.link().send_future(async move {
            let content = serde_cbor::to_vec(&cmd).unwrap();
            let body = js_sys::Uint8Array::from(content.as_ref()).dyn_into().unwrap();
            match async_ws_post_no_resp(url, &get_headers(), Some(body)).await {
                Ok(b) => {
                    if !b {
                        alert("Fail to set parameter");
                    } else {
                        return Msg::SetParaSuccess(points);
                    }
                }
                Err(err) => {
                    if err.to_string().eq(HEADER_TOKEN_INVALID) {
                        alert(&format!("Invalid header token for url: {url}"));
                    } else if err.to_string().eq(HEADER_PERMISSION_DENIED) {
                        alert(&format!("Permission denied for url: {}", url));
                    } else {
                        alert(&format!("Failed to set parameter value, err: {:?}", err));
                    }
                }
            }
            Msg::None
        });
    }
}
\ No newline at end of file
use gloo_timers::callback::Timeout;
use std::collections::HashMap;
use yew::prelude::*;
use yew_bulma::layout::tiles::Tiles;
use yew_bulma::*;
use crate::paracard::ParaCard;
use crate::{build_tiles, create_parameters, Parameters};
/// Messages driving the `StartPage` component.
pub enum Msg {
    // Begin the computation: show the loading state and arm the timer.
    Start,
    // Computation (timer) finished: clear the loading state.
    Stop,
}
/// Top-level page: a tiles layout holding the start button and parameter cards.
pub struct StartPage {
    // Grid layout built from the bundled xlsx file.
    tiles: Tiles,
    // Parameter card definitions built from the bundled csv files.
    cards: Vec<Parameters>,
    // Pending timeout that ends the "running" state; cancelled on destroy.
    timer: Option<Timeout>,
    // Whether the start button shows its loading spinner.
    is_running: bool
}
impl Component for StartPage {
    type Message = Msg;
    type Properties = ();
    /// Build the layout and parameter cards from assets bundled at compile
    /// time. NOTE(review): card1.csv is intentionally(?) absent — confirm.
    fn create(_: &Context<Self>) -> Self {
        let tiles = build_tiles(include_bytes!("../layoutV3.xlsx").to_vec()).unwrap();
        let card0 = create_parameters(include_bytes!("../card/card0.csv"));
        let card2 = create_parameters(include_bytes!("../card/card2.csv"));
        let card3 = create_parameters(include_bytes!("../card/card3.csv"));
        let card4 = create_parameters(include_bytes!("../card/card4.csv"));
        let card5 = create_parameters(include_bytes!("../card/card5.csv"));
        let card6 = create_parameters(include_bytes!("../card/card6.csv"));
        let card7 = create_parameters(include_bytes!("../card/card7.csv"));
        let card8 = create_parameters(include_bytes!("../card/card8.csv"));
        let card9 = create_parameters(include_bytes!("../card/card9.csv"));
        let card10 = create_parameters(include_bytes!("../card/card10.csv"));
        let card11 = create_parameters(include_bytes!("../card/card11.csv"));
        let cards = vec![card0, card2, card3, card4, card5, card6, card7, card8, card9, card10, card11];
        Self { tiles, cards, timer: None, is_running: false }
    }
    fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
        let link = ctx.link();
        match msg {
            Msg::Start => {
                // Show the loading state and schedule Stop after 5 seconds.
                self.is_running = true;
                let stop_running = link.callback(|()| Msg::Stop);
                let timeout = Timeout::new(5_000, move || {
                    stop_running.emit(());
                });
                self.timer = Some(timeout);
            }
            Msg::Stop => {
                self.is_running = false;
            }
        }
        true
    }
    /// Render the tiles grid: slot 1 is the start button, the remaining slots
    /// are the parameter cards keyed by their card ids.
    fn view(&self, ctx: &Context<Self>) -> Html {
        let link = ctx.link();
        let mut nodes = HashMap::with_capacity(12);
        nodes.insert(1, html! {
            <Button loading={self.is_running} classes={classes!("is-primary", "is-fullwidth", "is-large")}
                onclick={link.callback(|_|Msg::Start)}>{"开始计算"}</Button>
        });
        for card in &self.cards {
            nodes.insert(card.id, html! {
                <ParaCard paras={card.clone()} />
            });
        }
        self.tiles.create_html(nodes)
    }
    /// Cancel any pending timer so its callback cannot fire after unmount.
    fn destroy(&mut self, _: &Context<Self>) {
        if let Some(timer) = self.timer.take() {
            timer.cancel();
        }
    }
}
pub mod model;
// ============= 对webapp.rs中额外暴露给mems的API进行apidoc注释-开始
// ============= 因为在mems的API中过滤了webapp.rs,所以要在此处额外添加
// ============= 另一种方式是把webapp.rs拆分开两个文件,但尽量还是不动代码,就使用这种变通方式
/// public api
/**
* @api {get} /api/v1/measures 查询历史量测
* @apiGroup Webapp_Result
* @apiUse HisQuery
* @apiSuccess {PbPointValues} PbPointValues 测点值对象
*/
/**
* @api {get} /api/v1/soes 查询SOE
* @apiPrivate
* @apiGroup Webapp_Result
* @apiUse HisQuery
* @apiSuccess {PbPointValues} PbPointValues SOE结果,结果按照时间排序
*/
/// public api
/**
* @api {get} /api/v1/aoe_results 查询AOE执行结果
* @apiGroup Webapp_Result
* @apiUse HisQuery
* @apiSuccess {PbAoeResults} PbAoeResults AOE执行结果
*/
/**
* @api {get} /api/v1/commands 查询历史设点执行结果
* @apiPrivate
* @apiGroup Webapp_Result
* @apiUse HisSetPointQuery
* @apiSuccess {PbSetPointResults} PbSetPointResults 历史设点执行结果
*/
/// public api
/**
* @api {get} /api/v1/alarms 查询告警
* @apiGroup Webapp_Result
* @apiUse HisQuery
* @apiSuccess {PbEigAlarms} PbEigAlarms 告警结果,结果按照时间排序
*/
// ============= 对webapp.rs中额外暴露给mems的API进行apidoc注释-结束
/// Parse a comma-separated list embedded in a URL path segment into a vector.
/// Items are trimmed before parsing; items that fail to parse are silently
/// skipped.
pub fn parse_path_values<T: std::str::FromStr>(path: &str) -> Vec<T> {
    path.split(',')
        .filter_map(|item| item.trim().parse().ok())
        .collect()
}
use std::cmp::PartialOrd;
use std::collections::HashMap;
use std::fmt;
use std::hash::Hash;
use serde::{Deserialize, Serialize};
use eig_domain::prop::*;
/**
* @api {枚举_电力设备类型} /PsRsrType PsRsrType
* @apiPrivate
* @apiGroup A_Enum
* @apiSuccess {String} Switch Switch
* @apiSuccess {String} Busbar Busbar
* @apiSuccess {String} ACline ACline
 * @apiSuccess {String} DCline DCline
* @apiSuccess {String} Winding Winding
* @apiSuccess {String} SyncGenerator SyncGenerator
* @apiSuccess {String} ESS ESS
* @apiSuccess {String} PCS PCS
* @apiSuccess {String} Transformer Transformer
* @apiSuccess {String} Load Load
* @apiSuccess {String} ShuntCompensator ShuntCompensator
* @apiSuccess {String} SerialCompensator SerialCompensator
* @apiSuccess {String} Feeder Feeder
* @apiSuccess {String} Cable Cable
* @apiSuccess {String} Regulator Regulator
* @apiSuccess {String} Connector Connector
* @apiSuccess {String} Company Company
* @apiSuccess {String} SubIsland SubIsland
* @apiSuccess {String} LoadArea LoadArea
* @apiSuccess {String} Substation Substation
* @apiSuccess {String} PowerPlant PowerPlant
* @apiSuccess {String} VoltageLevel VoltageLevel
* @apiSuccess {String} BaseVoltage BaseVoltage
* @apiSuccess {String} UserDefine1 UserDefine1
* @apiSuccess {String} UserDefine2 UserDefine2
* @apiSuccess {String} UserDefine3 UserDefine3
* @apiSuccess {String} UserDefine4 UserDefine4
* @apiSuccess {String} UserDefine5 UserDefine5
* @apiSuccess {String} UserDefine6 UserDefine6
* @apiSuccess {String} UserDefine7 UserDefine7
* @apiSuccess {String} UserDefine8 UserDefine8
* @apiSuccess {String} UserDefine9 UserDefine9
* @apiSuccess {String} UserDefine10 UserDefine10
* @apiSuccess {String} Unknown Unknown
*/
/// Power-system resource (device) type. Discriminants are stable u16 values;
/// `create_static_topo` casts them with `as u16` for CSV export.
#[repr(u16)]
#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug, Copy, Hash)]
pub enum PsRsrType {
    // transmission-network devices
    Switch = 1,
    Busbar = 2,
    ACline = 3,
    DCline = 4,
    Winding = 5,
    SyncGenerator = 6,
    ESS = 7,
    PCS = 8,
    Transformer = 9,
    Load = 10,
    ShuntCompensator = 11,
    SerialCompensator = 12,
    // distribution-network devices
    Feeder = 16,
    Cable,
    Regulator,
    Connector,
    // container
    Company = 10000,
    SubIsland,
    LoadArea,
    Substation,
    PowerPlant,
    VoltageLevel,
    BaseVoltage,
    // user-defined types
    UserDefine1 = 30001,
    UserDefine2,
    UserDefine3,
    UserDefine4,
    UserDefine5,
    UserDefine6,
    UserDefine7,
    UserDefine8,
    UserDefine9,
    UserDefine10,
    Unknown = 65535,
}
/// Case-sensitive mapping from the variant's name to the variant; any
/// unrecognised string (including "Unknown" itself) maps to `Unknown`.
impl From<&str> for PsRsrType {
    fn from(value: &str) -> Self {
        match value {
            "Switch" => PsRsrType::Switch,
            "Busbar" => PsRsrType::Busbar,
            "ACline" => PsRsrType::ACline,
            "DCline" => PsRsrType::DCline,
            "Winding" => PsRsrType::Winding,
            "SyncGenerator" => PsRsrType::SyncGenerator,
            "ESS" => PsRsrType::ESS,
            "PCS" => PsRsrType::PCS,
            "Transformer" => PsRsrType::Transformer,
            "Load" => PsRsrType::Load,
            "ShuntCompensator" => PsRsrType::ShuntCompensator,
            "SerialCompensator" => PsRsrType::SerialCompensator,
            "Feeder" => PsRsrType::Feeder,
            "Cable" => PsRsrType::Cable,
            "Regulator" => PsRsrType::Regulator,
            "Connector" => PsRsrType::Connector,
            "Company" => PsRsrType::Company,
            "SubIsland" => PsRsrType::SubIsland,
            "LoadArea" => PsRsrType::LoadArea,
            "Substation" => PsRsrType::Substation,
            "PowerPlant" => PsRsrType::PowerPlant,
            "VoltageLevel" => PsRsrType::VoltageLevel,
            "BaseVoltage" => PsRsrType::BaseVoltage,
            "UserDefine1" => PsRsrType::UserDefine1,
            "UserDefine2" => PsRsrType::UserDefine2,
            "UserDefine3" => PsRsrType::UserDefine3,
            "UserDefine4" => PsRsrType::UserDefine4,
            "UserDefine5" => PsRsrType::UserDefine5,
            "UserDefine6" => PsRsrType::UserDefine6,
            "UserDefine7" => PsRsrType::UserDefine7,
            "UserDefine8" => PsRsrType::UserDefine8,
            "UserDefine9" => PsRsrType::UserDefine9,
            "UserDefine10" => PsRsrType::UserDefine10,
            _ => PsRsrType::Unknown,
        }
    }
}
impl From<String> for PsRsrType {
fn from(value: String) -> Self {
PsRsrType::from(value.as_str())
}
}
impl PsRsrType {
    /// All device types, for iterating over the complete type list.
    pub const PS_DEV_TYPE: [PsRsrType; 34] = [
        PsRsrType::Switch,
        PsRsrType::Busbar,
        PsRsrType::ACline,
        PsRsrType::DCline,
        PsRsrType::Winding,
        PsRsrType::SyncGenerator,
        PsRsrType::ESS,
        PsRsrType::PCS,
        PsRsrType::Transformer,
        PsRsrType::Load,
        PsRsrType::ShuntCompensator,
        PsRsrType::SerialCompensator,
        PsRsrType::Feeder,
        PsRsrType::Cable,
        PsRsrType::Regulator,
        PsRsrType::Connector,
        PsRsrType::Company,
        PsRsrType::SubIsland,
        PsRsrType::LoadArea,
        PsRsrType::Substation,
        PsRsrType::PowerPlant,
        PsRsrType::VoltageLevel,
        PsRsrType::BaseVoltage,
        PsRsrType::UserDefine1,
        PsRsrType::UserDefine2,
        PsRsrType::UserDefine3,
        PsRsrType::UserDefine4,
        PsRsrType::UserDefine5,
        PsRsrType::UserDefine6,
        PsRsrType::UserDefine7,
        PsRsrType::UserDefine8,
        PsRsrType::UserDefine9,
        PsRsrType::UserDefine10,
        PsRsrType::Unknown,
    ];
}
/// String form (via `to_string()`) is simply the variant name, delegating to
/// the derived `Debug` representation.
impl fmt::Display for PsRsrType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
/// Phase of a measurement (A/B/C three-phase, A0/B0/C0, CT/PT — presumably
/// current/voltage transformer channels; confirm with domain owners).
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Copy, Clone, Hash)]
pub enum MeasPhase {
    Total,
    A,
    B,
    C,
    A0,
    B0,
    C0,
    CT,
    PT,
    Unknown,
}
/// Case-insensitive mapping from the phase name to the variant; anything
/// unrecognised maps to `Unknown`.
impl From<&str> for MeasPhase {
    fn from(value: &str) -> Self {
        match value.to_uppercase().as_str() {
            "TOTAL" => MeasPhase::Total,
            "A" => MeasPhase::A,
            "B" => MeasPhase::B,
            "C" => MeasPhase::C,
            "A0" => MeasPhase::A0,
            "B0" => MeasPhase::B0,
            "C0" => MeasPhase::C0,
            "CT" => MeasPhase::CT,
            "PT" => MeasPhase::PT,
            _ => MeasPhase::Unknown,
        }
    }
}
impl From<String> for MeasPhase {
fn from(value: String) -> Self {
MeasPhase::from(value.as_str())
}
}
/// String form (via `to_string()`) is the variant name, delegating to the
/// derived `Debug` representation.
impl fmt::Display for MeasPhase {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
/**
* @api {属性定义} /PropDefine PropDefine
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 属性id
* @apiSuccess {String} name 属性定义标识
* @apiSuccess {String} desc 属性定义描述
* @apiSuccess {PropType} data_type 属性类型
* @apiSuccess {DataUnit} data_unit 属性单位
*/
/// Definition of a device property.
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct PropDefine {
    /// Property-definition id.
    pub id: u64,
    /// Property-definition identifier (name).
    pub name: String,
    /// Human-readable description.
    pub desc: String,
    /// Value type of the property.
    pub data_type: PropType,
    /// Unit of the property value.
    pub data_unit: DataUnit,
}
/**
* @api {属性分组定义} /PropGroupDefine PropGroupDefine
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {String} name 属性分组定义标识
* @apiSuccess {String} desc 属性分组定义描述
* @apiSuccess {u64[]} prop_defines 属性定义id列表
*/
/// Definition of a property group: a named list of property-definition ids.
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct PropGroupDefine {
    /// Group identifier (name).
    pub name: String,
    /// Human-readable description.
    pub desc: String,
    /// Ids of the property definitions contained in this group.
    pub prop_defines: Vec<u64>,
}
/**
* @api {设备属性分组} /RsrPropGroup RsrPropGroup
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 设备属性分组id
* @apiSuccess {u64} rsr_id 资源id
* @apiSuccess {String} name 分组名称,用于显示,以及匹配PropGroupDefine
* @apiSuccess {u64[]} defines 设备属性定义列表
* @apiSuccess {PropValue[]} props 设备属性值列表
*/
/// Property group attached to a concrete resource. `defines` and `props` are
/// parallel lists: props[i] is the value for the definition defines[i].
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct RsrPropGroup {
    pub id: u64,
    /// resource id
    pub rsr_id: u64,
    /// Group name, used for display and for matching a PropGroupDefine.
    pub name: String,
    /// Property-definition ids (parallel to `props`).
    pub defines: Vec<u64>,
    /// Actual property values (parallel to `defines`).
    pub props: Vec<PropValue>,
}
/**
* @api {设备定义} /RsrDefine RsrDefine
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 定义id
* @apiSuccess {PsRsrType} rsr_type 设备所属类型
* @apiSuccess {String} name 设备类别名称
* @apiSuccess {String} desc 设备定义的描述
* @apiSuccess {u8} terminal_num 端口数量
* @apiSuccess {PropGroupDefine[]} prop_groups 属性分组定义列表
*/
/// Device (resource) definition: the template a NetworkRsr instantiates.
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct RsrDefine {
    /// Definition id.
    pub id: u64,
    /// Device type this definition belongs to.
    pub rsr_type: PsRsrType,
    /// Device-category name.
    pub name: String,
    /// Human-readable description of the definition.
    pub desc: String,
    /// Number of terminals (ports).
    pub terminal_num: u8,
    /// Property-group definitions for devices of this kind.
    pub prop_groups: Vec<PropGroupDefine>,
}
/**
* @api {设备对象} /NetworkRsr NetworkRsr
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 设备id
* @apiSuccess {u64} define_id 设备定义id
* @apiSuccess {String} name 设备名称
* @apiSuccess {String} desc 设备描述
* @apiSuccess {u64} [container_id] 容器id
* @apiSuccess {Terminal[]} terminals 端子列表
* @apiSuccess {u64[]} prop_groups 设备属性分组,RsrPropGroup对象的id列表
*/
/// A concrete network resource (device) instance.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct NetworkRsr {
    /// Device id.
    pub id: u64,
    /// Id of the RsrDefine this device instantiates.
    pub define_id: u64,
    /// Device name.
    pub name: String,
    /// Device description.
    pub desc: String,
    // Optional id of the container (e.g. substation/company) holding this device.
    pub container_id: Option<u64>,
    /// The device's terminals (ports).
    pub terminals: Vec<Terminal>,
    /// Ids of the RsrPropGroup objects carrying this device's property values.
    pub prop_group_ids: Vec<u64>,
}
/**
* @api {端口} /Terminal Terminal
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} device 设备id
* @apiSuccess {u64} id 端口id
*/
// Terminal (port) of a device.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Terminal {
    // Owning device id.
    pub device: u64,
    // Terminal id.
    pub id: u64,
}
/**
* @api {连接节点} /CN CN
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {Terminal[]} terminals 端子列表
*/
// Connective Node: an electrical junction joining a set of terminals.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct CN {
    pub id: u64,
    // Ids of the terminals joined at this node.
    pub terminals: Vec<u64>,
}
// Topology-node kind. Variant ORDER matters: PartialOrd is derived, and
// TN::merge/set_type keep the smaller (= earlier-declared, higher-priority)
// type, so Source outranks DG outranks Load, etc.
#[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Hash, Clone, Copy, PartialOrd)]
pub enum TNType {
    // source node
    Source,
    // distributed generation
    DG,
    // load node
    Load,
    // tie/link node
    Link,
    None,
}
// Topology Node: a group of connective nodes electrically merged together.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct TN {
    // Connective nodes belonging to this topology node.
    pub cns: Vec<CN>,
    // Node kind; see TNType for the priority ordering.
    pub tn_type: TNType,
    // Devices attached to this topology node.
    pub dev_ids: Vec<u64>,
}
impl TN {
pub fn add_cn(&mut self, cn: CN) {
self.cns.push(cn);
}
pub fn add_dev(&mut self, dev_id: u64) {
self.dev_ids.push(dev_id);
}
// 合并两个TN
pub fn merge(&mut self, other: TN) {
self.cns.extend(other.cns);
self.dev_ids.extend(other.dev_ids);
if other.tn_type < self.tn_type {
self.tn_type = other.tn_type;
}
}
pub fn set_type(&mut self, tn_type: TNType) {
if tn_type < self.tn_type {
self.tn_type = tn_type;
}
}
}
/**
* @api {测点定义} /MeasureDef MeasureDef
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {u64} id 测点定义id
* @apiSuccess {u64} point_id 测点id
* @apiSuccess {u64} terminal_id 测点所属的端口的id
* @apiSuccess {u64} dev_id 测点所属的设备的id
*/
// Measure-point definition: ties a point to a device terminal and phase.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct MeasureDef {
    // Measure-definition id.
    pub id: u64,
    // Point id.
    pub point_id: u64,
    // Id of the terminal the point is attached to.
    pub terminal_id: u64,
    // Id of the device the point belongs to.
    pub dev_id: u64,
    // Phase of the measurement.
    pub phase: MeasPhase,
}
impl MeasureDef {
    /// Build the localized CSV header line for measure-definition exports.
    /// `text_map` supplies translations per column key; missing keys fall
    /// back to the English defaults.
    pub fn get_csv_header(text_map: &HashMap<String, String>) -> String {
        // Borrow the translated string (or the static default) instead of
        // allocating a fallback String on every call: the original
        // `unwrap_or(&"...".to_string())` allocated even when the key existed.
        fn col<'a>(map: &'a HashMap<String, String>, key: &str, default: &'a str) -> &'a str {
            map.get(key).map(String::as_str).unwrap_or(default)
        }
        format!(
            "{},{},{},{},{}",
            col(text_map, "index", "Index"),
            col(text_map, "id", "ID"),
            col(text_map, "point_id", "Point ID"),
            col(text_map, "dev_id", "Dev ID"),
            col(text_map, "terminal_id", "Terminal ID"),
        )
    }
    /// Serialize this definition as one CSV row: id, point, device, terminal.
    /// NOTE(review): emits 4 fields while the header has 5 columns — the
    /// leading index is presumably prepended by the caller; verify.
    pub fn to_csv_str(&self) -> String {
        format!("{},{},{},{}", self.id, self.point_id, self.dev_id, self.terminal_id)
    }
}
/**
* @api {电气岛} /Island Island
* @apiPrivate
* @apiGroup A_Object
* @apiSuccess {Map} resources 资源,HashMap<id:u64, NetworkRsr>
* @apiSuccess {Map} measures 测点,HashMap<id:u64, MeasureDef[]>
* @apiSuccess {Map} prop_groups 属性分组,HashMap<id:u64, RsrPropGroup>
* @apiSuccess {CN[]} cns 连接节点列表
*/
// Electrical island: the full set of resources, measures, property groups and
// connective nodes forming one connected network model.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct Island {
    // key: dev_id
    pub resources: HashMap<u64, NetworkRsr>,
    // key: dev_id
    pub measures: HashMap<u64, Vec<MeasureDef>>,
    // key: prop_group_id
    pub prop_groups: HashMap<u64, RsrPropGroup>,
    // All connective nodes in the island.
    pub cns: Vec<CN>,
}
impl NetworkRsr {
    /// Resolve this resource's device type from its definition, or `Unknown`
    /// when `define_id` is not present in `dev_defs`.
    pub fn get_rsr_type(&self, dev_defs: &HashMap<u64, RsrDefine>) -> PsRsrType {
        // Was a mixed `return`-in-if / else-expression; map+unwrap_or is the
        // idiomatic equivalent.
        dev_defs
            .get(&self.define_id)
            .map(|def| def.rsr_type)
            .unwrap_or(PsRsrType::Unknown)
    }
    /// Look up the value of property `prop_name` across this resource's
    /// property groups; returns `PropValue::Unknown` when not found.
    pub fn get_prop_value(&self, prop_name: &str, prop_groups: &HashMap<u64, RsrPropGroup>,
                          prop_defs: &HashMap<u64, PropDefine>) -> PropValue {
        for prop_group_id in &self.prop_group_ids {
            if let Some(rpg) = prop_groups.get(prop_group_id) {
                // Walk defines and values in lockstep; zip also guards against
                // a `props` list shorter than `defines` (the original indexed
                // rpg.props[i] and could panic on malformed data).
                for (define_id, prop) in rpg.defines.iter().zip(&rpg.props) {
                    if let Some(prop_def) = prop_defs.get(define_id) {
                        if prop_def.name == prop_name {
                            return prop.clone();
                        }
                    }
                }
            }
        }
        PropValue::Unknown
    }
    /// Same lookup as `get_prop_value`, but for a map of borrowed definitions
    /// (as built by e.g. `create_static_topo`).
    pub fn get_prop_value2(&self, prop_name: &str, prop_groups: &HashMap<u64, RsrPropGroup>,
                           prop_defs: &HashMap<u64, &PropDefine>) -> PropValue {
        for prop_group_id in &self.prop_group_ids {
            if let Some(rpg) = prop_groups.get(prop_group_id) {
                for (define_id, prop) in rpg.defines.iter().zip(&rpg.props) {
                    if let Some(prop_def) = prop_defs.get(define_id) {
                        if prop_def.name == prop_name {
                            return prop.clone();
                        }
                    }
                }
            }
        }
        PropValue::Unknown
    }
}
\ No newline at end of file
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use eig_domain::{prop::DataUnit, MeasureValue};
use crate::model::dev::{Island, PropDefine, RsrDefine};
pub mod dev;
pub mod plan;
/// Kind of a model section carried in a `PluginInput`.
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub enum ModelType {
    // Island model: contributes 3 CBOR sections (island, defines, prop defs).
    Island,
    // Measurements: contributes 2 CBOR sections (values, units).
    Meas,
    // Named files / outgoing dataframe names (no byte sections).
    File(Vec<String>),
    Outgoing(Vec<String>),
}
/// Input handed to a plugin: `bytes` is a concatenation of CBOR sections whose
/// lengths are listed in `model_len`, ordered according to `model`; `dfs`
/// carries dataframe CSVs with lengths in `dfs_len`.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct PluginInput {
    pub model: Vec<ModelType>,
    pub model_len: Vec<u32>,
    pub dfs: Vec<String>,
    pub dfs_len: Vec<u32>,
    pub bytes: Vec<u8>,
}
/// Output produced by a plugin: either an error message, or named CSV byte
/// buffers with one Arrow schema per buffer.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct PluginOutput {
    pub error_msg: Option<String>,
    pub schema: Option<Vec<arrow_schema::Schema>>,
    pub csv_bytes: Vec<(String, Vec<u8>)>,
}
/// Extract the island model (plus its resource and property definitions) from
/// a plugin input.
///
/// `input.bytes` is a concatenation of CBOR sections whose lengths are listed
/// in `input.model_len`, in the order given by `input.model`: an `Island`
/// entry contributes three sections (island, resource defines, property
/// defines); a `Meas` entry contributes two sections, which are skipped here.
///
/// Returns an error when the length table is inconsistent or decoding fails.
pub fn get_island_from_plugin_input(input: &PluginInput) -> Result<(Island, Vec<PropDefine>, HashMap<u64, RsrDefine>), String> {
    let mut from = 0;
    let mut index = 0;
    for model in &input.model {
        match model {
            ModelType::Island => {
                // --- section 1: the island itself ---
                if input.model_len.len() <= index {
                    return Err("model_len length error".to_string());
                }
                let size = input.model_len[index] as usize;
                let end = from + size;
                let r = ciborium::from_reader(&input.bytes[from..end]);
                if r.is_err() {
                    return Err(format!("{:?}", r));
                }
                from += size;
                let island = r.unwrap();
                index += 1;
                // --- section 2: resource defines ---
                if input.model_len.len() <= index {
                    return Err("model_len length error".to_string());
                }
                let size = input.model_len[index] as usize;
                let end = from + size;
                let r = ciborium::from_reader(&input.bytes[from..end]);
                if r.is_err() {
                    return Err(format!("{:?}", r));
                }
                from += size;
                let defines = r.unwrap();
                index += 1;
                // --- section 3: property defines ---
                if input.model_len.len() <= index {
                    return Err("model_len length error".to_string());
                }
                let size = input.model_len[index] as usize;
                let end = from + size;
                let r = ciborium::from_reader(&input.bytes[from..end]);
                if r.is_err() {
                    return Err(format!("{:?}", r));
                }
                let prop_defs = r.unwrap();
                return Ok((island, prop_defs, defines));
            }
            ModelType::Meas => {
                // Skip the two measurement sections without decoding.
                // Fixed off-by-one: only indices `index` and `index + 1` are
                // read, so requiring len > index + 1 suffices; the original
                // demanded len > index + 2 and rejected valid inputs whose
                // Meas sections were last in the table.
                if input.model_len.len() <= index + 1 {
                    return Err("model_len length error".to_string());
                }
                from += input.model_len[index] as usize;
                from += input.model_len[index + 1] as usize;
                index += 2;
            }
            _ => {}
        }
    }
    Err("Island not found in plugin input".to_string())
}
/// Walks the chunk layout described by `input.model` / `input.model_len`,
/// skipping Island payloads (three chunks each) until the Meas entry is
/// found, then decodes its two CBOR chunks: the measure values and the data
/// units.
///
/// Returns `Err` on a malformed layout, a CBOR decoding failure, or when no
/// Meas entry exists.
pub fn get_meas_from_plugin_input(input: &PluginInput) -> Result<(Vec<MeasureValue>, HashMap<u64, DataUnit>), String> {
    // Returns the byte slice of the next chunk and advances both cursors.
    // NOTE: the original first check was `model_len.len() < index`, which
    // allowed `len == index` and then panicked on `model_len[index]`; `get`
    // makes the lookup total. The byte slice is bounds-checked for the same
    // reason.
    fn next_chunk<'a>(input: &'a PluginInput, from: &mut usize, index: &mut usize) -> Result<&'a [u8], String> {
        let size = *input
            .model_len
            .get(*index)
            .ok_or_else(|| "model_len length error".to_string())? as usize;
        let end = *from + size;
        let chunk = input
            .bytes
            .get(*from..end)
            .ok_or_else(|| "bytes length error".to_string())?;
        *from = end;
        *index += 1;
        Ok(chunk)
    }

    let mut from = 0usize;
    let mut index = 0usize;
    for model in &input.model {
        match model {
            ModelType::Meas => {
                let meas = ciborium::from_reader(next_chunk(input, &mut from, &mut index)?)
                    .map_err(|e| format!("{:?}", e))?;
                let units = ciborium::from_reader(next_chunk(input, &mut from, &mut index)?)
                    .map_err(|e| format!("{:?}", e))?;
                return Ok((meas, units));
            }
            ModelType::Island => {
                // An Island entry owns exactly three chunks; skip all three.
                next_chunk(input, &mut from, &mut index)?;
                next_chunk(input, &mut from, &mut index)?;
                next_chunk(input, &mut from, &mut index)?;
            }
            _ => {}
        }
    }
    Err("Measure not found in plugin input".to_string())
}
/// Computes the byte offset where the dataframe section begins inside
/// `input.bytes`, i.e. the total size of every Meas (two chunks) and Island
/// (three chunks) payload that precedes it.
///
/// Returns `Err` when `input.model_len` holds fewer entries than the model
/// list requires.
pub fn get_df_from_in_plugin(input: &PluginInput) -> Result<usize, String> {
    let mut offset = 0usize;
    let mut cursor = 0usize;
    for entry in &input.model {
        // Each model kind owns a fixed number of length entries.
        let chunks = match entry {
            ModelType::Meas => 2,
            ModelType::Island => 3,
            _ => continue,
        };
        if input.model_len.len() < cursor + chunks {
            return Err(format!("model_len length error, expect more then {}, actual {}",
                               cursor + chunks, input.model_len.len()));
        }
        offset += input.model_len[cursor..cursor + chunks]
            .iter()
            .map(|&len| len as usize)
            .sum::<usize>();
        cursor += chunks;
    }
    Ok(offset)
}
// #[inline]
// pub fn get_wasm_result(output: PluginOutput) -> u64 {
// // 下面的unwrap是必要的,否则输出的字节无法解析
// let mut v = Vec::new();
// ciborium::into_writer(&output, &mut v).unwrap();
// v.shrink_to_fit();
// let offset = v.as_ptr() as i32;
// let len = v.len() as u32;
// let mut bytes = BytesMut::with_capacity(8);
// bytes.put_i32(offset);
// bytes.put_u32(len);
// return bytes.get_u64();
// }
#[inline]
/// Escapes a field for CSV output per RFC 4180: a field containing the
/// delimiter, a double quote, or a line break (LF or CR) must be wrapped in
/// double quotes, with embedded quotes doubled. Leading/trailing spaces are
/// also quoted so they survive readers that trim unquoted fields.
pub fn get_csv_str(s: &str) -> String {
    // '\r' is checked too: RFC 4180 line breaks are CRLF, so a bare carriage
    // return inside an unquoted field would corrupt the row.
    if s.contains(',') || s.contains('\n') || s.contains('\r') || s.contains('"')
        || s.starts_with(' ') || s.ends_with(' ') {
        format!("\"{}\"", s.replace('\"', "\"\""))
    } else {
        s.to_string()
    }
}
use serde::{Deserialize, Serialize};
/**
 * @api {Plan object} /DayPlan DayPlan
 * @apiPrivate
 * @apiGroup A_Object
 * @apiSuccess {u64} id plan id
 * @apiSuccess {String} name plan name
 * @apiSuccess {String} [desc] plan description
 * @apiSuccess {tuple[]} plan plan entries; each tuple is (start time: u64, end time: u64, power value: f64)
 */
// NOTE(review): the apidoc marks `desc` as optional, but the field below is a
// non-optional String — confirm which is intended.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct DayPlan {
    pub id: u64,
    pub name: String,
    pub desc: String,
    // (start_time, end_time, power) triples
    pub plan: Vec<(u64, u64, f64)>,
}
/**
 * @api {Plan tree node} /PlanTreeNode PlanTreeNode
 * @apiPrivate
 * @apiGroup A_Object
 * @apiSuccess {String} path path
 * @apiSuccess {String} name name
 * @apiSuccess {String} [desc] description
 * @apiSuccess {u64} [ref_id] plan ID; None if this is an ordinary node
 */
/// Plan tree node
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct PlanTreeNode {
    pub path: String,
    pub name: String,
    pub desc: Option<String>,
    // plan ID; None if this is an ordinary (non-plan) node
    pub ref_id: Option<u64>,
}
/// Target kind a script builds for: Aoe or Dff.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub enum ScriptTarget {
    Aoe,
    Dff,
}
/**
 * @api {MemsScript} /MemsScript MemsScript
 * @apiPrivate
 * @apiGroup A_Object
 * @apiSuccess {u64} id script id
 * @apiSuccess {String} path script path
 * @apiSuccess {String} desc script description
 * @apiSuccess {bool} is_need_island whether an electrical island is required
 * @apiSuccess {u64[]} plans plan list
 * @apiSuccess {String} wasm_module_name wasm module name
 * @apiSuccess {u64} wasm_update_time wasm upload time
 * @apiSuccess {bool} is_file_uploaded whether the file has been uploaded
 * @apiSuccess {bool} is_js whether the file is a javascript file
 */
// Script
// NOTE(review): the apidoc above lists `is_need_island`/`plans`, which do not
// exist on this struct, and omits `target` — confirm and reconcile.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct MemsScript {
    pub id: u64,
    // build target: Aoe or Dff
    pub target : ScriptTarget,
    pub path: String,
    pub desc: String,
    // generates the aoe script
    pub wasm_module_name: String,
    pub wasm_update_time: u64,
    pub is_file_uploaded: bool,
    pub is_js: bool,
}
/**
 * @api {ScriptWasmFile} /ScriptWasmFile ScriptWasmFile
 * @apiPrivate
 * @apiGroup A_Object
 * @apiSuccess {u64} script_id script id
 * @apiSuccess {String} module_name module name
 * @apiSuccess {u8[]} wasm_file wasm file
 * @apiSuccess {u8[]} js_file js file
 */
/// Uploaded artifacts for a script: the wasm binary and the javascript file.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ScriptWasmFile {
    pub script_id: u64,
    pub module_name: String,
    // raw bytes of the wasm binary
    pub wasm_file: Vec<u8>,
    // raw bytes of the javascript file
    pub js_file: Vec<u8>,
}
/**
 * @api {AoeMakeResult} /AoeMakeResult AoeMakeResult
 * @apiPrivate
 * @apiGroup A_Object
 * @apiSuccess {u64} script_id script_id
 * @apiSuccess {u64} make_time make_time
 * @apiSuccess {u64} aoe_model_id aoe_model_id
 * @apiSuccess {u32} island_version electrical island version number
 */
// NOTE(review): the apidoc above (AoeMakeResult) does not match this struct —
// it lists `aoe_model_id`/`island_version`, but the fields here are `model_id`
// and `target`. Confirm and reconcile.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ScriptResult {
    // id of the script that produced this result
    pub script_id: u64,
    // time the artifact was made — units/epoch not visible here; confirm
    pub make_time: u64,
    // id of the generated model — presumably interpreted via `target`; confirm
    pub model_id: u64,
    // which kind of artifact was built (Aoe or Dff)
    pub target: ScriptTarget,
}
\ No newline at end of file
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论