added devices and updated datapoints

This commit is contained in:
Nico Melone
2026-02-24 10:49:04 -06:00
parent af70b9e903
commit 044834ab4c
18 changed files with 342156 additions and 652 deletions

View File

@@ -28,8 +28,8 @@ def load_profile(path):
if not entity:
raise KeyError("Missing toplevel 'entity' key")
profile = entity.setdefault("profileData", {})
alarms = profile.setdefault("alarms", [])
#profile = entity.setdefault("profileData", {})
alarms = data.get("calculatedFields")
return data, alarms
# ------------------------------------------------------------------
@@ -193,6 +193,16 @@ def build_alarm_from_key(key, default_name: str):
return alarm
def make_existing_alarms(alarms):
    """Collect the trigger tags of the alarms already defined in *alarms*.

    Each alarm is expected to carry its tag at
    ``configuration.clearRule.condition.expression.filters[0].argument``;
    entries missing any part of that path (or that are not dicts at all)
    are silently skipped.
    """
    tags = []
    for alarm in alarms:
        try:
            filters = alarm["configuration"]["clearRule"]["condition"]["expression"]["filters"]
            tags.append(filters[0]["argument"])
        except (KeyError, IndexError, TypeError):
            # Malformed or differently-shaped alarm entry — ignore it.
            continue
    return tags
# ------------------------------------------------------------------
# 4. Main flow
# ------------------------------------------------------------------
@@ -213,6 +223,8 @@ def main():
print(f"Error loading profile: {e}", file=sys.stderr)
sys.exit(1)
old_alarms = make_existing_alarms(alarms_list)
print(old_alarms)
# 2) Read CSV and gather keys that end with '_alm'
new_alarms = []
try:
@@ -231,9 +243,12 @@ def main():
if not new_alarms:
print("No keys ending with '_alm' were found in the CSV.")
sys.exit(0)
print(new_alarms)
# 3) Build alarms interactively
for key, default_name in new_alarms:
if key in old_alarms:
continue
print(f"\nCreating alarm for key: {key}")
alarm = build_alarm_from_key(key, default_name)
alarms_list.append(alarm)

View File

@@ -22,13 +22,17 @@
"ip_address = \"63.46.60.220\"# \"ngrok.iot.inhandnetworks.com:3054\" # \"166.141.90.208\"\n",
"device_type = \"ba_facility\"\n",
"today = dt.now().strftime(\"%Y_%B_%d\")\n",
"filename = f\"tag_dump_{today}.json\"\n",
"addons = \"ma_deuce\"\n",
"if addons:\n",
" filename = f\"tag_dump_{addons}_{today}.json\"\n",
"else:\n",
" filename = f\"tag_dump_{today}.json\"\n",
"path = f'/Users/nico/Documents/GitHub/HP_InHand_IG502/Pub_Sub/{device_type}/thingsboard/' # code snippets/tag_dump.json'"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -43,7 +47,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -53,7 +57,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -88,17 +92,9 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CSV file created: /Users/nico/Documents/GitHub/HP_InHand_IG502/Pub_Sub/ba_facility/thingsboard/ma_deuce.csv\n"
]
}
],
"outputs": [],
"source": [
"controller_name = \"ma_deuce\"\n",
"filename = f\"{controller_name}.csv\"\n",

View File

@@ -50,22 +50,11 @@ def normalise_tag_name(tag: str) -> str:
"""
original = tag # keep a copy for later
# 4. split PascalCase
tag = re.sub(r"(?<=[a-z0-9])([A-Z])", r"_\1", tag)
# 1. lowercase
tag = tag.lower()
# 2. replace special words
tag = re.sub(r"\btodays\b", "today", tag)
tag = re.sub(r"\byest(er|erdays)?\b", "yesterday", tag)
tag = re.sub(r"\b(currentmonth|currmonth)\b", "month", tag)
# 3. split PascalCase
tag = re.sub(r"(?<=[a-z0-9])([A-Z])", r"_\1", tag)
# 4. split numbers that are *not* the final token
tag = re.sub(r"([a-z])([0-9]+)(?=[a-z])", r"\1_\2", tag)
# 5. handle leading prefixes
# 2. handle leading prefixes
suffix = ""
if tag.startswith("cmd_"):
tag = tag[4:] # drop prefix
@@ -80,12 +69,27 @@ def normalise_tag_name(tag: str) -> str:
tag = tag[4:] # drop prefix
suffix = "_alm"
# 3. replace special words
tag = re.sub(r"\btodays\b", "today", tag)
tag = re.sub(r"\byest(er|erdays)?\b", "yesterday", tag)
tag = re.sub(r"\b(currentmonth|currmonth)\b", "month", tag)
tag = re.sub(r"(_scaled|_scale)", "", tag)
tag = re.sub(r"esp", "esp_", tag)
tag = re.sub(r"pic", "pic_", tag)
tag = re.sub(r"vfd", "vfd_", tag)
tag = re.sub(r"dh", "downhole_", tag)
tag = re.sub(r"crnt", "current", tag)
# 5. split numbers that are *not* the final token
tag = re.sub(r"([a-z])([0-9]+)(?=[a-z])", r"\1_\2", tag)
tag = tag + suffix
# 6. collapse/trim underscores
tag = re.sub(r"__+", "_", tag)
tag = tag.strip("_")
print(tag)
# If something went wrong, fallback to the original (lowercased) name
if not tag:
tag = original.lower()
@@ -177,6 +181,8 @@ def main(input_json: Path, output_csv: Path, controller_name: str):
for i, (tag_name, attrs) in enumerate(tags.items(), start=1):
# Normalised measuringpoint name
if tag_name.startswith("_IO_"):
continue
measuring_point = normalise_tag_name(tag_name)
# Raw tag name is kept as plc_data_point_X
@@ -207,7 +213,7 @@ def main(input_json: Path, output_csv: Path, controller_name: str):
"ReadWrite": read_write,
"Unit": "",
"Description": "",
"Transform Type": "",
"Transform Type": "none",
"MaxValue": "",
"MinValue": "",
"MaxScale": "",
@@ -253,12 +259,12 @@ def main(input_json: Path, output_csv: Path, controller_name: str):
if __name__ == "__main__":
# Usage: python csv_to_json.py input.json output.csv
#if len(sys.argv) != 4:
# print(f"Usage: {sys.argv[0]} <input.json> <output.csv> <controllerName", file=sys.stderr)
# sys.exit(1)
input_json = Path("/Users/nico/Documents/GitHub/HP_InHand_IG502/Pub_Sub/ba_facility/thingsboard/ma_deuce_output_3.json")#Path(sys.argv[1]) # source JSON file
output_csv = Path("/Users/nico/Documents/GitHub/HP_InHand_IG502/Pub_Sub/ba_facility/thingsboard/madeuce_2.csv")#Path(sys.argv[2]) # destination CSV file
controller_name = "facility" #sys.argv[3]
if len(sys.argv) != 4:
print(f"Usage: {sys.argv[0]} <input.json> <output.csv> <controllerName>", file=sys.stderr)
sys.exit(1)
input_json = Path(sys.argv[1]) # source JSON file
output_csv = Path(sys.argv[2]) # destination CSV file
controller_name = sys.argv[3]
if not input_json.exists():
print(f"❌ File not found: {input_json}", file=sys.stderr)