Spaces:
Sleeping
Sleeping
File size: 5,571 Bytes
1906ba6 25e6d0c b6ec121 7f1b97c 1906ba6 21e4551 1906ba6 fe55366 1906ba6 fe55366 1906ba6 fe55366 1906ba6 fe55366 1906ba6 70a4eee 1906ba6 fdd1de9 d65ad8d b266874 fdd1de9 b266874 fdd1de9 b266874 fdd1de9 b266874 eaf669c d297568 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 |
import copy
import gradio as gr
from huggingface_hub import webhook_endpoint, WebhookPayload
from fastapi import Request
#import other libaries
from specklepy.api.client import SpeckleClient
from specklepy.api.credentials import get_default_account, get_local_accounts
from specklepy.transports.server import ServerTransport
from specklepy.api import operations
from specklepy.objects.geometry import Polyline, Point
import pandas as pd
import numpy as np
import json
import os
from utils import *
# Speckle personal access token — supplied via environment (e.g. HF Space secret).
speckle_token = os.environ.get("SPECKLE_TOKEN")
# Resolve paths relative to this file so the script works from any CWD.
current_directory = os.path.dirname(os.path.abspath(__file__))
# Path to the config.json file
config_file_path = os.path.join(current_directory, "config.json")
# Load branch/stream/column configuration used by the webhook below.
with open(config_file_path, 'r') as f:
    config = json.load(f)
# Authenticated Speckle client shared by all webhook invocations.
CLIENT = SpeckleClient(host="https://speckle.xyz/")
CLIENT.authenticate_with_token(token=speckle_token)
print(config.keys())
# Source branch (merged FROM), target branch (merged INTO), and stream id.
branchA = config["branchA"]
branchB = config["branchB"]
STREAM_ID = config["streamID"]
def _fetch_branch_dataframe(branch_name):
    """Fetch the latest commit of *branch_name* and return its data as a DataFrame.

    Speckle streams have been observed exposing the object list under either
    "@Data" or lowercase "@data"; try both before giving up.  Raises the
    underlying lookup error if neither container is present (the original
    code fell through to an unbound-variable NameError instead).
    """
    stream = getSpeckleStream(STREAM_ID, branch_name, CLIENT, commit_id="")
    try:
        stream_data = stream["@Data"]["@{0}"]
    except (KeyError, TypeError):
        print("something went wrong, try again with non-capital d")
        try:
            stream_data = stream["@data"]["@{0}"]
        except (KeyError, TypeError):
            print("check on speckle.com how to access the data")
            raise
    # Transform stream_data to a dataframe; return a copy so callers own it.
    return get_dataframe(stream_data, return_original_df=False).copy()


@webhook_endpoint
async def mergeStreams(request: Request):
    """Merge data from branchA into branchB on the configured Speckle stream.

    Accepts two trigger sources:
      * a custom payload with ``source == "notionTrigger"`` — always runs;
      * a Speckle webhook — runs only for commit_create/commit_delete/
        commit_update events on ``branchA`` of ``STREAM_ID``.

    On a valid trigger it fetches both branches, aggregates them with
    ``aggregate_data_optimized``, strips geometry columns, fills NAs with
    "NA", and commits the result back to ``branchB``.

    Returns the URL of the new commit, or None when the trigger is ignored.
    """
    should_continue = False

    # Read the request body as JSON and log it for debugging in Space logs.
    payload = await request.json()
    print("============= payload =============")
    print(payload)
    print("============= config =============")
    print(config)

    payload = payload["payload"]

    # Webhook calls can come from different sources.
    if payload.get('source') == 'notionTrigger':
        update_source = "notionTrigger"
        should_continue = True
    else:
        update_source = "speckleWebhook"
        event_name = payload["event"]["event_name"]
        streamid = payload.get("stream", {}).get("id")

        # commit_update events carry the branch name in the "old" commit data.
        event_data = payload.get("event", {}).get("data", {})
        if event_name == "commit_update":
            branchName = event_data.get("old", {}).get("branchName")
        else:
            branchName = event_data.get("commit", {}).get("branchName")

        # Only react to the configured stream/branch and known event types.
        valid_event_types = ["commit_create", "commit_delete", "commit_update"]
        if event_name not in valid_event_types:
            print(f"Event type {event_name} is not one of the specified types.")
        elif streamid != STREAM_ID:
            print(f"Stream name {streamid} not found in config.")
        elif branchName != branchA:
            print(f"Branch name {branchName} not found in config.")
        else:
            should_continue = True

    # Guard clause: ignore triggers that don't match the merge criteria.
    if not should_continue:
        return None

    # Fetch both branches (previously duplicated inline).
    df_A = _fetch_branch_dataframe(branchA)
    df_B = _fetch_branch_dataframe(branchB)

    excludeCol = config["EXCLUDE_COLS"]
    uuidCol = config["UUID_COL"]
    refCol = config["REFERENCE_COL"]

    aggregated_df_b, log_dict = aggregate_data_optimized(
        df_A.copy(), df_B.copy(), uuidCol, refCol, excludeCol)

    # Cleanup: drop geometry columns (either capitalization) and fill NAs.
    # BUGFIX: the original dropped the columns into aggregated_df_b_noGeo but
    # then overwrote that variable with aggregated_df_b.fillna(...), so the
    # geometry columns were never actually removed.  errors="ignore" makes the
    # drop a no-op for absent columns, replacing the bare try/except blocks.
    aggregated_df_b_noGeo = (
        aggregated_df_b
        .drop(columns=['@geometry', '@Geometry'], errors="ignore")
        .fillna("NA")
    )
    print(aggregated_df_b_noGeo)

    # Commit the merged table back to branchB.
    commit_id = updateStreamAnalysisFast(
        client=CLIENT,
        stream_id=STREAM_ID,
        branch_name=branchB,
        new_data=aggregated_df_b_noGeo,
        geometryGroupPath=["@Data", "@{0}"],
        match_by_id="id",
        return_original=False,
        comm_message="auto commit from HF; Triggered by:" + update_source)

    return "https://speckle.xyz/streams/" + STREAM_ID + "/commits/" + commit_id