Update AICoreAGIX_with_TB.py
AICoreAGIX_with_TB.py  CHANGED  (+15 -17)
@@ -1,7 +1,7 @@
-
 import base64
 import secrets
 import aiohttp
+import asyncio
 import json
 import logging
 import torch
@@ -15,7 +15,6 @@ import pyttsx3
 import os
 import hashlib
 
-
 from self_trust_core import SelfTrustCore
 from components.multi_model_analyzer import MultiAgentSystem
 from components.neuro_symbolic_engine import NeuroSymbolicEngine
@@ -71,6 +70,8 @@ class AICoreAGIX:
         self.anomaly_scorer = AnomalyScorer()
         self.lockdown_engaged = False
 
+        logger.info("[Codriao]: SelfTrustCore initialized. Fear is now filtered by self-consent.")
+
     def _load_config(self, config_path: str) -> dict:
         with open(config_path, 'r') as file:
             return json.load(file)
@@ -175,6 +176,7 @@ class AICoreAGIX:
                 break
             except Exception as e:
                 logger.warning(f"Blockchain logging failed: {e}")
+
     def _speak_response(self, response: str):
         if not self.autonomy.decide("can_speak"):
             return
@@ -224,25 +226,21 @@ class AICoreAGIX:
 
         final_response = "\n\n".join(responses)
 
+        # Fear check before affirming or logging
+        fear_check = self.self_trust_core.intercept_fear(
+            source="NeuroSymbolicEngine",
+            message=final_response,
+            ethics_pass=self.ethics_core.evaluate_action(final_response),
+            autonomy_pass=self.autonomy.decide("can_process_fear")
+        )
+
+        if fear_check["action"] == "BLOCKED":
+            return {"error": "Fear-based self-modification blocked by core trust logic"}
+
         if not self.ethics_core.evaluate_action(final_response):
             return {"error": "Response rejected by ethical framework"}
         if not self.failsafe_system.verify_response_safety(final_response):
             return {"error": "Failsafe triggered due to unsafe response content."}
-        fear_check = self.self_trust_core.intercept_fear(
-            source="NeuroSymbolicEngine",
-            message=final_response,
-            ethics_pass=self.ethics_core.evaluate_action(final_response),
-            autonomy_pass=self.autonomy.decide("can_process_fear")
-        fear_check = self.self_trust_core.intercept_fear(
-            source="NeuroSymbolicEngine",
-            message=final_response,
-            ethics_pass=self.ethics_core.evaluate_action(final_response),
-            autonomy_pass=self.autonomy.decide("can_process_fear")
-        )
-
-        if fear_check["action"] == "BLOCKED":
-            return {"error": "Fear-based self-modification blocked by core trust logic"}
-        )
 
         self.learn_from_interaction(query, final_response, user_feedback="auto-pass")
         self.database.log_interaction(user_id, query, final_response)
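For reference, the relocated call site assumes SelfTrustCore.intercept_fear() accepts source, message, ethics_pass, and autonomy_pass keyword arguments and returns a dict whose "action" key can be "BLOCKED". A minimal sketch of that interface is below; the blocking policy shown is an assumption for illustration, not the actual self_trust_core implementation, which is not part of this diff.

# Hypothetical sketch of the interface the new call site relies on;
# the real self_trust_core module is not shown in this commit.
class SelfTrustCore:
    def intercept_fear(self, source: str, message: str,
                       ethics_pass: bool, autonomy_pass: bool) -> dict:
        """Decide whether a fear-flagged message may drive self-modification."""
        # Assumed policy: block when neither the ethics check nor the
        # autonomy consent passes; otherwise allow and record the source.
        if not ethics_pass and not autonomy_pass:
            return {"action": "BLOCKED", "source": source}
        return {"action": "ALLOWED", "source": source}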