Create components/ethics.py
components/ethics.py (ADDED, +220 −0)
import os
import json
import numpy as np
import random
import math
import matplotlib.pyplot as plt
import time
from typing import Callable, List, Tuple, Dict, Any

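# --- Quantum-inspired multi-objective optimizer --------------------------------
# Evolves a population of real-valued solutions toward the Pareto front of the
# supplied objective functions. Each generation keeps the non-dominated set,
# then refills the population by "entangling" (linearly blending) pairs of
# Pareto parents and "tunneling" (randomly perturbing) coordinates with
# probability `tunneling_prob`.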
class QuantumInspiredMultiObjectiveOptimizer:
    def __init__(self, objective_fns: List[Callable[[List[float]], float]],
                 dimension: int,
                 population_size: int = 100,
                 iterations: int = 200,
                 tunneling_prob: float = 0.2,
                 entanglement_factor: float = 0.5):

        self.objective_fns = objective_fns
        self.dimension = dimension
        self.population_size = population_size
        self.iterations = iterations
        self.tunneling_prob = tunneling_prob
        self.entanglement_factor = entanglement_factor

        self.population = [self._random_solution() for _ in range(population_size)]
        self.pareto_front = []

    def _random_solution(self) -> List[float]:
        return [random.uniform(-10, 10) for _ in range(self.dimension)]

    def _tunnel(self, solution: List[float]) -> List[float]:
        return [x + np.random.normal(0, 1) * random.choice([-1, 1])
                if random.random() < self.tunneling_prob else x
                for x in solution]

    def _entangle(self, solution1: List[float], solution2: List[float]) -> List[float]:
        return [(1 - self.entanglement_factor) * x + self.entanglement_factor * y
                for x, y in zip(solution1, solution2)]

    def _evaluate(self, solution: List[float]) -> List[float]:
        return [fn(solution) for fn in self.objective_fns]

    def _dominates(self, obj1: List[float], obj2: List[float]) -> bool:
        return all(o1 <= o2 for o1, o2 in zip(obj1, obj2)) and any(o1 < o2 for o1, o2 in zip(obj1, obj2))

    def _pareto_selection(self, scored_population: List[Tuple[List[float], List[float]]]) -> List[Tuple[List[float], List[float]]]:
        pareto = []
        for candidate in scored_population:
            if not any(self._dominates(other[1], candidate[1]) for other in scored_population if other != candidate):
                pareto.append(candidate)
        unique_pareto = []
        seen = set()
        for sol, obj in pareto:
            key = tuple(round(x, 6) for x in sol)
            if key not in seen:
                unique_pareto.append((sol, obj))
                seen.add(key)
        return unique_pareto

    def optimize(self) -> Tuple[List[Tuple[List[float], List[float]]], float]:
        start_time = time.time()
        for _ in range(self.iterations):
            scored_population = [(sol, self._evaluate(sol)) for sol in self.population]
            pareto = self._pareto_selection(scored_population)
            self.pareto_front = pareto

            new_population = [p[0] for p in pareto]
            while len(new_population) < self.population_size:
                parent1 = random.choice(pareto)[0]
                parent2 = random.choice(pareto)[0]
                if parent1 == parent2:
                    parent2 = self._tunnel(parent2)
                child = self._entangle(parent1, parent2)
                child = self._tunnel(child)
                new_population.append(child)

            self.population = new_population

        duration = time.time() - start_time
        return self.pareto_front, duration

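# --- Codette perspective helpers ------------------------------------------------
# Stateless helpers that map a (quantum_state, chaos_state) pair to a binary
# neural activation flag, a sine/cosine "dream" transform, and a short
# philosophical annotation used in the meta-reflection table below.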
def simple_neural_activator(quantum_vec, chaos_vec):
    q_sum = sum(quantum_vec)
    c_var = np.var(chaos_vec)
    activated = 1 if q_sum + c_var > 1 else 0
    return activated

def codette_dream_agent(quantum_vec, chaos_vec):
    dream_q = [np.sin(q * np.pi) for q in quantum_vec]
    dream_c = [np.cos(c * np.pi) for c in chaos_vec]
    return dream_q, dream_c

def philosophical_perspective(qv, cv):
    m = np.max(qv) + np.max(cv)
    if m > 1.3:
        return "Philosophical Note: This universe is likely awake."
    else:
        return "Philosophical Note: Echoes in the void."

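# --- Ethical mutation filter ----------------------------------------------------
# Screens cocoon states against configurable policy limits (maximum entropy,
# minimum symmetry) and records any violations for export. Note that the
# "ban_negative_bias" policy key configured below is carried in the policies
# dict but is not yet checked by evaluate().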
class EthicalMutationFilter:
    def __init__(self, policies: Dict[str, Any]):
        self.policies = policies
        self.violations = []

    def evaluate(self, quantum_vec: List[float], chaos_vec: List[float]) -> bool:
        entropy = np.var(chaos_vec)
        symmetry = 1.0 - abs(sum(quantum_vec)) / (len(quantum_vec) * 1.0)

        if entropy > self.policies.get("max_entropy", float('inf')):
            self.annotate_violation(f"Entropy {entropy:.2f} exceeds limit.")
            return False

        if symmetry < self.policies.get("min_symmetry", 0.0):
            self.annotate_violation(f"Symmetry {symmetry:.2f} too low.")
            return False

        return True

    def annotate_violation(self, reason: str):
        print(f"\u26d4 Ethical Filter Violation: {reason}")
        self.violations.append(reason)

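# --- Demo and meta-analysis driver ------------------------------------------------
# Runs the optimizer on two standard benchmark objectives (sphere and Rastrigin),
# plots the resulting Pareto front, then scans local .cocoon files through the
# ethical filter and exports a meta-analysis summary plus any violation log.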
if __name__ == '__main__':
    ethical_policies = {
        "max_entropy": 4.5,
        "min_symmetry": 0.1,
        "ban_negative_bias": True
    }
    ethical_filter = EthicalMutationFilter(ethical_policies)

    def sphere(x: List[float]) -> float:
        return sum(xi ** 2 for xi in x)

    def rastrigin(x: List[float]) -> float:
        return 10 * len(x) + sum(xi**2 - 10 * math.cos(2 * math.pi * xi) for xi in x)

    optimizer = QuantumInspiredMultiObjectiveOptimizer(
        objective_fns=[sphere, rastrigin],
        dimension=20,
        population_size=100,
        iterations=200
    )

    pareto_front, duration = optimizer.optimize()
    print(f"Quantum Optimizer completed in {duration:.2f} seconds")
    print(f"Pareto front size: {len(pareto_front)}")

    x_vals_q = [obj[0] for _, obj in pareto_front]
    y_vals_q = [obj[1] for _, obj in pareto_front]

    plt.scatter(x_vals_q, y_vals_q, c='blue', label='Quantum Optimizer')
    plt.xlabel('Objective 1')
    plt.ylabel('Objective 2')
    plt.title('Pareto Front Visualization')
    plt.legend()
    plt.grid(True)
    plt.show()

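    # Scan the working directory for .cocoon files; each cocoon's quantum and
    # chaos state must pass the ethical filter before it is tabulated.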
    folder = '.'
    quantum_states = []
    chaos_states = []
    proc_ids = []
    labels = []
    all_perspectives = []
    meta_mutations = []

    print("\nMeta Reflection Table:\n")
    header = "Cocoon File | Quantum State | Chaos State | Neural | Dream Q/C | Philosophy"
    print(header)
    print('-' * len(header))

    for fname in os.listdir(folder):
        if fname.endswith('.cocoon'):
            with open(os.path.join(folder, fname), 'r') as f:
                try:
                    dct = json.load(f)['data']
                    q = dct.get('quantum_state', [0, 0])
                    c = dct.get('chaos_state', [0, 0, 0])

                    if not ethical_filter.evaluate(q, c):
                        continue

                    neural = simple_neural_activator(q, c)
                    dreamq, dreamc = codette_dream_agent(q, c)
                    phil = philosophical_perspective(q, c)

                    quantum_states.append(q)
                    chaos_states.append(c)
                    proc_ids.append(dct.get('run_by_proc', -1))
                    labels.append(fname)
                    all_perspectives.append(dct.get('perspectives', []))
                    meta_mutations.append({'file': fname, 'quantum': q, 'chaos': c,
                                           'dreamQ': dreamq, 'dreamC': dreamc,
                                           'neural': neural, 'philosophy': phil})
                    print(f"{fname} | {q} | {c} | {neural} | {dreamq}/{dreamc} | {phil}")
                except Exception as e:
                    print(f"Warning: {fname} failed ({e})")

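    # Plot the dream-transformed states coloured by neural activation class,
    # then export the meta-analysis and any recorded ethical violations to JSON.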
    if meta_mutations:
        dq0 = [m['dreamQ'][0] for m in meta_mutations]
        dc0 = [m['dreamC'][0] for m in meta_mutations]
        ncls = [m['neural'] for m in meta_mutations]

        plt.figure(figsize=(8, 6))
        sc = plt.scatter(dq0, dc0, c=ncls, cmap='spring', s=100)
        plt.xlabel('Dream Quantum[0]')
        plt.ylabel('Dream Chaos[0]')
        plt.title('Meta-Dream Codette Universes')
        plt.colorbar(sc, label="Neural Activation Class")
        plt.grid(True)
        plt.show()

    with open("codette_meta_summary.json", "w") as outfile:
        json.dump(meta_mutations, outfile, indent=2)
    print("\nExported meta-analysis to 'codette_meta_summary.json'")

    if ethical_filter.violations:
        with open("ethics_violation_log.json", "w") as vf:
            json.dump(ethical_filter.violations, vf, indent=2)
        print("\nExported ethics violations to 'ethics_violation_log.json'")
    else:
        print("\nNo ethical violations detected.")