@shrimo
Last active April 19, 2025 21:19
Digital life
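A small evolutionary simulation: organisms with tiny PyTorch policy networks move on a grid, eat food, avoid enemies, learn with a REINFORCE-style update, and evolve through selection and weight mutation. Two scripts follow: a minimal version, then an extended version with a competing population, predators, mating, and disease.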
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
# --- PARAMETERS ---
GRID_SIZE = 64
NUM_ORGANISMS = 20
NUM_FOOD = 150
NUM_ENEMIES = 5
VISION_RADIUS = 2
DEVICE = 'cpu'
MAX_MEMORY = 5
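# The Brain input size used throughout is
#   (2 * VISION_RADIUS + 1) ** 2 + 2 + MAX_MEMORY * 2 = 25 + 2 + 10 = 37:
# a flattened 5x5 vision patch, the mean (x, y) offset of other organisms'
# signals, and up to MAX_MEMORY remembered food positions as (dx, dy) pairs.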
# --- BRAIN ---
class Brain(nn.Module):
    def __init__(self, input_size, hidden_size=128, output_size=4):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size)
        )

    def forward(self, x):
        return self.model(x)
def mutate(brain, mutation_rate=0.1):
    new_brain = Brain((2 * VISION_RADIUS + 1) ** 2 + 2 + MAX_MEMORY * 2)
    with torch.no_grad():
        for new_param, old_param in zip(new_brain.parameters(), brain.parameters()):
            new_param.copy_(old_param + mutation_rate * torch.randn_like(old_param))
    return new_brain
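# Mutation note: `mutate` performs evolution-strategy-style variation. The
# child starts as a fresh Brain, then copies the parent's weights plus
# zero-mean Gaussian noise scaled by mutation_rate; larger rates explore more
# aggressively but are likelier to destroy learned behavior.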
# --- ORGANISM ---
class Organism:
    def __init__(self, x, y):
        self.x, self.y = x, y
        self.energy = 100
        self.memory = []  # remembers the coordinates of eaten food
        self.brain = Brain((2 * VISION_RADIUS + 1) ** 2 + 2 + MAX_MEMORY * 2)
        self.optimizer = optim.Adam(self.brain.parameters(), lr=0.01)
        self.trajectory = []
    def get_vision(self, env, signals):
        vision = env[max(0, self.y - VISION_RADIUS):self.y + VISION_RADIUS + 1,
                     max(0, self.x - VISION_RADIUS):self.x + VISION_RADIUS + 1]
        padded = np.zeros((2 * VISION_RADIUS + 1, 2 * VISION_RADIUS + 1), dtype=np.float32)
        dy, dx = vision.shape
        padded[:dy, :dx] = vision
        # Mean relative offset of all signalling organisms.
        rel_x = [sx - self.x for sx, sy in signals]
        rel_y = [sy - self.y for sx, sy in signals]
        sig_x = sum(rel_x) / (len(signals) + 1e-5)
        sig_y = sum(rel_y) / (len(signals) + 1e-5)
        # Flatten remembered food coordinates, normalized and zero-padded.
        mem_flat = []
        for mx, my in self.memory[-MAX_MEMORY:]:
            mem_flat.extend([(mx - self.x) / GRID_SIZE, (my - self.y) / GRID_SIZE])
        while len(mem_flat) < MAX_MEMORY * 2:
            mem_flat.append(0.0)
        full_input = np.concatenate([padded.flatten(), [sig_x, sig_y], mem_flat])
        return full_input
    def act(self, env, enemies, signals):
        input_data = self.get_vision(env, signals)
        input_tensor = torch.tensor(input_data, dtype=torch.float32).to(DEVICE)
        logits = self.brain(input_tensor)
        probs = torch.softmax(logits, dim=0)
        action = torch.multinomial(probs, 1).item()
        self.trajectory.append((input_tensor, action, probs[action]))
        dx, dy = [(0, -1), (0, 1), (-1, 0), (1, 0)][action]
        nx, ny = self.x + dx, self.y + dy
        if 0 <= nx < GRID_SIZE and 0 <= ny < GRID_SIZE:
            self.x, self.y = nx, ny
        if env[self.y, self.x] == 1:
            self.energy += 25
            env[self.y, self.x] = 0
            self.memory.append((self.x, self.y))
        if (self.x, self.y) in enemies:
            self.energy -= 50
        self.energy -= 1
    def learn(self, reward):
        if not self.trajectory:
            return
        loss = 0
        for input_tensor, action, prob in self.trajectory:
            loss -= torch.log(prob) * reward
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.trajectory = []
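# Learning note: `learn` is a REINFORCE-style policy-gradient update. Each call
# to act() stored the probability of the chosen action, and the loss
#     L = -sum_t log pi(a_t | s_t) * R
# uses the organism's final energy as an undiscounted return R, so action
# sequences that ended with high energy are made more likely.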
# --- ENVIRONMENT ---
def create_environment():
    env = np.zeros((GRID_SIZE, GRID_SIZE), dtype=np.uint8)
    for _ in range(NUM_FOOD):
        x, y = random.randint(0, GRID_SIZE - 1), random.randint(0, GRID_SIZE - 1)
        env[y, x] = 1
    return env

def place_enemies():
    return {(random.randint(0, GRID_SIZE - 1), random.randint(0, GRID_SIZE - 1)) for _ in range(NUM_ENEMIES)}
# --- RENDERING ---
def render(env, organisms, enemies):
    img = np.zeros((GRID_SIZE, GRID_SIZE, 3), dtype=np.uint8)
    img[env == 1] = (0, 255, 0)          # food: green (colors are BGR)
    for ex, ey in enemies:
        img[ey, ex] = (0, 0, 255)        # enemies: red
    for org in organisms:
        img[org.y, org.x] = (255, 0, 0)  # organisms: blue
    img = cv2.resize(img, (640, 640), interpolation=cv2.INTER_NEAREST)
    cv2.imshow("Digital Life", img)
# --- MAIN LOOP ---
def main():
    organisms = [Organism(random.randint(0, GRID_SIZE - 1), random.randint(0, GRID_SIZE - 1)) for _ in range(NUM_ORGANISMS)]
    for episode in range(1000):
        env = create_environment()
        enemies = place_enemies()
        for step in range(120):
            signals = [(org.x, org.y) for org in organisms if org.energy > 0]
            for org in organisms:
                if org.energy > 0:
                    org.act(env, enemies, signals)
            render(env, organisms, enemies)
            # Exit on the 'q' key.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                return
        for org in organisms:
            org.learn(org.energy)
        # Selection: the top half survives; the rest are replaced by mutated clones.
        organisms.sort(key=lambda o: o.energy, reverse=True)
        survivors = organisms[:NUM_ORGANISMS // 2]
        new_organisms = []
        for _ in range(NUM_ORGANISMS - len(survivors)):
            parent = random.choice(survivors)
            clone = Organism(random.randint(0, GRID_SIZE - 1), random.randint(0, GRID_SIZE - 1))
            clone.brain = mutate(parent.brain, mutation_rate=0.05)
            # Rebind the optimizer to the new brain's parameters; otherwise it
            # would keep updating the discarded initial network.
            clone.optimizer = optim.Adam(clone.brain.parameters(), lr=0.01)
            new_organisms.append(clone)
        organisms = survivors + new_organisms
        for org in organisms:
            org.energy = 100
            org.trajectory = []
            org.memory = []
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
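The script below is an extended version of the same simulation. It adds a second, competing population with its own brains, dynamic predators that gain energy from kills and reproduce, sexual reproduction via weight crossover, aging and starvation, disease outbreaks with heritable immunity, and food that replenishes up to a fixed cap.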
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
from concurrent.futures import ThreadPoolExecutor
# --- PARAMETERS ---
GRID_SIZE = 64 # reduced grid size
CELL_SIZE = 10 # remains the same for visualization
IMG_SIZE = GRID_SIZE * CELL_SIZE
LEGEND_HEIGHT = 120 # height for legend area
NUM_ORGANISMS = 10 # reduced number of primary organisms
NUM_COMPETITORS = 10 # reduced number of competing organisms
NUM_FOOD = 300 # initial (and maximum) number of food items
NUM_INITIAL_PREDATORS = 5 # reduced initial predators
VISION_RADIUS = 2
DEVICE = "cpu"
MAX_MEMORY = 5
# Starvation parameters: organisms die if they don't eat for STARVATION_THRESHOLD steps.
# Increased threshold to allow longer life without food.
STARVATION_THRESHOLD = 30
# Population control for prey
POPULATION_THRESHOLD = 40 # adjusted threshold for disease outbreak
DISEASE_DEATH_PROB = 0.25 # probability non-immune prey dies during outbreak
IMMUNITY_GRANT_PROB = 0.2 # probability surviving prey gains immunity
# Predator parameters
PREDATOR_INITIAL_ENERGY = 50
PREDATOR_ENERGY_LOSS = 2
PREDATOR_KILL_ENERGY_GAIN = 30
PREDATOR_REPRODUCTION_THRESHOLD = 60
PREDATOR_REPRODUCTION_COST = 30
# New food generation: new food is added such that total food never exceeds NUM_FOOD.
NEW_FOOD_PER_STEP = 5
RENDER_INTERVAL = 5 # update visualization every N simulation steps
# --- BRAIN ---
class Brain(nn.Module):
    def __init__(self, input_size, hidden_size=128, output_size=4):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
        )

    def forward(self, x):
        return self.model(x)
def mutate(brain, mutation_rate=0.1):
    new_brain = Brain((2 * VISION_RADIUS + 1) ** 2 + 2 + MAX_MEMORY * 2)
    with torch.no_grad():
        for new_param, old_param in zip(new_brain.parameters(), brain.parameters()):
            new_param.copy_(old_param + mutation_rate * torch.randn_like(old_param))
    return new_brain
def crossover(brain1, brain2, mutation_rate=0.05):
    new_brain = Brain((2 * VISION_RADIUS + 1) ** 2 + 2 + MAX_MEMORY * 2)
    with torch.no_grad():
        for new_param, param1, param2 in zip(
            new_brain.parameters(), brain1.parameters(), brain2.parameters()
        ):
            new_param.copy_(
                (param1 + param2) / 2 + mutation_rate * torch.randn_like(param1)
            )
    return new_brain
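# Crossover note: the child's weights are the element-wise mean of the two
# parents' weights plus a small Gaussian perturbation, a simple blend
# crossover for neural networks. Averaging two unrelated networks can cancel
# out useful structure, so this tends to work best once the population has
# partly converged.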
# --- PRIMARY ORGANISM (Population A) ---
class Organism:
    def __init__(self, x, y):
        self.x, self.y = x, y
        self.energy = 100
        self.memory = []  # consumed food coordinates
        self.brain = Brain((2 * VISION_RADIUS + 1) ** 2 + 2 + MAX_MEMORY * 2)
        self.optimizer = optim.Adam(self.brain.parameters(), lr=0.01)
        self.trajectory = []  # list of tuples (tensor, action, probability)
        self.sex = random.choice(["M", "F"])
        self.has_mated = False
        self.age = 0
        # Increased base life to extend lifespan.
        self.base_life = 720
        self.immune = False
        self.starvation_steps = 0  # count of steps without eating
    def get_vision(self, env, signals):
        vision = env[
            max(0, self.y - VISION_RADIUS) : self.y + VISION_RADIUS + 1,
            max(0, self.x - VISION_RADIUS) : self.x + VISION_RADIUS + 1,
        ]
        padded = np.zeros(
            (2 * VISION_RADIUS + 1, 2 * VISION_RADIUS + 1), dtype=np.float32
        )
        dy, dx = vision.shape
        padded[:dy, :dx] = vision
        rel_x = [sx - self.x for sx, sy in signals]
        rel_y = [sy - self.y for sx, sy in signals]
        sig_x = sum(rel_x) / (len(signals) + 1e-5)
        sig_y = sum(rel_y) / (len(signals) + 1e-5)
        mem_flat = []
        for mx, my in self.memory[-MAX_MEMORY:]:
            mem_flat.extend([(mx - self.x) / GRID_SIZE, (my - self.y) / GRID_SIZE])
        while len(mem_flat) < MAX_MEMORY * 2:
            mem_flat.append(0.0)
        full_input = np.concatenate([padded.flatten(), [sig_x, sig_y], mem_flat])
        return full_input
    def act(self, env, static_enemies, signals):
        input_data = self.get_vision(env, signals)
        input_tensor = torch.tensor(input_data, dtype=torch.float32, device=DEVICE)
        logits = self.brain(input_tensor)
        probs = torch.softmax(logits, dim=0)
        action = torch.multinomial(probs, 1).item()
        self.trajectory.append((input_tensor, action, probs[action]))
        dx, dy = [(0, -1), (0, 1), (-1, 0), (1, 0)][action]
        nx, ny = self.x + dx, self.y + dy
        food_eaten = False
        if 0 <= nx < GRID_SIZE and 0 <= ny < GRID_SIZE:
            self.x, self.y = nx, ny
        if env[self.y, self.x] == 1:
            self.energy += 25
            env[self.y, self.x] = 0
            self.memory.append((self.x, self.y))
            food_eaten = True
        if (self.x, self.y) in static_enemies:
            self.energy -= 50
        self.energy -= 1
        self.age += 1
        # Update starvation counter.
        if food_eaten:
            self.starvation_steps = 0
        else:
            self.starvation_steps += 1
            if self.starvation_steps >= STARVATION_THRESHOLD:
                self.energy = 0
        effective_life = self.base_life + int(self.energy / 2)
        if self.age > effective_life:
            self.energy = 0
    def learn(self, reward):
        if not self.trajectory:
            return
        # REINFORCE: the stored action probabilities stay attached to the
        # graph so gradients flow back into the brain's parameters.
        loss = torch.tensor(0.0, device=DEVICE)
        for tensor_in, action, prob in self.trajectory:
            loss = loss - torch.log(prob) * reward
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.trajectory = []
# --- COMPETING ORGANISM (Population B) ---
class Competitor:
    def __init__(self, x, y):
        self.x, self.y = x, y
        self.energy = 100
        self.memory = []
        self.brain = Brain((2 * VISION_RADIUS + 1) ** 2 + 2 + MAX_MEMORY * 2)
        self.optimizer = optim.Adam(self.brain.parameters(), lr=0.01)
        self.trajectory = []
        self.sex = random.choice(["M", "F"])
        self.has_mated = False
        self.age = 0
        # Increased base life for competing organisms as well.
        self.base_life = 540
        self.immune = False
        self.starvation_steps = 0
    def get_vision(self, env, signals):
        vision = env[
            max(0, self.y - VISION_RADIUS) : self.y + VISION_RADIUS + 1,
            max(0, self.x - VISION_RADIUS) : self.x + VISION_RADIUS + 1,
        ]
        padded = np.zeros(
            (2 * VISION_RADIUS + 1, 2 * VISION_RADIUS + 1), dtype=np.float32
        )
        dy, dx = vision.shape
        padded[:dy, :dx] = vision
        rel_x = [sx - self.x for sx, sy in signals]
        rel_y = [sy - self.y for sx, sy in signals]
        sig_x = sum(rel_x) / (len(signals) + 1e-5)
        sig_y = sum(rel_y) / (len(signals) + 1e-5)
        mem_flat = []
        for mx, my in self.memory[-MAX_MEMORY:]:
            mem_flat.extend([(mx - self.x) / GRID_SIZE, (my - self.y) / GRID_SIZE])
        while len(mem_flat) < MAX_MEMORY * 2:
            mem_flat.append(0.0)
        full_input = np.concatenate([padded.flatten(), [sig_x, sig_y], mem_flat])
        return full_input
    def act(self, env, static_enemies, signals):
        input_data = self.get_vision(env, signals)
        input_tensor = torch.tensor(input_data, dtype=torch.float32, device=DEVICE)
        logits = self.brain(input_tensor)
        probs = torch.softmax(logits, dim=0)
        action = torch.multinomial(probs, 1).item()
        self.trajectory.append((input_tensor, action, probs[action]))
        dx, dy = [(0, -1), (0, 1), (-1, 0), (1, 0)][action]
        nx, ny = self.x + dx, self.y + dy
        food_eaten = False
        if 0 <= nx < GRID_SIZE and 0 <= ny < GRID_SIZE:
            self.x, self.y = nx, ny
        if env[self.y, self.x] == 1:
            self.energy += 25
            env[self.y, self.x] = 0
            self.memory.append((self.x, self.y))
            food_eaten = True
        if (self.x, self.y) in static_enemies:
            self.energy -= 50
        self.energy -= 1
        self.age += 1
        if food_eaten:
            self.starvation_steps = 0
        else:
            self.starvation_steps += 1
            if self.starvation_steps >= STARVATION_THRESHOLD:
                self.energy = 0
        effective_life = self.base_life + int(self.energy / 2)
        if self.age > effective_life:
            self.energy = 0
    def learn(self, reward):
        if not self.trajectory:
            return
        loss = torch.tensor(0.0, device=DEVICE)
        for tensor_in, action, prob in self.trajectory:
            loss = loss - torch.log(prob) * reward
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.trajectory = []
# --- PREDATOR (Dynamic) ---
class Predator:
    def __init__(self):
        self.x = random.randint(0, GRID_SIZE - 1)
        self.y = random.randint(0, GRID_SIZE - 1)
        self.energy = PREDATOR_INITIAL_ENERGY

    def act(self, prey_entities):
        # Predators move randomly and kill any prey (primary or competitor)
        # occupying their new cell.
        dx, dy = random.choice([(0, -1), (0, 1), (-1, 0), (1, 0)])
        nx, ny = self.x + dx, self.y + dy
        if 0 <= nx < GRID_SIZE and 0 <= ny < GRID_SIZE:
            self.x, self.y = nx, ny
        for entity in prey_entities:
            if entity.energy > 0 and entity.x == self.x and entity.y == self.y:
                entity.energy = 0  # the predator kills the prey
                self.energy += PREDATOR_KILL_ENERGY_GAIN
                break
        self.energy -= PREDATOR_ENERGY_LOSS

    def reproduce(self):
        if self.energy >= PREDATOR_REPRODUCTION_THRESHOLD:
            self.energy -= PREDATOR_REPRODUCTION_COST
            child = Predator()
            child.x, child.y = self.x, self.y
            child.energy = PREDATOR_INITIAL_ENERGY
            return child
        return None
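# Predator energy economy: each step costs PREDATOR_ENERGY_LOSS (2) and a kill
# grants PREDATOR_KILL_ENERGY_GAIN (30), so a predator must average roughly
# one kill every 15 steps to break even; reproduction requires energy >= 60
# and costs 30, so only well-fed predators multiply.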
# --- MATING FUNCTIONS ---
def mate(org1, org2):
    new_x = random.randint(0, GRID_SIZE - 1)
    new_y = random.randint(0, GRID_SIZE - 1)
    child = Organism(new_x, new_y)
    child.brain = crossover(org1.brain, org2.brain)
    # Rebind the optimizer to the crossover brain's parameters.
    child.optimizer = optim.Adam(child.brain.parameters(), lr=0.01)
    child.sex = random.choice(["M", "F"])
    child.energy = 100
    # Immunity inheritance: certain if both parents are immune,
    # a coin flip if exactly one is, otherwise absent.
    if org1.immune and org2.immune:
        child.immune = True
    elif org1.immune or org2.immune:
        child.immune = random.random() < 0.5
    else:
        child.immune = False
    return child

def mate_competitor(comp1, comp2):
    new_x = random.randint(0, GRID_SIZE - 1)
    new_y = random.randint(0, GRID_SIZE - 1)
    child = Competitor(new_x, new_y)
    child.brain = crossover(comp1.brain, comp2.brain)
    child.optimizer = optim.Adam(child.brain.parameters(), lr=0.01)
    child.sex = random.choice(["M", "F"])
    child.energy = 100
    if comp1.immune and comp2.immune:
        child.immune = True
    elif comp1.immune or comp2.immune:
        child.immune = random.random() < 0.5
    else:
        child.immune = False
    return child
# --- ENVIRONMENT ---
def create_environment():
    # Initialize the environment with NUM_FOOD food items.
    env = np.zeros((GRID_SIZE, GRID_SIZE), dtype=np.uint8)
    for _ in range(NUM_FOOD):
        x = random.randint(0, GRID_SIZE - 1)
        y = random.randint(0, GRID_SIZE - 1)
        env[y, x] = 1
    return env

def place_enemies():
    return {
        (random.randint(0, GRID_SIZE - 1), random.randint(0, GRID_SIZE - 1))
        for _ in range(5)
    }
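# Note: these static enemies are fixed hazard cells that drain 50 energy on
# contact; they are separate from the moving Predator objects above.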
# --- RENDERING ---
def render(env, organisms, competitors, predators):
    img = np.zeros((IMG_SIZE, IMG_SIZE, 3), dtype=np.uint8)
    # Draw food as green circles (all colors are BGR).
    for y in range(GRID_SIZE):
        for x in range(GRID_SIZE):
            if env[y, x] == 1:
                center = (
                    x * CELL_SIZE + CELL_SIZE // 2,
                    y * CELL_SIZE + CELL_SIZE // 2,
                )
                cv2.circle(img, center, CELL_SIZE // 2 - 1, (0, 255, 0), -1)
    # Draw primary organisms as circles.
    for org in organisms:
        if org.energy > 0:
            center = (
                org.x * CELL_SIZE + CELL_SIZE // 2,
                org.y * CELL_SIZE + CELL_SIZE // 2,
            )
            color = (255, 0, 0) if org.sex == "M" else (0, 255, 255)  # blue / yellow
            cv2.circle(img, center, CELL_SIZE // 2, color, -1)
    # Draw competitors as diamonds.
    for comp in competitors:
        if comp.energy > 0:
            cx = comp.x * CELL_SIZE + CELL_SIZE // 2
            cy = comp.y * CELL_SIZE + CELL_SIZE // 2
            pts = np.array(
                [
                    [cx, cy - CELL_SIZE // 2],
                    [cx + CELL_SIZE // 2, cy],
                    [cx, cy + CELL_SIZE // 2],
                    [cx - CELL_SIZE // 2, cy],
                ],
                np.int32,
            ).reshape((-1, 1, 2))
            # Blue / orange in BGR, matching the legend below.
            color = (150, 0, 0) if comp.sex == "M" else (0, 75, 150)
            cv2.fillPoly(img, [pts], color)
    # Draw predators as red triangles.
    for pred in predators:
        cx = pred.x * CELL_SIZE + CELL_SIZE // 2
        cy = pred.y * CELL_SIZE + CELL_SIZE // 2
        pts = np.array(
            [
                [cx, cy - CELL_SIZE // 2],
                [cx - CELL_SIZE // 2, cy + CELL_SIZE // 2],
                [cx + CELL_SIZE // 2, cy + CELL_SIZE // 2],
            ],
            np.int32,
        ).reshape((-1, 1, 2))
        cv2.fillPoly(img, [pts], (0, 0, 255))
    # White strip above the grid for the legend.
    final_img = cv2.copyMakeBorder(
        img, LEGEND_HEIGHT, 0, 0, 0, cv2.BORDER_CONSTANT, value=(255, 255, 255)
    )
    legend_texts = [
        "Legend:",
        "Green Circle: Food",
        "Blue Circle: Primary Organism (Male)",
        "Yellow Circle: Primary Organism (Female)",
        "Blue Diamond: Competitor (Male)",
        "Orange Diamond: Competitor (Female)",
        "Red Triangle: Predator",
    ]
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 0.5
    font_thickness = 1
    y0 = 20
    dy = 15
    for i, text in enumerate(legend_texts):
        cv2.putText(
            final_img,
            text,
            (10, y0 + i * dy),
            font,
            font_scale,
            (0, 0, 0),
            font_thickness,
            cv2.LINE_AA,
        )
    cv2.imshow("Digital Life", final_img)
# Top-level function for the threaded learning phase.
def do_learn(entity):
    entity.learn(entity.energy)
    return True
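# Threading note: ThreadPoolExecutor can overlap the per-entity backward
# passes because PyTorch releases the GIL inside its C++ kernels; with
# networks this small, however, thread overhead may outweigh the speedup.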
# --- MAIN LOOP ---
def main():
    organisms = [
        Organism(random.randint(0, GRID_SIZE - 1), random.randint(0, GRID_SIZE - 1))
        for _ in range(NUM_ORGANISMS)
    ]
    competitors = [
        Competitor(random.randint(0, GRID_SIZE - 1), random.randint(0, GRID_SIZE - 1))
        for _ in range(NUM_COMPETITORS)
    ]
    predators = [Predator() for _ in range(NUM_INITIAL_PREDATORS)]
    steps_per_episode = 480
    for episode in range(1000):
        env = create_environment()
        static_enemies = place_enemies()
        for entity in organisms + competitors:
            entity.has_mated = False
        for step in range(steps_per_episode):
            if cv2.waitKey(1) & 0xFF == ord("q"):
                cv2.destroyAllWindows()
                return
            # has_mated is also reset every step, so each entity can mate at
            # most once per simulation step.
            for entity in organisms + competitors:
                entity.has_mated = False
            signals_preys = [
                (e.x, e.y) for e in (organisms + competitors) if e.energy > 0
            ]
            for entity in organisms:
                if entity.energy > 0:
                    entity.act(env, static_enemies, signals_preys)
            for entity in competitors:
                if entity.energy > 0:
                    entity.act(env, static_enemies, signals_preys)
            # New food generation: replenish food so that total food does not exceed NUM_FOOD.
            current_food = int(np.sum(env == 1))
            if current_food < NUM_FOOD:
                food_to_add = min(NEW_FOOD_PER_STEP, NUM_FOOD - current_food)
                added = 0
                attempts = 0
                while added < food_to_add and attempts < 1000:
                    x = random.randint(0, GRID_SIZE - 1)
                    y = random.randint(0, GRID_SIZE - 1)
                    if env[y, x] == 0:
                        env[y, x] = 1
                        added += 1
                    attempts += 1
            # Mating for primary population.
            cell_dict = {}
            for org in organisms:
                if org.energy > 0:
                    cell_dict.setdefault((org.x, org.y), []).append(org)
            offspring = []
            for cell, orgs in cell_dict.items():
                males = [o for o in orgs if o.sex == "M" and not o.has_mated]
                females = [o for o in orgs if o.sex == "F" and not o.has_mated]
                if males and females:
                    offspring.append(mate(random.choice(males), random.choice(females)))
                    for o in males + females:
                        o.has_mated = True
            organisms.extend(offspring)
            # Mating for competing population.
            cell_dict_comp = {}
            for comp in competitors:
                if comp.energy > 0:
                    cell_dict_comp.setdefault((comp.x, comp.y), []).append(comp)
            comp_offspring = []
            for cell, comps in cell_dict_comp.items():
                males = [c for c in comps if c.sex == "M" and not c.has_mated]
                females = [c for c in comps if c.sex == "F" and not c.has_mated]
                if males and females:
                    comp_offspring.append(
                        mate_competitor(random.choice(males), random.choice(females))
                    )
                    for c in males + females:
                        c.has_mated = True
            competitors.extend(comp_offspring)
            # Population control: disease outbreak if prey population is too high.
            live_preys = [e for e in (organisms + competitors) if e.energy > 0]
            if len(live_preys) > POPULATION_THRESHOLD:
                for entity in live_preys:
                    if not entity.immune:
                        if random.random() < DISEASE_DEATH_PROB:
                            entity.energy = 0
                        elif random.random() < IMMUNITY_GRANT_PROB:
                            entity.immune = True
            # Predator dynamics: predators reproduce more if prey population increases.
            prey_count = len(live_preys)
            repro_multiplier = max(1, prey_count // POPULATION_THRESHOLD)
            new_predators = []
            for pred in predators:
                pred.act(organisms + competitors)
                for _ in range(repro_multiplier):
                    child = pred.reproduce()
                    if child:
                        new_predators.append(child)
            predators.extend(new_predators)
            predators = [pred for pred in predators if pred.energy > 0]
            if step % RENDER_INTERVAL == 0:
                render(env, organisms, competitors, predators)
                cv2.waitKey(1)
        # Learning phase using ThreadPoolExecutor.
        with ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(do_learn, entity)
                for entity in (organisms + competitors)
            ]
            for future in futures:
                future.result()
        # Natural selection for primary population: keep top performers.
        organisms.sort(key=lambda o: o.energy, reverse=True)
        survivors = organisms[:NUM_ORGANISMS]
        new_orgs = []
        males = [o for o in survivors if o.sex == "M"]
        females = [o for o in survivors if o.sex == "F"]
        needed = NUM_ORGANISMS - len(survivors)
        for _ in range(needed):
            if males and females:
                new_orgs.append(mate(random.choice(males), random.choice(females)))
            else:
                parent = random.choice(survivors)
                child = Organism(
                    random.randint(0, GRID_SIZE - 1), random.randint(0, GRID_SIZE - 1)
                )
                child.brain = mutate(parent.brain, mutation_rate=0.05)
                # Rebind the optimizer to the mutated brain's parameters.
                child.optimizer = optim.Adam(child.brain.parameters(), lr=0.01)
                new_orgs.append(child)
        organisms = survivors + new_orgs
        # Natural selection for competing population.
        competitors.sort(key=lambda c: c.energy, reverse=True)
        comp_survivors = competitors[:NUM_COMPETITORS]
        new_comps = []
        males_comp = [c for c in comp_survivors if c.sex == "M"]
        females_comp = [c for c in comp_survivors if c.sex == "F"]
        needed_comp = NUM_COMPETITORS - len(comp_survivors)
        for _ in range(needed_comp):
            if males_comp and females_comp:
                new_comps.append(
                    mate_competitor(
                        random.choice(males_comp), random.choice(females_comp)
                    )
                )
            else:
                parent = random.choice(comp_survivors)
                child = Competitor(
                    random.randint(0, GRID_SIZE - 1), random.randint(0, GRID_SIZE - 1)
                )
                child.brain = mutate(parent.brain, mutation_rate=0.05)
                child.optimizer = optim.Adam(child.brain.parameters(), lr=0.01)
                new_comps.append(child)
        competitors = comp_survivors + new_comps
        # Reset per-episode state.
        for entity in organisms + competitors:
            entity.energy = 100
            entity.trajectory = []
            entity.memory = []
            entity.has_mated = False
            entity.age = 0
            entity.starvation_steps = 0
    cv2.destroyAllWindows()
if __name__ == "__main__":
    main()
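To run either script, the only third-party dependencies are OpenCV, NumPy, and PyTorch. A typical setup (PyPI package names; the file name below stands in for wherever you saved the gist) is:

pip install opencv-python numpy torch
python digital_life.py  # press 'q' in the simulation window to quit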