# Allenfenqu/partition_main.py
# def partition_diversity(partition):
import numpy as np
import pandas as pd
from Code import Code
from variancediy import variancediy
from TOPSIS import TOPSIS
from adregionf import adregionf
from select import select
from crossnew import crossnew
from mutationnew import mutationnew
from hui_fun import liziqunfun
from hui_xfun import xfun
import os
import time
from three_dimensional_TOPSIS import three_dimensional_TOPSIS
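# Pipeline overview (three stages, matching the step prints below):
#   Step 1 - data preparation and initial partitioning: merge the bus GPS files,
#            attach average speeds to links, then grow Initial_partitions subregions
#            around K-Means seeds, ranking candidate roads with TOPSIS.
#   Step 2 - combination optimization: a genetic algorithm merges the subregions
#            down to final_partitions regions.
#   Step 3 - boundary adjustment: boundary links are greedily reassigned between
#            neighbouring regions while the objective from xfun keeps improving.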
bus_info = pd.DataFrame()
file_range = range(211, 236)
file_directory = "E:\\分方向的mfd的数据\\bus_info\\03_pcross"
for file_number in file_range:
    file_name = f"20190903_{file_number:03d}.csv"  # build the file name from the naming pattern
    file_path = os.path.join(file_directory, file_name)  # build the full file path
    temp_dataframe = pd.read_csv(file_path)  # read the CSV file with pandas
    bus_info = pd.concat([bus_info, temp_dataframe], ignore_index=True)  # append temp_dataframe to bus_info
links = pd.read_csv('links.csv')
# Average bus_info column 6 (index 5) over rows that share the same value in column 10 (index 9)
average_values = bus_info.groupby(bus_info.columns[9])[bus_info.columns[5]].mean()
# Look up each key in column 6 of links and write its average into column 5 of the matching rows
for index, value in average_values.items():
    links.loc[links[links.columns[5]] == index, links.columns[4]] = value
# links = links[links[links.columns[5]].isin(average_values.index)]
# Save the result to a new CSV file
links.to_csv('links_processed.csv', index=False)
# final_partitions is the number of final partitions; Initial_partitions is the number of initial subregions
final_partitions = 8
Initial_partitions = final_partitions * 5
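# The network is deliberately over-segmented: 5 * final_partitions initial subregions
# gives the genetic algorithm in step 2 enough granularity to merge them into
# final_partitions homogeneous regions.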
# links = pd.read_csv('links.csv')
links = links.to_numpy()
df = pd.read_excel('stationidwan.xls')
stationid = df.to_numpy()
# Label road start and end nodes with sequence numbers, e.g. 1 to 500.
# Because one intersection can be the start or end of several roads,
# the same intersection receives the same label.
node = np.concatenate((links[:, :2], links[:, 2:4]), axis=0)  # stack start-node and end-node coordinate pairs along axis 0
noi = 1
node = np.hstack((node, np.zeros((len(node), 1))))  # append a label column initialized to 0
print(node.shape[0])
for i in range(node.shape[0]):  # node.shape[0] is the number of rows of node
    print(i)
    # node[:i, 0] is the first column of all rows before row i;
    # np.where() returns a tuple of index arrays, and [0] takes the first one
    a = np.where(node[:i, 0] == node[i, 0])[0]
    b = np.where(node[:i, 1] == node[i, 1])[0]
    c = np.intersect1d(a, b)  # indices of earlier rows with the same (x, y) pair
    if c.size > 0:
        node[i, 2] = node[c.min(), 2]  # reuse the label of the first matching intersection
    else:
        node[i, 2] = noi  # otherwise assign a fresh label
        noi += 1
node = np.concatenate((node[:int(len(node) / 2), 2].reshape(-1, 1), node[int(len(node) / 2):, 2].reshape(-1, 1)),
                      axis=1)  # column 0: start-node label, column 1: end-node label
np.save('node.npy', node)
# Append three zero columns to links so the extra fields of yanlinks can be assembled below
links = np.hstack((links, np.zeros((len(links), 3))))
yanlinks = np.concatenate((node, links[:, [5, 6, 7, 4, 0, 1, 2, 3]], np.zeros((len(links), 4))), axis=1)
yanlinks[:, 4] = np.arange(1, len(yanlinks) + 1)  # column 5: sequential link id
road = np.arange(1, node.shape[0] + 1)
adjacency = np.zeros((len(road), len(road)))
# Initialize the partition: two links are adjacent if they share an intersection
for i in range(len(road)):
    temp1 = np.where(node[:, 0] == node[i, 0])[0]  # links whose start node matches this link's start node
    temp2 = np.where(node[:, 1] == node[i, 0])[0]  # links whose end node matches this link's start node
    temp3 = np.where(node[:, 0] == node[i, 1])[0]  # links whose start node matches this link's end node
    temp4 = np.where(node[:, 1] == node[i, 1])[0]  # links whose end node matches this link's end node
    temp = np.unique(np.intersect1d(np.arange(i + 1, node.shape[0]), np.concatenate((temp1, temp2, temp3, temp4))))
    if len(temp) > 0:
        adjacency[i, temp] = 1
        adjacency[temp, i] = 1
row_sums = np.sum(adjacency, axis=1)
# Indices of all-zero rows (links with no neighbours)
zero_row_indices = np.where(row_sums == 0)[0]
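# Note: zero_row_indices is computed here but not used further below; isolated links
# simply stay unassigned during region growing and are filtered out afterwards.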
from sklearn.cluster import KMeans

N = Initial_partitions  # number of clusters
# Cluster the link coordinates (columns 7 and 8 of yanlinks, i.e. longitude and latitude)
# into N groups. Fit once and reuse the model: fitting twice, as the original code did,
# can yield inconsistent labels and centers because KMeans is randomly initialized.
kmeans = KMeans(n_clusters=N).fit(yanlinks[:, [6, 7]])
Cluster_Label, idx = kmeans.labels_, kmeans.cluster_centers_  # idx[k] = (lon, lat) of center k
# df = pd.read_csv('idx.csv', header=None)
# idx = df.to_numpy()
# Distance (in meters, at roughly 111 km per degree) from every cluster center to every link
dis = 111000 * np.sqrt(
    (yanlinks[:, 6] - idx[:, 0].reshape(N, 1)) ** 2 + (yanlinks[:, 7] - idx[:, 1].reshape(N, 1)) ** 2)
# For each cluster center: mm is the distance to the nearest link, nn the index of that link
mm, nn = np.min(dis, axis=1, keepdims=True), np.argmin(dis, axis=1)
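# dis has shape (N, number_of_links), so mm/nn are per-center quantities:
# nn[x] is the link closest to cluster center x and is used below to seed cluster x.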
data = links[:, 4]  # column 5 of links is the road length
m = data.shape[0] if data.size > 0 else 0
n = 1
pattern = np.zeros((m, n))  # one row per road
pattern[:, 0] = data  # column 0: road length
pattern = np.hstack((pattern, np.zeros((len(pattern), 1))))
pattern[:, 1] = -1  # column 1: cluster id, -1 means unassigned
center = np.zeros((N, n))  # initialize the cluster centers
pattern[:, :n] = data.reshape(-1, n)
center = np.hstack((center, np.zeros((len(center), 1))))
# Seed every cluster with the link closest to its K-Means center
for x in range(0, N):
    center[x, 1] = nn[x]                    # index of the seed link
    center[x, 0] = data[int(center[x, 1])]  # length of the seed link
    pattern[int(center[x, 1]), 1] = x       # assign the seed link to cluster x
# Initialize per-cluster distances and counts
distance = np.zeros(N)
num = np.zeros(N)
# Initialize the new cluster centers
new_center = np.zeros((N, n))
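# Region growing: in every pass each of the N clusters absorbs at most one adjacent
# unassigned road. Candidates are ranked by TOPSIS on two criteria: (0) how much the
# candidate would increase the cluster's length/speed variance, and (1) its distance
# to the cluster center. The loop ends once at most one road remains unassigned.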
unassigned_links = 2
while unassigned_links > 1:
    print(unassigned_links)
    for x in range(0, N):  # x is the index of the cluster currently being grown
        try:
            # Roads adjacent to cluster x that have not been assigned to any cluster yet
            selected_links = adjacency[pattern[:, 1] == x, :]
            unassigned_roads = np.where(np.sum(selected_links, axis=0) > 0)[0]
            selected_links = np.where(pattern[:, 1] > -1)[0]
            unassigned_roads = np.setdiff1d(unassigned_roads, selected_links)
            selected_links = np.where(pattern[:, 1] == x)[0]  # roads already assigned to cluster x
            # Collect the bus speeds observed on the roads already in cluster x
            bus = []
            for y in range(len(selected_links)):
                visited_links = selected_links[y]
                visited_links_indices = np.where(stationid[:, 5] == visited_links + 1)[0]
                for row in visited_links_indices:
                    bus.append(stationid[row, 6])
            # Score each candidate road on two criteria:
            # column 0: heterogeneity (length variance, combined with speed variance when available)
            # column 1: distance from the candidate road to the cluster center
            road_evaluation = np.zeros((len(unassigned_roads), 2))
            for k in range(len(unassigned_roads)):
                candidate_speeds = stationid[stationid[:, 5] == unassigned_roads[k] + 1, 6]
                merged_speeds = np.concatenate([bus, candidate_speeds])
                abrr = pattern[selected_links, 0].tolist() + [pattern[unassigned_roads[k], 0]]
                if len(merged_speeds) > 1 and np.var(merged_speeds, ddof=1) > 0:
                    road_evaluation[k, 0] = np.var(abrr, ddof=1) * np.var(merged_speeds, ddof=1)
                elif np.var(bus, ddof=1) > 0:  # speed variance of the roads already assigned
                    road_evaluation[k, 0] = np.var(abrr, ddof=1) + np.var(bus, ddof=1)
                else:
                    road_evaluation[k, 0] = np.var(abrr, ddof=1)
                road_evaluation[k, 1] = 111000 * np.sqrt(np.sum(
                    (yanlinks[yanlinks[:, 4] == unassigned_roads[k] + 1, 6:8] - idx[x, :]) ** 2))
            if road_evaluation.shape[0] > 1:
                m, n = TOPSIS(road_evaluation)  # n is the 1-based index of the best alternative, m its score
            else:
                n = 1
            pattern[unassigned_roads[n - 1], 1] = x  # absorb the best candidate into cluster x
        except Exception:
            continue
    unassigned_links = np.sum(pattern[:, 1] == -1)
# pattern[:, 1] held cluster ids 0..N-1; shift them to 1..N
pattern[:, 1] = pattern[:, 1] + 1
np.save('pattern.npy', pattern)
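# At this point pattern[:, 0] holds the road lengths and pattern[:, 1] the subregion id
# of every road: 1..N for assigned roads, 0 for roads that were never reached.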
# pattern = pd.read_excel('pattern.xls').to_numpy()
yanlinks[:, 3] = links[:, 9]
yanlinks[:, 10] = pattern[:, 1]  # column 11: initial subregion id of each link
data_path = r''
df2 = pd.read_csv(data_path + 'links_processed.csv')
zero_rows = yanlinks[:, 10] == 0
# Indices of the rows to delete (links that were never assigned to a subregion)
deleted_rows_indices = np.where(zero_rows)[0]
# Drop those rows from the processed links table as well
df2 = df2.drop(deleted_rows_indices, errors='ignore')
df2.to_csv(data_path + 'links_test1.csv', index=False)
yanlinks = yanlinks[yanlinks[:, 10] != 0]
yanlinks = yanlinks[yanlinks[:, 10] != -1, :]
# Map every bus station to the subregion of the link it lies on (-1 if the link was dropped)
for i in range(len(stationid)):
    try:
        stationid[i, 7] = yanlinks[np.where(yanlinks[:, 4] == stationid[i, 5]), 10][0][0]
    except Exception:
        stationid[i, 7] = -1
road = np.unique(np.concatenate((yanlinks[:, 1], yanlinks[:, 0]), axis=0))
adjacency = np.zeros((len(road), len(road)))
adregion = np.zeros((int(np.max(yanlinks[:, 4])), int(np.max(yanlinks[:, 4]))))
for i in range(len(yanlinks[:, 0])):
    temp1 = np.where(node[:, 0] == node[i, 0])[0]
    temp2 = np.where(node[:, 1] == node[i, 0])[0]
    temp3 = np.where(node[:, 0] == node[i, 1])[0]
    temp4 = np.where(node[:, 1] == node[i, 1])[0]
    temp = np.unique(np.intersect1d(np.arange(i + 1, node.shape[0]), np.concatenate((temp1, temp2, temp3, temp4))))
    if len(temp) > 0:
        adregion[i, temp] = 1
        adregion[temp, i] = 1
# adregion records the adjacency between links
np.save('adregion.npy', adregion)
# Weight each column of adregion with the subregion id of the corresponding link
for i in range(len(yanlinks[:, 1])):
    adregion[:, int(yanlinks[i, 4]) - 1] = adregion[:, int(yanlinks[i, 4]) - 1] * int(yanlinks[i, 10])
subregion_adj = np.zeros((Initial_partitions, Initial_partitions))
# Count how often each pair of subregions appears together in a row of adregion
# (the count measures whether two subregions are strongly or weakly connected)
for i in range(len(adregion[:, 1])):
    a = adregion[i, :]
    a = np.unique(a)
    a = a[a != 0]
    if a.shape[0] > 1:
        for j in range(len(a)):
            for u in range(len(a)):
                if j != u:
                    # subregion_adj holds the subregion adjacency; larger values mean stronger connections
                    subregion_adj[int(a[j]) - 1, int(a[u]) - 1] += 1
                    subregion_adj[int(a[u]) - 1, int(a[j]) - 1] += 1
# Keep a copy before thresholding
directed_adjacency_matrix = subregion_adj.copy()
# Ignore weakly connected subregion pairs
min_value = np.min(np.max(subregion_adj, axis=0)) - 2
subregion_adj[subregion_adj < min_value] = 0
subregion_adj[subregion_adj > 1] = 1
directed_adjacency_matrix[directed_adjacency_matrix > 1] = 1
np.save('adr.npy', subregion_adj)
np.save('dadr.npy', directed_adjacency_matrix)
np.save('yanlinks.npy', yanlinks)
df = pd.DataFrame(yanlinks)
df.to_csv('yanlinks_initial_partition.csv', index=False, header=False)
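# Step 1 outputs: node.npy, pattern.npy, adregion.npy, adr.npy, dadr.npy, yanlinks.npy
# and yanlinks_initial_partition.csv; adregion.npy is reloaded in the evaluation below.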
####################################################################################################################
start = time.time()
# Combination optimization: merge the subregions with a genetic algorithm
print('Step 2: combination optimization')
maxgen = 10  # number of generations
sizepop = 2  # population size
pcross = 0.8  # crossover probability
pmutation = 0.8  # mutation probability
lenchrom = [1] * Initial_partitions  # gene string length per variable
unassigned_roads = np.tile([[1, final_partitions]], (Initial_partitions, 1))  # value range of each gene
individuals = {'fitness': np.zeros((sizepop, 1)), 'chrom': []}
avgfitness = []  # average population fitness
bestfitness = []  # best population fitness
bestchrom = None  # best chromosome of the population
for i in range(sizepop):
    individuals['chrom'].append([Code(lenchrom, unassigned_roads, final_partitions)])  # random chromosome for each individual
print('Initialization finished')
repeated_row1 = np.array([[0]])
inff = np.repeat([repeated_row1], sizepop, axis=0)
inff_np = np.array(inff, dtype=np.int64)
chromosome_results = np.zeros((sizepop, 3))
for j in range(sizepop):
    chromosome_results[j, :] = liziqunfun(individuals['chrom'][j], j)
inff_np = three_dimensional_TOPSIS(chromosome_results, None)
inff_np[inff_np == 0] = 1
print('Initial fitness evaluation finished')
# Guard against undefined scores: replace any NaN fitness with a large penalty value.
# (The original code wrote the penalty into the unused inff array; inff_np is what is read below.)
inff_np[np.isnan(inff_np)] = 1000000
for j in range(sizepop):
    individuals['fitness'][j] = inff_np[j]
# Locate the best chromosome
bestfitness = np.min(individuals['fitness'])
bestindex = np.argmin(individuals['fitness'])
bestchrom = individuals['chrom'][bestindex]
avgfitness = np.sum(individuals['fitness']) / sizepop
individuals['chrom'] = np.array(individuals['chrom'])
# Record the best and the average fitness of each generation
trace = {'avgfitness': [], 'bestfitness': []}
historychorm = individuals['chrom'].reshape(sizepop, -1)
historyfitness = individuals['fitness'].reshape(-1, 1)
bighistorychorm = individuals['chrom']
bighistoryfitness = np.hstack([individuals['fitness'].reshape((-1, 1)), np.zeros((sizepop, 1))])
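# historychorm / historyfitness act as a memo table: chromosomes that reappear in a
# later generation are looked up instead of being re-evaluated with liziqunfun,
# which is the expensive call in the loop below.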
chromosome_results1 = np.zeros((sizepop, 3))
for i in range(1, maxgen + 1):
    print("Generation:", i)
    # Selection operation
    individuals = select(individuals, sizepop)
    avgfitness = np.sum(individuals['fitness']) / sizepop
    # Crossover operation
    individuals['chrom'] = crossnew(pcross, lenchrom, individuals['chrom'], sizepop, unassigned_roads, final_partitions)
    # Mutation operation
    individuals['chrom'] = mutationnew(pmutation, lenchrom, individuals['chrom'], sizepop, unassigned_roads, final_partitions)
    inff = np.zeros(sizepop)
    for j in range(sizepop):
        # Evaluate only unseen chromosomes; otherwise reuse the cached fitness
        if len(np.where(np.sum(np.abs(historychorm - individuals['chrom'][j, :]), axis=1) == 0)[0]) < 1:
            chromosome_results1[j, :] = liziqunfun(individuals['chrom'][j, :], j)
        else:
            a = np.where(np.sum(np.abs(historychorm - individuals['chrom'][j, :]), axis=1) == 0)[0]
            chromosome_results1[j, :] = historyfitness[a[0], 0]
    inff = three_dimensional_TOPSIS(chromosome_results, chromosome_results1)
    inff[inff == 0] = 1
    inff[np.isnan(inff)] = 1000000
    result = np.vstack((chromosome_results, chromosome_results1))
    chromosome_results = np.amin(result, axis=0)
    for j in range(sizepop):
        individuals['fitness'][j] = inff[j]
    # Find the best and worst fitness chromosomes and their positions in the population
    newbestfitness, newbestindex = np.min(individuals['fitness']), np.argmin(individuals['fitness'])
    worstfitness, worstindex = np.max(individuals['fitness']), np.argmax(individuals['fitness'])
    # Replace the best chromosome from the previous generation
    if bestfitness > newbestfitness:
        bestfitness = newbestfitness
        bestchrom = individuals['chrom'][newbestindex]
        if isinstance(bestchrom, list):
            bestchrom = bestchrom[0]
    # Replace the worst chromosome with the best chromosome (elitism)
    individuals['chrom'][worstindex] = bestchrom
    individuals['fitness'][worstindex] = bestfitness
    avgfitness = np.sum(individuals['fitness']) / sizepop
    # Add unseen chromosomes and their fitness values to the history cache
    for h in range(sizepop):
        if len(np.where(np.sum(np.abs(historychorm - individuals['chrom'][h, :]), axis=1) == 0)[0]) < 1:
            historychorm = np.vstack((historychorm, individuals['chrom'][h, :]))
            historyfitness = np.vstack((historyfitness, individuals['fitness'][h].reshape(-1, 1)))
    trace['avgfitness'].append(avgfitness)
    trace['bestfitness'].append(bestfitness)
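# Each generation applies selection, crossover and mutation, scores the population
# with three_dimensional_TOPSIS over the three liziqunfun objectives, and keeps the
# best chromosome found so far by overwriting the worst individual with it.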
################################################################################################
end = time.time()
start1 = time.time()
print('Step 3: boundary adjustment')
if isinstance(bestchrom, list):
    bestchrom = bestchrom[0]
# Use the best chromosome bestchrom from the GA to fill column 12 of yanlinks
# (the final region id of each link), then rebuild the adjacency matrix adregion
# from the node information recorded in yanlinks.
for i in range(len(yanlinks)):
    yanlinks[i, 11] = bestchrom[int(yanlinks[i, 10]) - 1]
np.save('yanlinks.npy', yanlinks)
df = pd.DataFrame(yanlinks)
df.to_csv('yanlinks_merge.csv', index=False, header=False)
adregion = np.zeros((len(yanlinks), len(yanlinks)))
for i in range(len(yanlinks)):
    temp1 = np.where(node[:, 0] == node[i, 0])[0]
    temp2 = np.where(node[:, 1] == node[i, 0])[0]
    temp3 = np.where(node[:, 0] == node[i, 1])[0]
    temp4 = np.where(node[:, 1] == node[i, 1])[0]
    temp = np.unique(np.intersect1d(np.arange(i + 1, len(node)), np.concatenate((temp1, temp2, temp3, temp4))))
    # temp is a one-dimensional array of neighbour indices
    if temp.size > 0:
        adregion[i, temp - 1] = 1
        adregion[temp - 1, i] = 1
# adregionf rebuilds the adregion matrix recording the connections between nodes,
# keeping the connection weights consistent with the result of the genetic algorithm.
adregion = adregionf(yanlinks, node)
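# Note: the adjacency matrix hand-built in the loop above is immediately replaced by
# the one returned from adregionf, which applies the region ids from bestchrom.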
behindfun = 0
solution = xfun(yanlinks, node)
frontfun = solution
noi = 1
while behindfun < frontfun:
    solution = xfun(yanlinks, node)
    frontfun = solution
    x = frontfun
    print(noi)
    noi += 1
    for i in range(len(yanlinks)):
        if i == 2:  # NOTE: kept from the original code; it stops the scan after the first two links
            break
        ar = yanlinks[i, 4] - 1  # 0-based link id
        af = yanlinks[i, 11]     # current region of the link
        ad = adregion[int(ar), :]
        ad[int(ar)] = af
        a = np.unique(ad)
        if len(a) > 2:  # the link touches at least one region other than its own
            unique_elements, counts = np.unique(ad, return_counts=True)
            # pandas behaves differently from numpy here: keep values and counts side by side
            unia = pd.DataFrame({'Value': unique_elements, 'Count': counts})
            unia = unia[unia.iloc[:, 0] != 0]
            unia = unia.values
            if unia[unia[:, 0] == af, 1] == 2:
                a = a[a != 0]
                ti = []
                # Try moving the link into each neighbouring region and keep the best objective value
                for j in range(len(a)):
                    if a[j] == af:
                        tiao = x  # keeping the current region costs the current objective value
                    else:
                        yanlinks[i, 11] = a[j]
                        solution = xfun(yanlinks, node)
                        tiao = solution
                    ti.append(tiao)
                x, y = np.min(ti), np.argmin(ti)
                yanlinks[i, 11] = a[y]  # commit the assignment with the lowest objective value
                adregion = adregionf(yanlinks, node)
    behindfun = x
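# The loop above is a greedy local search: for each boundary link it tentatively moves
# the link into every neighbouring region, recomputes the objective with xfun, keeps
# the cheapest assignment, and repeats whole passes until the objective stops improving.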
df = pd.DataFrame(yanlinks)
df1 = pd.DataFrame(bestchrom)
df.to_csv('yanlinks_boundary_adjustment.csv', index=False, header=False)
df1.to_csv("bestchrom.csv", index=False, header=False)
# df.to_csv(f'yanlinks_boundary_adjustment_{partition}.csv', index=False, header=False)
# df.to_csv(f"bestchrom_{partition}.csv", index=False, header=False)
end1 = time.time()
# Use numpy's unique to get all distinct values of column 12 (index 11) and their indices
unique_values, unique_indices = np.unique(yanlinks[:, 11], return_index=True)
adregion_ = np.load('adregion.npy')
for i in range(len(yanlinks[:, 1])):
    adregion_[:, int(yanlinks[i, 4]) - 1] = adregion_[:, int(yanlinks[i, 4]) - 1] * int(yanlinks[i, 11])
region_adj = np.zeros((final_partitions, final_partitions))
# Build the 0/1 adjacency matrix of the final regions
for i in range(len(adregion_[:, 1])):
    a = adregion_[i, :]
    a = np.unique(a)
    a = a[a != 0]
    if a.shape[0] > 1:
        for j in range(len(a)):
            for u in range(len(a)):
                if j != u:
                    # region_adj marks which final regions are adjacent
                    region_adj[int(a[j]) - 1, int(a[u]) - 1] = 1
                    region_adj[int(a[u]) - 1, int(a[j]) - 1] = 1
# Average similarity between each region and its pooled neighbours (Asb)
Asb = 0
for i in unique_values:
    wu = np.where(region_adj[int(i) - 1, :] == 1)  # one-element tuple holding the neighbour indices
    smrjj_divide_smrjj_ = 0
    for element in wu:
        # Pool the speeds (column 6, index 5) of all neighbouring regions
        selected_values_list = [yanlinks[yanlinks[:, 11] == e + 1][:, 5] for e in element]
        selected_values = np.concatenate(selected_values_list)
        average = np.mean(selected_values)
        variance = np.var(selected_values)
        selected_values1 = yanlinks[yanlinks[:, 11] == i][:, 5]
        average1 = np.mean(selected_values1)
        variance1 = np.var(selected_values1)
        smrjj = 2 * variance1
        smrjj_ = variance + variance1 + (average - average1) ** 2
        smrjj_divide_smrjj_one = smrjj / smrjj_
        smrjj_divide_smrjj_ += smrjj_divide_smrjj_one
    num_elements = len(wu)  # np.where returns a one-element tuple, so this is 1
    Asb_one = smrjj_divide_smrjj_ / num_elements
    Asb += Asb_one
# Total within-region speed variance (Tvb)
Tvb = 0
for i in unique_values:
    selected_values = yanlinks[yanlinks[:, 11] == i][:, 5]
    variance = np.var(selected_values)
    Tvb += variance
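# Evaluation metrics as computed above (speeds are yanlinks column 6, index 5):
#   Asb: for each region i with mean mu_i and variance var_i, pool the speeds of its
#        neighbours (mean mu_n, variance var_n) and add
#        2*var_i / (var_i + var_n + (mu_i - mu_n)^2);
#        the sum is larger when regions resemble their neighbours.
#   Tvb: sum over regions of the within-region speed variance (smaller = more homogeneous).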
print(f'Best fitness: {bestfitness}')
print('Partition merging time:', end - start, 'seconds')
print('Boundary adjustment time:', end1 - start1, 'seconds')