import numpy as np
import pandas as pd
from Code import Code
from variancediy import variancediy
from TOPSIS import TOPSIS
from adregionf import adregionf
from select import select
from crossnew import crossnew
from mutationnew import mutationnew
from hui_fun import liziqunfun
from hui_xfun import xfun

# final_partitions is the number of final partitions,
# Initial_partitions is the number of initial partitions
final_partitions = 8
Initial_partitions = final_partitions * 5

# Load data
df = pd.read_csv('shenzhen_districts_roads.csv')
links = df.to_numpy()
df = pd.read_excel('stationidwan.xls')
stationid = df.to_numpy()

# Number the start and end nodes of every road, e.g. from 1 to 500.
# Because one intersection can be the start or the end of several roads,
# the same intersection receives the same label.
node = np.concatenate((links[:, :2], links[:, 2:4]), axis=0)
# np.concatenate joins the two sub-arrays along axis 0 (row-wise).
noi = 1
node = np.hstack((node, np.zeros((len(node), 1))))
for i in range(node.shape[0]):  # node.shape[0] is the number of rows of node
    # node[:i, 0] is the first column of all rows before row i;
    # np.where returns a tuple of index arrays, [0] takes the first one
    a = np.where(node[:i, 0] == node[i, 0])[0]
    b = np.where(node[:i, 1] == node[i, 1])[0]
    c = np.intersect1d(a, b)  # intersect1d returns the intersection of two arrays
    if c.size > 0:
        # this intersection was seen before: reuse its label
        node[i, 2] = node[min(c), 2]
    else:
        node[i, 2] = noi
        noi += 1
node = np.concatenate((node[:int(len(node) / 2), 2].reshape(-1, 1),
                       node[int(len(node) / 2):, 2].reshape(-1, 1)), axis=1)
np.save('node.npy', node)

# links needs extra zero columns so that yanlinks can be built below
links = np.hstack((links, np.zeros((len(links), 3))))
yanlinks = np.concatenate((node, links[:, [5, 6, 7, 4, 0, 1, 2, 3]],
                           np.zeros((len(links), 4))), axis=1)
yanlinks[:, 4] = np.arange(1, len(yanlinks) + 1)
road = np.arange(1, node.shape[0] + 1)
adjacency = np.zeros((len(road), len(road)))

# Initialise the partitions: two links are adjacent if they share a node
for i in range(len(road)):
    temp1 = np.where(node[:, 0] == node[i, 0])[0]  # rows whose start node equals this start node
    temp2 = np.where(node[:, 1] == node[i, 0])[0]  # rows whose end node equals this start node
    temp3 = np.where(node[:, 0] == node[i, 1])[0]  # rows whose start node equals this end node
    temp4 = np.where(node[:, 1] == node[i, 1])[0]  # rows whose end node equals this end node
    temp = np.unique(np.intersect1d(np.arange(i + 1, node.shape[0]),
                                    np.concatenate((temp1, temp2, temp3, temp4))))
    if len(temp) > 0:
        adjacency[i, temp] = 1
        adjacency[temp, i] = 1

from sklearn.cluster import KMeans

N = Initial_partitions  # number of clusters
# Cluster columns 7 and 8 of yanlinks (longitude and latitude) into N groups.
# idx holds the N cluster centres (column 0: longitude, column 1: latitude)
# and is used below to compute the distance of every link to every centre.
# Fit once so that labels and centres come from the same clustering.
kmeans = KMeans(n_clusters=N).fit(yanlinks[:, [6, 7]])
Cluster_Label, idx = kmeans.labels_, kmeans.cluster_centers_

# Distance of every link to every cluster centre (~111 km per degree)
dis = 111000 * np.sqrt(
    (yanlinks[:, 6] - idx[:, 0].reshape(N, 1)) ** 2
    + (yanlinks[:, 7] - idx[:, 1].reshape(N, 1)) ** 2)
# Nearest link to each centre: mm is the minimum distance, nn its index
mm, nn = np.min(dis, axis=1, keepdims=True), np.argmin(dis, axis=1)

data = links[:, 4]  # column 5 of links is the road length
m = data.shape[0] if data.size > 0 else 0
n = 1
pattern = np.zeros((m, n))
pattern[:, 0] = data  # first column: road lengths
pattern = np.hstack((pattern, np.zeros((len(pattern), 1))))
pattern[:, 1] = -1  # second column: assigned region, -1 = unassigned
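# --- Illustrative sketch (not part of the pipeline; _demo_* names are
# hypothetical). The step above fits K-Means once and then seeds every region
# with the link nearest to its centre; a minimal version of that
# nearest-to-centre lookup on toy coordinates:
_demo_xy = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
_demo_km = KMeans(n_clusters=2, n_init=10).fit(_demo_xy)
_demo_dis = np.linalg.norm(_demo_km.cluster_centers_[:, None, :] - _demo_xy[None, :, :], axis=2)
_demo_seed = np.argmin(_demo_dis, axis=1)  # index of the nearest point per centre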
center = np.zeros((N, n))  # initialise the cluster centres
pattern[:, :n] = data.reshape(-1, n)
for x in range(0, N):
    center = np.hstack((center, np.zeros((len(center), 1))))
    center[x, 1] = nn[x]                 # index of the link nearest to centre x
    center[x, 0] = data[int(center[x, 1])]
    pattern[int(center[x, 1]), 1] = x    # seed region x with that link

# Initialise distances and counters
distance = np.zeros(N)
num = np.zeros(N)
# Initialise the new cluster centres
new_center = np.zeros((N, n))

unassigned_links = 10
while unassigned_links > 1:
    print(unassigned_links)
    for x in range(0, N):  # x is the index of the current region
        try:
            # links adjacent to region x that are still unassigned
            selected_links = adjacency[pattern[:, 1] == x, :]
            unassigned_roads = np.where(np.sum(selected_links, axis=0) > 0)[0]
            selected_links = np.where(pattern[:, 1] > -1)[0]
            unassigned_roads = np.setdiff1d(unassigned_roads, selected_links)
            # links already assigned to region x
            selected_links = np.where(pattern[:, 1] == x)[0]

            # collect the bus speeds recorded on the links of region x
            bus = []
            for y in range(len(selected_links)):
                visited_links = selected_links[y]
                visited_links_indices = np.where(stationid[:, 5] == visited_links)[0]
                for dengyuyisourow in visited_links_indices:
                    bus.append(stationid[dengyuyisourow, 6])

            road_evaluation = np.zeros((len(unassigned_roads), 2))
            for unassigned_road_index in range(len(unassigned_roads)):
                # bus speeds recorded on the candidate link
                candidate_speeds = stationid[
                    stationid[:, 5] == unassigned_roads[unassigned_road_index], 6]
                # bus and candidate_speeds can both be empty, in which case the
                # combined variance is undefined, hence the guard below
                if len(np.concatenate([bus, candidate_speeds])) > 1 and \
                        variancediy(np.concatenate([bus, candidate_speeds]).tolist()) > 0:
                    selected_links_lengths_float = (pattern[selected_links - 1, 0]).tolist()
                    unassigned_road_length_array = (
                        pattern[unassigned_roads[unassigned_road_index] - 1, 0]).tolist()
                    road_evaluation[unassigned_road_index, 0] = variancediy(
                        selected_links_lengths_float, unassigned_road_length_array) * variancediy(
                        np.concatenate((bus, candidate_speeds)).tolist())
                else:
                    if variancediy(bus) > 0:  # variance of speeds on assigned links
                        selected_links_lengths_float = (pattern[selected_links - 1, 0]).tolist()
                        unassigned_road_length_array = (
                            pattern[unassigned_roads[unassigned_road_index] - 1, 0]).tolist()
                        road_evaluation[unassigned_road_index, 0] = variancediy(
                            selected_links_lengths_float,
                            unassigned_road_length_array) + variancediy(bus)
                    else:
                        selected_links_lengths_float = (pattern[selected_links - 1, 0]).tolist()
                        unassigned_road_length_array = (
                            pattern[unassigned_roads[unassigned_road_index] - 1, 0]).tolist()
                        road_evaluation[unassigned_road_index, 0] = variancediy(
                            selected_links_lengths_float, unassigned_road_length_array)
                # distance from the candidate link to the centre of region x
                # (idx[x], not idx[x - 1]: the MATLAB 1-based offset is dropped)
                road_evaluation[unassigned_road_index, 1] = 111000 * np.sqrt(np.sum(
                    (yanlinks[yanlinks[:, 4] == unassigned_roads[unassigned_road_index], 6:8]
                     - idx[x, :]) ** 2))
            if road_evaluation.shape[0] > 1:
                # TOPSIS returns the best score m and the index n of the best candidate
                m, n = TOPSIS(road_evaluation)
            else:
                n = 1
            pattern[unassigned_roads[n - 1], 1] = x
        except:
            # regions with no remaining candidates are simply skipped
            continue
    unassigned_links = np.sum(pattern[:, 1] == -1)
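# --- Illustrative sketch (not part of the pipeline). The region-growing loop
# above scores each candidate link by the variance of link lengths and bus
# speeds it would create; variancediy is the project's own variance helper,
# so plain np.var stands in for it here (_cand_* names are hypothetical):
_cand_lengths = [120.0, 135.0, 128.0]        # lengths already in a region
_cand_similar, _cand_outlier = 131.0, 400.0  # two candidate links
_score_similar = np.var(_cand_lengths + [_cand_similar])
_score_outlier = np.var(_cand_lengths + [_cand_outlier])
# _score_outlier >> _score_similar, so TOPSIS favours the similar link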
# Shift region labels from 0..N-1 to 1..N
pattern[:, 1] = pattern[:, 1] + 1
np.save('pattern.npy', pattern)
# pattern = pd.read_excel('pattern.xls').to_numpy()
yanlinks[:, 10] = pattern[:, 1]

data_path = r''
df2 = pd.read_csv(data_path + 'links_test.csv')
zero_rows = yanlinks[:, 10] == 0
# indices of the rows to delete
deleted_rows_indices = np.where(zero_rows)[0]
# drop those rows from the links table
df2 = df2.drop(deleted_rows_indices, errors='ignore')
df2.to_csv(data_path + 'links_test1.csv', index=False)

yanlinks = yanlinks[yanlinks[:, 10] != 0]
yanlinks = yanlinks[yanlinks[:, 10] != -1, :]

# map every bus stop to the region of its link (-1 if the link was dropped)
for i in range(len(stationid)):
    try:
        stationid[i, 7] = yanlinks[np.where(yanlinks[:, 4] == stationid[i, 5]), 10][0][0]
    except IndexError:
        stationid[i, 7] = -1

road = np.unique(np.concatenate((yanlinks[:, 1], yanlinks[:, 0]), axis=0))
adjacency = np.zeros((len(road), len(road)))
adregion = np.zeros((int(np.max(yanlinks[:, 4])), int(np.max(yanlinks[:, 4]))))
for i in range(len(yanlinks[:, 0])):
    temp1 = np.where(node[:, 0] == node[i, 0])[0]
    temp2 = np.where(node[:, 1] == node[i, 0])[0]
    temp3 = np.where(node[:, 0] == node[i, 1])[0]
    temp4 = np.where(node[:, 1] == node[i, 1])[0]
    temp = np.unique(np.intersect1d(np.arange(i + 1, node.shape[0]),
                                    np.concatenate((temp1, temp2, temp3, temp4))))
    if len(temp) > 0:
        adregion[i, temp] = 1
        adregion[temp, i] = 1

# adregion records the adjacency between links
np.save('adregion.npy', adregion)

# Weight the adregion matrix by the region label of each link
for i in range(len(yanlinks[:, 1])):
    adregion[:, int(yanlinks[i, 4]) - 1] = adregion[:, int(yanlinks[i, 4]) - 1] * int(yanlinks[i, 10])

subregion_adj = np.zeros((Initial_partitions, Initial_partitions))

# Count how often pairs of region labels occur in the same column of adregion
# (this distinguishes strongly from weakly correlated subregions)
for i in range(len(adregion[:, 1])):
    a = adregion[i, :]
    a = np.unique(a)
    a = a[a != 0]
    if a.shape[0] > 1:
        for j in range(len(a)):
            for u in range(len(a)):
                if j != u:
                    # subregion_adj is the subregion adjacency; larger values
                    # mean stronger correlation between the two subregions
                    subregion_adj[int(a[j]) - 1, int(a[u]) - 1] += 1
                    subregion_adj[int(a[u]) - 1, int(a[j]) - 1] += 1

# keep a copy before thresholding
directed_adjacency_matrix = subregion_adj.copy()
# ignore weakly correlated subregion pairs
min_value = np.min(np.max(subregion_adj, axis=0)) - 2
subregion_adj[subregion_adj < min_value] = 0
subregion_adj[subregion_adj > 1] = 1
directed_adjacency_matrix[directed_adjacency_matrix > 1] = 1
np.save('adr.npy', subregion_adj)
np.save('dadr.npy', directed_adjacency_matrix)
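# --- Illustrative sketch (not part of the pipeline). subregion_adj above is
# built by counting, for every link, how often pairs of region labels occur
# among its neighbours; the core of one such count on a toy label row:
_demo_labels = np.unique(np.array([3, 0, 5, 3, 5]))  # labels on one link's neighbours
_demo_labels = _demo_labels[_demo_labels != 0]       # drop the "no region" label 0
_demo_pairs = [(int(p), int(q)) for p in _demo_labels for q in _demo_labels if p != q]
# each pair (p, q) would increment subregion_adj[p - 1, q - 1]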
####################################################################################################################
# Bus-route traversal / bus-stop OD detection (disabled)
#
# stationid[:, 0] is the bus-stop ID; np.floor(stationid[:, 0] / 1000000)
# maps a stop ID to its route number.
# route = np.unique(np.floor(stationid[:, 0] / 1000000))  # route numbers
# ss = []
#
# pattern = np.hstack((pattern, np.zeros((len(pattern), len(route) + 1))))
# # column 3 of pattern is the link number
# pattern[:, 2] = np.arange(1, len(pattern[:, 0]) + 1).transpose()
# b = 0
# # For every bus route, record the route on each link it passes
# for i in route:  # i is one route
#     # all stops of route i
#     vehrecord = stationid[np.floor(stationid[:, 0] / 1000000) == i, :]
#     # the links those stops belong to
#     road_number = np.unique(vehrecord[:, 5])
#     # record this route on each of those links
#     pattern[road_number.astype(int), b + 3] = i
#     b = b + 1
#
# subregion_bus_line = np.zeros((40, 20000))
# for i in range(0, N):
#     ap = pattern[pattern[:, 1] == i, 3:]
#     ap = np.ravel(ap[np.nonzero(ap)])
#     # subregion_bus_line records the bus routes of each subregion;
#     # its first row holds the routes of cluster 1
#     subregion_bus_line[i, :ap.shape[0]] = ap
#
# # Deduplicate subregion_bus_line, keeping one copy of each repeated value
# for i in range(subregion_bus_line.shape[0]):
#     unique_values, indices = np.unique(subregion_bus_line[i], return_inverse=True)
#     duplicated_indices = np.where(np.bincount(indices) > 1)[0]
#     subregion_bus_line[i][np.in1d(indices, duplicated_indices)] = 0
#
# ## Optionally export the table with pandas
# # df = pd.DataFrame(subregion_bus_line)
# # df.to_excel('subregion_bus_line.xlsx', index=False)
#
# # subregion_adj_line counts the bus routes shared between subregions
# subregion_adj_line = subregion_adj.copy()
# # rows and columns where subregion_adj == 1
# row, col = np.where(subregion_adj == 1)
#
# # count shared routes and record them in subregion_adj_line
# for i in range(len(row)):
#     nonzero_values = subregion_bus_line[int(row[i]), :] != 0
#     aab = subregion_bus_line[int(row[i]), nonzero_values]
#     nonzero_values = subregion_bus_line[int(col[i]), :] != 0
#     aac = subregion_bus_line[int(col[i]), nonzero_values]
#     aad = len(set(aab) & set(aac))
#     subregion_adj_line[int(row[i]), int(col[i])] = aad
#     subregion_adj_line[int(col[i]), int(row[i])] = aad
#
# # Zero out values of subregion_adj_line below 80% of the maximum
# max_val = np.max(subregion_adj_line)
# thresh_val = 0.8 * max_val
# subregion_adj_line[subregion_adj_line < thresh_val] = 0
# # regions above 80% of the maximum
# row1, col1 = np.where(subregion_adj_line != 0)
#
# # Merge the regions whose shared-route count exceeds the threshold:
# # stack row1 and col1 and sort by how often each source region appears
# arr = np.concatenate([row1.reshape(-1, 1), col1.reshape(-1, 1)], axis=1)
# sorted_arr = sorted(arr.tolist(), key=lambda x: arr[:, 0].tolist().count(x[0]))
# sorted_arr = [list(i) for i in sorted_arr]
#
# for i in range(len(row1)):
#     # relabel region sorted_arr[i][0] as sorted_arr[i][1]
#     pattern[np.where(pattern[:, 1] == sorted_arr[i][0]), 1] = sorted_arr[i][1]
#
# # After merging, region numbers have gaps (e.g. if region 30 is merged into
# # 6, the numbering jumps from 29 to 31), so renumber them consecutively
# pattern_sort = pattern[pattern[:, 1].argsort()]
# pattern_sort1 = pattern_sort.copy()
# for i in range(len(pattern_sort)):
#     if i == 0:
#         pattern_sort[i, 1] = i
#     else:
#         if pattern_sort[i, 1] == pattern_sort1[i - 1, 1]:
#             pattern_sort[i, 1] = pattern_sort[i - 1, 1]
#         else:
#             pattern_sort[i, 1] = pattern_sort[i - 1, 1] + 1
#
# # Push the updated pattern_sort back into yanlinks
# for i in range(pattern_sort.shape[0]):
#     # rows of yanlinks whose column 5 equals pattern_sort[i, 0]
#     matching_rows = np.where(yanlinks[:, 5] == pattern_sort[i, 0])[0]
#     # copy the region label into column 10 of the matching rows
#     yanlinks[matching_rows, 10] = pattern_sort[i, 1]
# yanlinks[yanlinks[:, 10] == 0, 10] = 1
#
# np.save('pattern_sort.npy', pattern_sort)
#
# # Rebuild the subregion adjacency
# adregion = np.zeros((int(np.max(yanlinks[:, 4])), int(np.max(yanlinks[:, 4]))))
# for i in range(len(yanlinks[:, 0])):
#     temp1 = np.where(node[:, 0] == node[i, 0])[0]
#     temp2 = np.where(node[:, 1] == node[i, 0])[0]
#     temp3 = np.where(node[:, 0] == node[i, 1])[0]
#     temp4 = np.where(node[:, 1] == node[i, 1])[0]
#     temp = np.unique(np.intersect1d(np.arange(i + 1, node.shape[0]),
#                                     np.concatenate((temp1, temp2, temp3, temp4))))
#     if len(temp) > 0:
#         adregion[i, temp] = 1
#         adregion[temp, i] = 1
# for i in range(len(yanlinks[:, 1])):
#     adregion[:, int(yanlinks[i, 4]) - 1] = adregion[:, int(yanlinks[i, 4]) - 1] * int(yanlinks[i, 10])
# subregion_adj1 = np.zeros((Initial_partitions, Initial_partitions))
#
# # Count label co-occurrence again (strong vs. weak correlation)
# for i in range(len(adregion[:, 1])):
#     a = adregion[i, :]
#     a = np.unique(a)
#     a = a[a != 0]
#     if a.shape[0] > 1:
#         for j in range(len(a)):
#             for u in range(len(a)):
#                 if j != u:
#                     # subregion_adj1 is the subregion adjacency; larger
#                     # values mean stronger correlation
#                     subregion_adj1[int(a[j]) - 1, int(a[u]) - 1] += 1
#                     subregion_adj1[int(a[u]) - 1, int(a[j]) - 1] += 1
#
# # subregion_v: row i holds all the speeds of subregion i
# subregion_v = np.zeros((int(pattern_sort[-1, 1]), 1000))
# for i in range(0, int(pattern_sort[-1, 1])):
#     subregion_v_one = yanlinks[yanlinks[:, 10] == i, 5]  # speeds of subregion i
#     subregion_v[i, :subregion_v_one.shape[0]] = subregion_v_one
#
# directed_adjacency_matrix1 = subregion_adj1.copy()
# # ignore weakly correlated subregion pairs
# min_value = np.min(np.max(subregion_adj1, axis=0)) - 2
# subregion_adj1[subregion_adj1 < min_value] = 0
# subregion_adj1[subregion_adj1 > 1] = 1
# directed_adjacency_matrix1[directed_adjacency_matrix1 > 1] = 1
# np.save('dadr.npy', directed_adjacency_matrix1)
#
# # Mean bus speed per subregion
# subregion_v_bus_mean = np.zeros((int(pattern_sort[-1, 1]), 1))
# for i in range(0, int(pattern_sort[-1, 1])):
#     subregion_v_bus_mean[i, 0] = np.mean(subregion_v[i, subregion_v[i, :] > 0])
#
# # the second column of subregion_v_bus_mean is the region number
# new_column = np.arange(1, int(pattern_sort[-1, 1] + 1)).reshape(-1, 1)
# subregion_v_bus_mean = np.hstack((subregion_v_bus_mean[:, :1], new_column))
#
# # sort subregion_v_bus_mean by mean speed
# sorted_indices = np.argsort(subregion_v_bus_mean[:, 0])
# subregion_v_bus_mean = subregion_v_bus_mean[sorted_indices]
# # minimum subregion speed
# subregion_v_bus_min = np.min(subregion_v_bus_mean)
#
# np.save('yanlinks.npy', yanlinks)
####################################################################################################################
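# --- Illustrative sketch (not part of the pipeline). The disabled merge step
# above joins subregions that share many bus routes; the test at its core is
# a set-intersection count (_lines_* names are hypothetical):
_lines_a = {101, 102, 205}          # routes passing through subregion a
_lines_b = {102, 205, 330}          # routes passing through subregion b
_shared = len(_lines_a & _lines_b)  # -> 2 shared routes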
# Combinatorial optimisation
print('step2 combinatorial optimisation')
# N would be int(pattern_sort[-1, 1]) if the merge step above were enabled
subvar = np.zeros((N, 8))
maxgen = 1   # number of generations
sizepop = 2  # population size
# Per-region statistics: mean / mean of squares / count of link lengths (a)
# and bus speeds (b), plus their variances
for x in range(0, N):
    a = yanlinks[np.where(yanlinks[:, 10] == x), 5].T
    l = yanlinks[np.where(yanlinks[:, 10] == x), 4]
    subvar[x, 0] = np.mean(a)
    subvar[x, 1] = np.mean(a ** 2)
    subvar[x, 2] = len(a)
    b = stationid[np.where(stationid[:, 7] == x), 6][0]
    b = b[~np.isnan(b)]
    subvar[x, 3] = np.mean(b)
    subvar[x, 4] = np.mean(b ** 2)
    subvar[x, 5] = len(b)
    subvar[x, 6] = variancediy(a)
    subvar[x, 7] = variancediy(b)
subvar[np.isnan(subvar)] = 0
np.save('subvar.npy', subvar)
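# --- Illustrative sketch (not part of the pipeline). subvar stores the mean
# and the mean of squares per region, from which the variance can always be
# recovered via Var(a) = E[a^2] - E[a]^2; a quick sanity check:
_demo_a = np.array([1.0, 2.0, 4.0])
assert np.isclose(np.mean(_demo_a ** 2) - np.mean(_demo_a) ** 2, np.var(_demo_a))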
pcross = 0.8     # crossover probability
pmutation = 0.8  # mutation probability
lenchrom = [1] * Initial_partitions  # gene lengths
# lower/upper bound of every gene (the name unassigned_roads is reused from
# the region-growing step above)
unassigned_roads = np.tile([[1, final_partitions]], (Initial_partitions, 1))

individuals = {'fitness': np.zeros((sizepop, 1)), 'chrom': []}
avgfitness = []   # average fitness of the population
bestfitness = []  # best fitness of the population
bestchrom = None  # best chromosome of the population

for i in range(sizepop):
    # give each individual a random chromosome
    individuals['chrom'].append([Code(lenchrom, unassigned_roads)])
print('chromosomes coded')

inff_np = np.zeros((sizepop, 1))
for j in range(sizepop):
    inff_np[j] = liziqunfun(individuals['chrom'][j])
# liziqunfun can return NaN for infeasible partitions; penalise those with
# a fitness of 1000000 (a float array is needed for the NaN check to work)
inff_np[np.isnan(inff_np)] = 1000000
for j in range(sizepop):
    individuals['fitness'][j] = inff_np[j]

# best chromosome of the initial population
bestfitness = np.min(individuals['fitness'])
bestindex = np.argmin(individuals['fitness'])
bestchrom = individuals['chrom'][bestindex]
avgfitness = np.sum(individuals['fitness']) / sizepop
individuals['chrom'] = np.array(individuals['chrom'])

# history of best and average fitness
trace = {'avgfitness': [], 'bestfitness': []}
historychorm = individuals['chrom'].reshape(sizepop, -1)
historyfitness = individuals['fitness'].reshape(-1, 1)
bighistorychorm = individuals['chrom']
bighistoryfitness = np.hstack([individuals['fitness'].reshape((-1, 1)), np.zeros((sizepop, 1))])

for i in range(1, maxgen + 1):
    print("Generation:", i)
    # Selection operation
    individuals = select(individuals, sizepop)
    avgfitness = np.sum(individuals['fitness']) / sizepop
    # Crossover operation
    individuals['chrom'] = crossnew(pcross, lenchrom, individuals['chrom'], sizepop, unassigned_roads)
    # Mutation operation
    individuals['chrom'] = mutationnew(pmutation, lenchrom, individuals['chrom'], sizepop, unassigned_roads)

    # Calculate fitness, reusing cached values for chromosomes seen before
    inff = np.zeros(sizepop)
    for j in range(sizepop):
        if len(np.where(np.sum(np.abs(historychorm - individuals['chrom'][j, :]), axis=1) == 0)[0]) < 1:
            inff[j] = liziqunfun(individuals['chrom'][j, :])
        else:
            a = np.where(np.sum(np.abs(historychorm - individuals['chrom'][j, :]), axis=1) == 0)[0]
            inff[j] = historyfitness[a[0], 0]
    inff[np.isnan(inff)] = 1000000
    for j in range(sizepop):
        individuals['fitness'][j] = inff[j]

    # Find the best and worst fitness chromosomes and their positions
    newbestfitness, newbestindex = np.min(individuals['fitness']), np.argmin(individuals['fitness'])
    worstfitness, worstindex = np.max(individuals['fitness']), np.argmax(individuals['fitness'])

    # Keep the best chromosome from the previous generation
    if bestfitness > newbestfitness:
        bestfitness = newbestfitness
        bestchrom = individuals['chrom'][newbestindex]

    # Replace the worst chromosome with the best chromosome (elitism)
    individuals['chrom'][worstindex] = bestchrom[0]
    individuals['fitness'][worstindex] = bestfitness

    avgfitness = np.sum(individuals['fitness']) / sizepop

    # Add chromosomes and their fitness values to the history if they are
    # not already in it
    for h in range(sizepop):
        if len(np.where(np.sum(np.abs(historychorm - individuals['chrom'][h, :]), axis=1) == 0)[0]) < 1:
            historychorm = np.vstack((historychorm, individuals['chrom'][h, :]))
            historyfitness = np.vstack((historyfitness, individuals['fitness'][h].reshape(-1, 1)))

    trace['avgfitness'].append(avgfitness)
    trace['bestfitness'].append(bestfitness)
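# --- Illustrative sketch (not part of the pipeline). The generation loop
# above applies elitism: the worst individual is overwritten by the best
# chromosome found so far, so the best fitness can never get worse:
_demo_fit = np.array([3.0, 9.0, 5.0])
_best, _worst = np.argmin(_demo_fit), np.argmax(_demo_fit)
_demo_fit[_worst] = _demo_fit[_best]  # the population now keeps the elite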
print('Step 3 boundary adjustment')
if isinstance(bestchrom, list):
    bestchrom = bestchrom[0]

# Write the best chromosome from the genetic algorithm into column 12 of
# yanlinks, then rebuild the adjacency matrix adregion from the node
# information recorded in yanlinks.
for i in range(len(yanlinks)):
    yanlinks[i, 11] = bestchrom[int(yanlinks[i, 10]) - 1]
np.save('yanlinks.npy', yanlinks)
df = pd.DataFrame(yanlinks)
df.to_csv('yanlinks_merge.csv', index=False, header=False)

adregion = np.zeros((len(yanlinks), len(yanlinks)))
for i in range(len(yanlinks)):
    temp1 = np.where(node[:, 0] == node[i, 0])[0]
    temp2 = np.where(node[:, 1] == node[i, 0])[0]
    temp3 = np.where(node[:, 0] == node[i, 1])[0]
    temp4 = np.where(node[:, 1] == node[i, 1])[0]
    temp = np.unique(np.intersect1d(np.arange(i + 1, len(node)),
                                    np.concatenate((temp1, temp2, temp3, temp4))))
    if temp.size > 0:
        adregion[i, temp - 1] = 1
        adregion[temp - 1, i] = 1

# adregionf rebuilds adregion, i.e. the connections between nodes, so that
# the link weights stay consistent with the genetic algorithm's result.
adregion = adregionf(yanlinks, node)

behindfun = 0
solution = xfun(yanlinks, node)
frontfun = solution
# Iteratively move boundary links between regions while the objective improves
while behindfun < frontfun:
    solution = xfun(yanlinks, node)
    frontfun = solution
    x = frontfun
    for i in range(len(yanlinks)):
        if i == 2:  # NOTE: apparent debug limit kept from the original code
            break
        ar = yanlinks[i, 4] - 1
        af = yanlinks[i, 11]
        ad = adregion[int(ar), :]
        ad[int(ar)] = af
        a = np.unique(ad)
        if len(a) > 2:
            unique_elements, counts = np.unique(ad, return_counts=True)
            unia = pd.DataFrame({'Value': unique_elements, 'Count': counts})
            unia = unia[unia.iloc[:, 0] != 0]
            unia = unia.values
            if unia[unia[:, 0] == af, 1] == 2:
                a = a[a != 0]
                ti = []
                # try every neighbouring label and keep the best objective
                for j in range(len(a)):
                    if a[j] == af:
                        tiao = x
                    else:
                        yanlinks[i, 11] = a[j]
                        solution = xfun(yanlinks, node)
                        tiao = solution
                    ti.append(tiao)
                x, y = np.min(ti), np.argmin(ti)
                yanlinks[i, 11] = a[y]
                adregion = adregionf(yanlinks, node)
    behindfun = x

df = pd.DataFrame(yanlinks)
df.to_csv('yanlinks_boundary_adjustment.csv', index=False, header=False)
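# --- Illustrative sketch (not part of the pipeline). The boundary adjustment
# above tries each neighbouring region label for a boundary link and keeps
# the label with the smallest objective; skeleton of that argmin step, with
# a hypothetical _eval standing in for xfun(yanlinks, node):
_eval = lambda label: abs(label - 2.0)                 # toy objective
_candidate_labels = np.array([1.0, 2.0, 3.0])
_ti = [_eval(lab) for lab in _candidate_labels]        # objective per relabeling
_best_label = _candidate_labels[int(np.argmin(_ti))]   # keep the best label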