# def partition_diversity(partition):
import numpy as np
import pandas as pd
from Code import Code
from variancediy import variancediy
from TOPSIS import TOPSIS
from adregionf import adregionf
from select import select
from crossnew import crossnew
from mutationnew import mutationnew
from hui_fun import liziqunfun
from hui_xfun import xfun
import os
import time
from three_dimensional_TOPSIS import three_dimensional_TOPSIS
from boundary_adjustment_fitness import boundary_fitness
from boundary_adjustment import boundary_adjustment_fitness
import concurrent.futures
import multiprocessing
import pickle
from luanlai import multi_huifun
from sklearn.cluster import KMeans

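# Note: Code, variancediy, TOPSIS, adregionf, select, crossnew, mutationnew,
# hui_fun, hui_xfun, three_dimensional_TOPSIS, boundary_adjustment*, and luanlai
# are assumed to be project-local modules sitting next to this script, not PyPI
# packages; in particular the local select.py (GA selection) shadows the stdlib
# select module.
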
Initial_partitions = 60
for final_partitions in range(6, 18):

    print(f'final_partitions is {final_partitions}')
    # bus_info = pd.DataFrame()
    # file_range = range(211, 236)
    # file_directory = "E:\\分方向MFD\\数据\\处理后的数据\\公交匹配到路网\\08_pcross"
    # for file_number in file_range:
    #     file_name = f"20190908_{file_number:03d}.csv"  # build the file name from the naming pattern
    #     file_path = os.path.join(file_directory, file_name)  # build the full file path
    #     temp_dataframe = pd.read_csv(file_path)  # read the CSV with pandas
    #     bus_info = pd.concat([bus_info, temp_dataframe], ignore_index=True)  # append temp_dataframe to the merged frame
    # links = pd.read_csv('links_niu.csv')
    # # group bus_info by its 10th column and average its 6th column
    # average_values = bus_info.groupby(bus_info.columns[9])[bus_info.columns[5]].mean()
    # # look up each index of average_values in column 6 of links and write the mean into column 5 of that row
    # for index, value in average_values.items():
    #     links.loc[links[links.columns[5]] == index, links.columns[4]] = value
    # # links = links[links[links.columns[5]].isin(average_values.index)]
    # # save the result to a new CSV file
    # links.to_csv('links_processed.csv', index=False)
    links = pd.read_csv('links_processed.csv')

    # links = pd.read_csv('links.csv')
    folder_name = f"{Initial_partitions}_to_{final_partitions}"
    OUTPUT_DIR = os.path.join('output', folder_name)

    # create the output folder if it does not exist
    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
    # links = pd.read_csv('links_1.csv')
    links = links.to_numpy()

    # for chuu in range(1, 17):
    # for dic in range(1):
    # tic()
    # chu = 10
    # zhong = 2

    # Label the start and end points of every link with an intersection id, e.g. 1..500.
    # One intersection can be the start or end of several links, so the same
    # intersection receives the same label everywhere it appears.
    node = np.concatenate((links[:, :2], links[:, 2:4]), axis=0)  # stack start coordinates on top of end coordinates (axis=0 concatenates along the rows)

    noi = 1
    node = np.hstack((node, np.zeros((len(node), 1))))  # append a label column initialised to 0

    for i in range(node.shape[0]):  # node.shape[0] is the number of rows of node
        # node[:i, 0] is the first column of rows 0..i-1;
        # np.where() returns a tuple of index arrays, and [0] takes the first one
        a = np.where(node[:i, 0] == node[i, 0])[0]
        b = np.where(node[:i, 1] == node[i, 1])[0]
        c = np.intersect1d(a, b)  # intersect1d returns the intersection of two arrays
        if c.size > 0:
            x = c.shape[0]
            y = 1
        else:
            x, y = 0, 1
        if x > 0 and y > 0:
            node[i, 2] = node[min(c), 2]  # reuse the label of the earliest row with the same coordinates
        else:
            node[i, 2] = noi
            noi += 1
    node = np.concatenate((node[:int(len(node) / 2), 2].reshape(-1, 1),
                           node[int(len(node) / 2):, 2].reshape(-1, 1)), axis=1)

    np.save('node.npy', node)
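    # Worked example of the labelling above (illustration only): if the coordinate
    # pairs are (1.0, 2.0), (3.0, 4.0), (1.0, 2.0), the first and third rows describe
    # the same intersection, so both get label 1 and the second row gets label 2.
    # After the final reshape, node[:, 0] holds the label of each link's start
    # intersection and node[:, 1] the label of its end intersection.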

    # append three zero columns to links so that yanlinks below has the expected width
    # (the original note cautions that this padding makes yanlinks not quite right)
    links = np.hstack((links, np.zeros((len(links), 1))))
    links = np.hstack((links, np.zeros((len(links), 1))))
    links = np.hstack((links, np.zeros((len(links), 1))))
    yanlinks = np.concatenate((node, links[:, [5, 6, 7, 4, 0, 1, 2, 3]], np.zeros((len(links), 4))), axis=1)
    yanlinks[:, 4] = np.arange(1, len(yanlinks) + 1)

    road = np.arange(1, node.shape[0] + 1)
    adjacency = np.zeros((len(road), len(road)))

    # initialise the partitions
    for i in range(len(road)):
        temp1 = np.where(node[:, 0] == node[i, 0])[0]  # links whose start node equals this link's start node
        temp2 = np.where(node[:, 1] == node[i, 0])[0]  # links whose end node equals this link's start node
        temp3 = np.where(node[:, 0] == node[i, 1])[0]  # links whose start node equals this link's end node
        temp4 = np.where(node[:, 1] == node[i, 1])[0]  # links whose end node equals this link's end node
        temp = np.unique(np.intersect1d(np.arange(i + 1, node.shape[0]),
                                        np.concatenate((temp1, temp2, temp3, temp4))))
        if len(temp) > 0:
            adjacency[i, temp] = 1
            adjacency[temp, i] = 1
    row_sums = np.sum(adjacency, axis=1)

    # indices of links with no neighbours
    zero_row_indices = np.where(row_sums == 0)[0]

    N = Initial_partitions  # number of clusters

    # Cluster the links on columns 7 and 8 of yanlinks (longitude and latitude) into N
    # groups with K-Means.  idx is an N x 2 matrix of cluster centres (longitude,
    # latitude) used below to compute the distance of every link to every centre.
    # Fit the model once so the labels and the centres come from the same model.
    kmeans_model = KMeans(n_clusters=N, n_init=10).fit(yanlinks[:, [6, 7]])
    Cluster_Label, idx = kmeans_model.labels_, kmeans_model.cluster_centers_
    # df = pd.read_csv('idx.csv', header=None)
    # idx = df.to_numpy()
    # distance of every link to every cluster centre (degrees converted to metres)
    dis = 111000 * np.sqrt(
        (yanlinks[:, 6] - idx[:, 0].reshape(N, 1)) ** 2 + (yanlinks[:, 7] - idx[:, 1].reshape(N, 1)) ** 2)

    # for each cluster centre, mm is the smallest distance and nn the index of the closest link
    mm, nn = np.min(dis, axis=1, keepdims=True), np.argmin(dis, axis=1)
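    # Note: 111000 m per degree is the usual rough conversion; the metre length of
    # one degree of longitude actually shrinks with latitude (about
    # 111320 * cos(latitude) m), so these distances are approximate, which is
    # sufficient for picking the nearest seed link per cluster.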

    data = links[:, 4]  # column 5 of links is the road length
    if data.size > 0:
        m = data.shape[0]
        n = 1
    else:
        m, n = 0, 1

    pattern = np.zeros((m, n))  # zeros((m, n)) returns an m x n array of zeros
    pattern[:, 0] = data  # the first n columns hold the data
    pattern = np.hstack((pattern, np.zeros((len(pattern), 1))))
    pattern[:, 1] = -1  # -1 marks a link that has not been assigned to a cluster yet
    center = np.zeros((N, n))  # initialise the cluster centres
    pattern[:, :n] = data.reshape(-1, n)
    center = np.hstack((center, np.zeros((len(center), 1))))
    # initialise the cluster centres: seed each cluster with the link closest to its K-Means centre
    for x in range(0, N):
        center[x, 1] = nn[x]
        center[x, 0] = data[int(center[x, 1])]
        pattern[int(center[x, 1]), 1] = x

    # initialise distances and counts
    distance = np.zeros(N)
    num = np.zeros(N)

    # initialise the new cluster centres
    new_center = np.zeros((N, n))

    unassigned_links = 2
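    # The commented-out loop below is the original region-growing step: each of the
    # N seeded clusters repeatedly looks at the unassigned links adjacent to it,
    # scores them with TOPSIS on (length variance after joining, distance to the
    # cluster's K-Means centre) and absorbs the best one until every link is
    # assigned.  Its result was saved once as pattern{Initial_partitions}.npy,
    # which is simply reloaded further down instead of being recomputed.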
    # while unassigned_links > 0:
    #     print(unassigned_links)
    #
    #     for x in range(0, N):  # x is the index of the current cluster
    #         try:
    #             selected_links = adjacency[pattern[:, 1] == x, :]
    #             unassigned_roads = np.where(np.sum(selected_links, axis=0) > 0)[0]
    #             selected_links = np.where(pattern[:, 1] > -1)[0]
    #             unassigned_roads = np.setdiff1d(unassigned_roads, selected_links)  # links adjacent to cluster x that are still unassigned
    #             selected_links = np.where(pattern[:, 1] == x)[0]  # links already assigned to cluster x
    #             bus = []
    #
    #             road_evaluation = np.zeros((len(unassigned_roads), 2))
    #             for unassigned_road_index in range(len(unassigned_roads)):
    #                 selected_links_lengths_float = (pattern[selected_links, 0]).tolist()
    #                 unassigned_road_length_array = (pattern[unassigned_roads[unassigned_road_index], 0])
    #                 unassigned_road_length_array = [unassigned_road_length_array]
    #                 abrr = selected_links_lengths_float + unassigned_road_length_array
    #                 road_evaluation[unassigned_road_index, 0] = np.var(abrr, ddof=1)
    #                 aas = yanlinks[yanlinks[:, 4] == unassigned_roads[unassigned_road_index] + 1, 6:8]
    #                 road_evaluation[unassigned_road_index, 1] = 111000 * np.sqrt(np.sum(
    #                     (yanlinks[yanlinks[:, 4] == unassigned_roads[unassigned_road_index] + 1, 6:8] - idx[x, :]) ** 2))
    #
    #             if road_evaluation.shape[0] > 1:
    #                 m, n = TOPSIS(road_evaluation)  # TOPSIS returns the best score and the index of the best candidate
    #             else:
    #                 n = 0
    #
    #             # pattern[unassigned_roads[n - 1], 1] = x
    #             pattern[unassigned_roads[n], 1] = x
    #         except:
    #             continue
    #     unassigned_links = np.sum(pattern[:, 1] == -1)
    # # pattern labels run from 0 to N-1 here, so shift them to 1..N
    # pattern[:, 1] = pattern[:, 1] + 1
    #
    # np.save('pattern.npy', pattern)

    filename = f'pattern{Initial_partitions}.npy'
    pattern = np.load(filename)
    np.save(os.path.join(OUTPUT_DIR, 'pattern.npy'), pattern)
    pattern_path = os.path.join(OUTPUT_DIR, 'pattern.npy')
    pattern = np.load(pattern_path)
    yanlinks[:, 3] = links[:, 9]
    yanlinks[:, 10] = pattern[:, 1]

    # unique values of column 11 (initial sub-region label) and their first indices
    unique_values, unique_indices = np.unique(yanlinks[:, 10], return_index=True)

    # array to hold the results
    result_array = np.zeros((len(unique_values), 3))

    # loop over each unique sub-region label
    for i, value in enumerate(unique_values):
        # rows whose column 11 equals the current label
        rows_indices = np.where(yanlinks[:, 10] == value)[0]

        # longitude (column 7) and latitude (column 8) of those rows
        column_seven_values = yanlinks[rows_indices, 6]
        column_eight_values = yanlinks[rows_indices, 7]

        # mean longitude and latitude
        column_seven_mean = np.mean(column_seven_values)
        column_eight_mean = np.mean(column_eight_values)

        # store the label and its centroid
        result_array[i, 0] = value
        result_array[i, 1] = column_seven_mean
        result_array[i, 2] = column_eight_mean
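    # result_array now holds one row per initial sub-region:
    # [label, centroid longitude, centroid latitude].  It is passed to multi_huifun
    # below as an extra input to the fitness evaluation of each chromosome.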

    data_path = r''
    df2 = pd.read_csv(data_path + 'links_processed.csv')
    zero_rows = yanlinks[:, 10] == 0
    # indices of the rows to be removed (links never assigned to a sub-region)
    deleted_rows_indices = np.where(zero_rows)[0]

    # drop the same rows from the link table
    df2 = df2.drop(deleted_rows_indices, errors='ignore')

    df2.to_csv(data_path + 'links_test1.csv', index=False)

    yanlinks = yanlinks[yanlinks[:, 10] != 0]
    yanlinks = yanlinks[yanlinks[:, 10] != -1, :]

    road = np.unique(np.concatenate((yanlinks[:, 1], yanlinks[:, 0]), axis=0))

    adjacency = np.zeros((len(road), len(road)))
    adregion = np.zeros((int(np.max(yanlinks[:, 4])), int(np.max(yanlinks[:, 4]))))

    for i in range(len(yanlinks[:, 0])):
        temp1 = np.where(node[:, 0] == node[i, 0])[0]
        temp2 = np.where(node[:, 1] == node[i, 0])[0]
        temp3 = np.where(node[:, 0] == node[i, 1])[0]
        temp4 = np.where(node[:, 1] == node[i, 1])[0]
        temp = np.unique(np.intersect1d(np.arange(i + 1, node.shape[0]),
                                        np.concatenate((temp1, temp2, temp3, temp4))))
        if len(temp) > 0:
            adregion[i, temp] = 1
            adregion[temp, i] = 1
    # adregion encodes the adjacency between links
    np.save('adregion.npy', adregion)
    # weight the columns of adregion with the sub-region label of each link
    for i in range(len(yanlinks[:, 1])):
        # print(adregion[:, int(yanlinks[i, 4])])
        # print(int(yanlinks[i, 10]))
        adregion[:, int(yanlinks[i, 4]) - 1] = adregion[:, int(yanlinks[i, 4]) - 1] * int(yanlinks[i, 10])
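    # After this weighting, column k of adregion is either 0 or the sub-region label
    # of the link with id k+1, so the non-zero entries of a row list the sub-region
    # labels of every link adjacent to that row's link.  Example (illustrative
    # values): a row reading [0, 7, 0, 7, 12, 0, ...] means the link borders
    # sub-regions 7 and 12.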

    subregion_adj = np.zeros((Initial_partitions, Initial_partitions))

    # count how often pairs of sub-region labels co-occur in a row of adregion
    # (this measures whether two sub-regions are strongly or weakly connected)
    for i in range(len(adregion[:, 1])):

        a = adregion[i, :]
        a = np.unique(a)
        a = a[a != 0]

        if a.size > 0:
            x = 1
            y = a.shape[0]
        else:
            x, y = 0, 1
        if y > 1:
            for j in range(len(a)):
                for u in range(len(a)):
                    if j != u:
                        # subregion_adj holds the adjacency of sub-regions; larger values mean a stronger connection
                        subregion_adj[int(a[j]) - 1, int(a[u]) - 1] += 1
                        subregion_adj[int(a[u]) - 1, int(a[j]) - 1] += 1

    # keep a copy before thresholding
    directed_adjacency_matrix = subregion_adj.copy()
    # ignore sub-region adjacencies that are only weakly correlated
    min_value = np.min(np.max(subregion_adj, axis=0)) - 2
    subregion_adj[subregion_adj < min_value] = 0
    subregion_adj[subregion_adj > 1] = 1
    directed_adjacency_matrix[directed_adjacency_matrix > 1] = 1

    np.save('adr.npy', subregion_adj)
    pd.DataFrame(subregion_adj).to_csv(os.path.join(OUTPUT_DIR, 'subregion_adj.csv'), index=False, header=False)
    np.save('dadr.npy', directed_adjacency_matrix)
    np.save('yanlinks.npy', yanlinks)
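    # Thresholding example (illustrative numbers): if the column maxima of
    # subregion_adj are [9, 7, 11, ...], then min_value = 7 - 2 = 5, so any pair of
    # sub-regions whose accumulated co-occurrence count is below 5 is treated as not
    # adjacent, and every remaining count is binarised to 1.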

    ####################################################################################################################

    start = time.time()
    # combinatorial optimisation
    print('step2 combinatorial optimization')
    maxgen = 200   # number of generations
    sizepop = 100  # population size

    pcross = 0.8     # crossover probability
    pmutation = 0.8  # mutation probability
    lenchrom = [1] * Initial_partitions  # chromosome string length per variable
    unassigned_roads = np.tile([[1, final_partitions]], (Initial_partitions, 1))  # one [1, final_partitions] value range per initial sub-region

    individuals = {'fitness': np.zeros((sizepop, 1)), 'chrom': []}

    bestfitness = []  # best fitness in the population
    bestchrom = None  # best chromosome in the population
    lixiangjie = {}   # ideal solution recorded per generation
    every_generation_best_chromosome_san = {}
    every_generation_best_chromosome = {}
    for i in range(sizepop):
        individuals['chrom'].append([Code(lenchrom, unassigned_roads, final_partitions)])  # random chromosome for each individual
    print('code ended')
    repeated_row1 = np.array([[0]])
    inff = np.repeat([repeated_row1], sizepop, axis=0)
    inff_np = np.array(inff, dtype=np.int64)
    # with open('individuals_chrom.pkl', 'wb') as file:
    #     pickle.dump(individuals['chrom'], file)

    road_velocity = yanlinks[:, [4, 5]]
    # if __name__ == "__main__":
    dadr = np.load('dadr.npy')
    bus_line_sequence = pd.read_csv('bus_road_sequence.csv', header=None, encoding='gbk')
    bus_line_sequence = bus_line_sequence.drop(bus_line_sequence.columns[0], axis=1)
    bus_line_sequence = bus_line_sequence.fillna(0)
    bus_line_sequence = bus_line_sequence.astype(int)
    bus_line_sequence = bus_line_sequence.values

    chromosome_results = np.array(multi_huifun(individuals['chrom'], yanlinks, road_velocity, dadr, bus_line_sequence, result_array))
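    # multi_huifun evaluates the whole population (presumably in parallel, hence the
    # multiprocessing/concurrent.futures imports) and, judging by how its output is
    # used below, returns a (sizepop, 3) array of objectives per chromosome matching
    # the ['var', 'cut', 'linknumber'] columns written out later.  This description
    # is inferred from usage, not from the module itself.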

    # load the previously saved chromosomes
    # with open('individuals_chrom.pkl', 'rb') as file:
    #     individuals['chrom'] = pickle.load(file)
    # chromosome_results = np.zeros((sizepop, 3))
    # for j in range(sizepop):
    #     chromosome_results[j, :] = liziqunfun(individuals['chrom'][j], yanlinks, road_velocity, dadr, bus_line_sequence)
    inff_np = three_dimensional_TOPSIS(chromosome_results, None)

    inff_np[inff_np == 0] = 0.1
    print('The fitness calculation for Code is complete')
    # replace NaN scores with a large penalty (1000000) so such chromosomes are never preferred
    inff_np[np.isnan(inff_np)] = 1000000
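    # three_dimensional_TOPSIS is a project-specific routine; classic TOPSIS scores
    # each alternative by its distance to an ideal (and anti-ideal) point over the
    # objectives.  Judging from the np.argmin calls below, the routine returns a
    # score where smaller is better.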

    for j in range(sizepop):
        individuals['fitness'][j] = inff_np[j]

    # record the best chromosome of the initial population
    lixiangjie[0] = np.amin(chromosome_results, axis=0)
    every_generation_best_chromosome[0] = individuals['chrom'][np.argmin(individuals['fitness'])][0]
    every_generation_best_chromosome_san[0] = chromosome_results[np.argmin(individuals['fitness'])]
    individuals['chrom'] = np.array(individuals['chrom'])  # select, crossnew and mutationnew expect an ndarray

    # record the best and the average fitness per generation
    trace = {'avgfitness': [], 'bestfitness': []}

    chromosome_results1 = np.zeros((sizepop, 3))
    for i in range(1, maxgen + 1):
        print(i)

        individuals = select(individuals, sizepop)
        individuals['chrom'] = crossnew(pcross, lenchrom, individuals['chrom'], sizepop, unassigned_roads, final_partitions)
        individuals['chrom'] = mutationnew(pmutation, lenchrom, individuals['chrom'], sizepop, unassigned_roads, final_partitions)

        inff = np.zeros(sizepop)
        # for j in range(sizepop):
        #     chromosome_results1[j, :] = liziqunfun(individuals['chrom'][j, :])
        chromosome_results1 = np.array(multi_huifun(individuals['chrom'], yanlinks, road_velocity, dadr, bus_line_sequence, result_array))

        inff = three_dimensional_TOPSIS(chromosome_results1, lixiangjie[i - 1])

        inff[inff == 0] = 0.1
        inff[np.isnan(inff)] = 1000000

        # keep the element-wise best objective values seen so far (the ideal solution)
        result = np.vstack((chromosome_results, chromosome_results1))
        chromosome_results = np.amin(result, axis=0)

        for j in range(sizepop):
            individuals['fitness'][j] = inff[j]

        newbestindex = np.argmin(individuals['fitness'])
        worstindex = np.argmax(individuals['fitness'])

        # elitism: replace the worst chromosome with the best chromosome
        individuals['chrom'][worstindex] = individuals['chrom'][newbestindex]
        individuals['fitness'][worstindex] = individuals['fitness'][newbestindex]

        lixiangjie[i] = chromosome_results
        every_generation_best_chromosome[i] = individuals['chrom'][newbestindex]
        every_generation_best_chromosome_san[i] = chromosome_results1[newbestindex]

    # score every generation's best chromosome against the final ideal solution
    all_generation_bestchrom_san = three_dimensional_TOPSIS(np.array(list(every_generation_best_chromosome_san.values())), lixiangjie[maxgen])
    min_index = np.argmin(all_generation_bestchrom_san)

    fitness = np.minimum.accumulate(all_generation_bestchrom_san)
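    # np.minimum.accumulate keeps a running minimum, e.g.
    # np.minimum.accumulate([5., 3., 4., 2.]) -> [5., 3., 3., 2.],
    # so `fitness` is the best (lowest) score achieved up to each generation.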
    values = np.array(list(every_generation_best_chromosome_san.values()))

    # track the running minimum and the objective vector it belongs to
    current_min = float('inf')
    corresponding_values_list = []

    for idx, val in enumerate(all_generation_bestchrom_san):
        if val < current_min:
            current_min = val
            min_index = np.argmin(all_generation_bestchrom_san[:idx + 1])
            corresponding_values_list.append(values[min_index])
        else:
            corresponding_values_list.append(corresponding_values_list[-1])

    corresponding_values = np.array(corresponding_values_list)

    bestchrom = list(every_generation_best_chromosome.values())[min_index]
    pd.DataFrame({'Fitness': fitness}).to_excel(os.path.join(OUTPUT_DIR, 'all_generation_bestchrom_fitness.xlsx'), index=True)

    df = pd.DataFrame.from_dict(lixiangjie, orient='index', columns=['var', 'cut', 'linknumber'])
    df_original_values = pd.DataFrame(corresponding_values, columns=['var', 'cut', 'linknumber'])
    merged_df = pd.concat([df, df_original_values], axis=1)
    # save the merged DataFrame as an xlsx file
    merged_df.to_excel(os.path.join(OUTPUT_DIR, 'lixiangjie_and_best_chromosome.xlsx'), index=False)

    df2 = pd.DataFrame.from_dict(every_generation_best_chromosome, orient='index')
    df2.to_excel(os.path.join(OUTPUT_DIR, 'every_generation_best_chromosome.xlsx'), index=False)
    end = time.time()

    start1 = time.time()
    # print('Step 3 boundary adjustment')

    # map every link to the final region chosen for its initial sub-region
    for i in range(len(yanlinks)):
        yanlinks[i, 11] = bestchrom[int(yanlinks[i, 10]) - 1]
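    # Illustration: if bestchrom were [3, 1, 3, 2, ...], every link whose initial
    # sub-region label (column 11) is 1 would be assigned final region 3, label 2
    # would map to final region 1, and so on; column 12 of yanlinks then holds the
    # final partition of each link.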

    # yanlinks = boundary_adjustment_fitness(yanlinks, bestchrom)
    df = pd.DataFrame(yanlinks)
    df1 = pd.DataFrame(bestchrom)
    pd.DataFrame(yanlinks).to_csv(os.path.join(OUTPUT_DIR, 'yanlinks_boundary_adjustment.csv'), index=False, header=False)
    pd.DataFrame(bestchrom).to_csv(os.path.join(OUTPUT_DIR, "bestchrom.csv"), index=False, header=False)

    end1 = time.time()

    # print('Partition Merge:', end - start, 'seconds')
    # print('Boundary Adjustment:', end1 - start1, 'seconds')