# Allenfenqu/boundary_adjustment.py

import numpy as np
from bus_line_match import pipei
import pandas as pd
import networkx as nx
from objective_func import objective_func
from convert_to_partition import convert_to_partition
from boundary_adjustment_fitness import boundary_fitness
from adregionf import adregionf
def boundary_adjustment_fitness(yanlinks, x):
    """Iteratively reassign boundary links of the slowest region to improve fitness.

    Identifies the partition (region) with the lowest mean link speed, then
    repeatedly tries to move that region's boundary links into neighbouring
    regions, keeping every move that lowers the value returned by
    ``boundary_fitness``.  Stops when an iteration no longer improves.

    Parameters
    ----------
    yanlinks : numpy.ndarray
        Link table.  Column 4 holds the link id, column 5 the link speed,
        column 11 the current region label.  Modified in place.
    x : sequence of int
        Region label for each row of the reduced region-adjacency matrix
        ``dadr`` (1-based labels); used to build region-to-region adjacency.

    Returns
    -------
    numpy.ndarray
        The ``yanlinks`` array with updated region labels (same object).

    Side effects
    ------------
    Rewrites ``links_test1.csv`` with the current labels, calls ``pipei()``,
    and reads ``node.npy``, ``dadr.npy`` and the bus CSV files from the
    working directory.
    """
    # Mean speed per region; the slowest region is the adjustment target.
    velocity = {}
    for label in np.unique(yanlinks[:, 11]):
        velocity[label] = np.average(yanlinks[yanlinks[:, 11] == label][:, 5])
    min_velocity_key = min(velocity, key=velocity.get)

    # Persist the current region labels so downstream tools (pipei) see them.
    data_path = r''
    df2 = pd.read_csv(data_path + 'links_test1.csv')
    df2['L'] = yanlinks[:, 11]
    df2.to_csv(data_path + 'links_test1.csv', index=False)

    node = np.load('node.npy')
    # Link-level adjacency consistent with the GA optimisation result.
    # NOTE: a hand-rolled adjacency loop that used to precede this call was
    # dead code (its result was immediately overwritten) and was removed.
    adregion = adregionf(yanlinks, node)

    # Re-match bus lines to the current partition, then load the results.
    # Both CSVs: drop the leading id column, pad missing cells with 0.
    pipei()
    bus_line_partition = pd.read_csv('bus_line_partition.csv', header=None, encoding='gbk')
    bus_line_partition = bus_line_partition.drop(bus_line_partition.columns[0], axis=1)
    bus_line_partition = bus_line_partition.fillna(0).astype(int).values
    bus_line_sequence = pd.read_csv('bus_road_sequence.csv', header=None, encoding='gbk')
    bus_line_sequence = bus_line_sequence.drop(bus_line_sequence.columns[0], axis=1)
    bus_line_sequence = bus_line_sequence.fillna(0).astype(int).values

    # (link id, link speed) pairs.
    road_velocity = yanlinks[:, [4, 5]]
    partition = convert_to_partition(yanlinks)  # NOTE(review): appears unused below
    G = nx.DiGraph()
    for row in yanlinks:
        G.add_edge(row[0], row[1], weight=1)  # weight=row[3]
    G = G.to_undirected()  # NOTE(review): G also appears unused below

    # Region-level adjacency: strip all-zero rows/columns from dadr, then
    # mark every pair of distinct regions (x[i], x[j]) sharing a connection.
    dadr = np.load('dadr.npy')
    dadr = dadr[~np.all(dadr == 0, axis=1)]
    dadr = dadr[:, ~np.all(dadr == 0, axis=0)]
    adr = dadr
    n_regions = len(np.unique(x))
    badr = np.zeros((n_regions, n_regions))
    for i in range(len(adr)):
        for j in range(len(adr)):
            if adr[i, j] != 0 and x[i] != x[j]:
                badr[x[i] - 1, x[j] - 1] = 1
                badr[x[j] - 1, x[i] - 1] = 1
    # Regions adjacent to the slowest region (0-based indices).
    wu = np.where(badr[:, int(min_velocity_key) - 1] == 1)[0]

    # Bus lines that pass through the slowest region, and their road sequences.
    bus_line_partition_id = []
    route_sequence = []
    for index, row in enumerate(bus_line_partition):
        if int(min_velocity_key) in row:
            bus_line_partition_id.append(index)
            route_sequence.append(bus_line_sequence[index, :])
    route_sequence = np.array(route_sequence)

    # Speed of every road on every selected bus line (0 for padding ids).
    bus_line_route_velocity = np.zeros(route_sequence.shape, dtype=float)
    road_velocity_dict = {row[0]: row[1] for row in road_velocity}
    for ii, row in enumerate(route_sequence):
        for j, value in enumerate(row):
            bus_line_route_velocity[ii, j] = road_velocity_dict.get(value, 0)
    bus_line_partition_id = bus_line_partition[bus_line_partition_id]

    # Per line: de-duplicated region sequence restricted to neighbours of the
    # slowest region -> the line's "direction" through that region.
    unique_matching_rows = []
    for index, row in enumerate(bus_line_partition_id):
        unique_row = np.array([v for k, v in enumerate(row) if v not in row[:k]])
        unique_matching_rows.append(unique_row)
    bus_region_direction = [np.array([v for v in arr if v in wu]) for arr in unique_matching_rows]
    max_length = max(len(item) for item in bus_region_direction)
    # Zero-pad to a rectangular array, then collapse to the unique directions.
    bus_region_direction = np.array(
        [np.pad(item, (0, max_length - len(item)), 'constant', constant_values=0)
         for item in bus_region_direction])
    sorted_bus_region_direction = np.sort(bus_region_direction, axis=1)
    unique_sorted_bus_region_direction = np.unique(sorted_bus_region_direction, axis=0)

    behindfun = 0
    frontfun = boundary_fitness(yanlinks)
    noi = 1
    # Keep adjusting while the fitness keeps improving (decreasing).
    while behindfun < frontfun:
        frontfun = boundary_fitness(yanlinks)
        x1 = frontfun
        print(f"boundary adjustment {noi} times")
        noi += 1

        # Mean in-region bus speed for each direction group; remember the
        # raw non-zero speed samples per group for the road lookup below.
        one_direction_velocity_mean = {}
        non_zero_values_dict = {}
        for aa, direction in enumerate(unique_sorted_bus_region_direction):
            matching_row_indices = np.where(np.all(sorted_bus_region_direction == direction, axis=1))[0]
            same_direction_bus_line_route_velocity = bus_line_route_velocity[matching_row_indices]
            same_direction_bus_line_partition_id = bus_line_partition_id[matching_row_indices]
            matching_indices = np.where(same_direction_bus_line_partition_id == int(min_velocity_key))
            matching_values = same_direction_bus_line_route_velocity[matching_indices]
            non_zero_values = matching_values[matching_values != 0]
            one_direction_velocity_mean[aa] = np.mean(non_zero_values)
            non_zero_values_dict[aa] = non_zero_values

        # Roads belonging to the slowest direction group (matched by speed).
        min_key = min(one_direction_velocity_mean, key=one_direction_velocity_mean.get)
        min_key_non_zero_values = non_zero_values_dict[min_key]
        matching_indices = np.where(np.isin(road_velocity[:, 1], min_key_non_zero_values))
        matching_values = road_velocity[matching_indices, 0]
        matching_values_1d = np.squeeze(matching_values)

        for idx in matching_values_1d:
            i = int(idx) - 1
            ar = yanlinks[i, 4] - 1    # 0-based link id
            af = yanlinks[i, 11]       # current region of the link
            ad = adregion[int(ar), :]  # view into the adjacency row
            ad[int(ar)] = af           # mark the link itself with its region
            a = np.unique(ad)
            # More than {0, af} present -> the link touches another region,
            # i.e. it is a boundary link.
            if len(a) > 2:
                unique_elements, counts = np.unique(ad, return_counts=True)
                # pandas used for the value/count pairing (differs from numpy).
                unia = pd.DataFrame({'Value': unique_elements, 'Count': counts})
                unia = unia[unia.iloc[:, 0] != 0]
                unia = unia.values
                # Only move links loosely attached to their own region
                # (exactly 2 occurrences of af in the adjacency row).
                if unia[unia[:, 0] == af, 1] == 2:
                    a = a[a != 0]
                    ti = []
                    # Score each candidate region; keeping af scores x1.
                    for j in range(len(a)):
                        if a[j] == af:
                            tiao = x1
                        else:
                            yanlinks[i, 11] = a[j]
                            tiao = boundary_fitness(yanlinks)
                        ti.append(tiao)
                    # Commit the assignment with the best (lowest) fitness.
                    x1, y = np.min(ti), np.argmin(ti)
                    yanlinks[i, 11] = a[y]
                    adregion = adregionf(yanlinks, node)
        behindfun = x1
    return yanlinks