import numpy as np
import pandas as pd
import sklearn
import Code_bus_correlation
# import adregionf
import fun
import xfun
import mutationnew
import test
# TOPSIS666 and variancediy are called as plain functions further down; this
# assumes each of these project modules exposes a function of the same name
# (with a bare `import`, the module object itself would not be callable).
from TOPSIS666 import TOPSIS666
import crossnew
import traceback
import statistics
from variancediy import variancediy

zhong = 8
# These single-iteration loops simply leave chu = zhong * 5 bound afterwards;
# chu is used later as the number of K-Means clusters.
for zhong in [zhong]:
    for chu in [zhong * 5]:
        pass

df = pd.read_excel('links.xls')
links = df.to_numpy()

df = pd.read_excel('stationid.xls')
stationid = df.to_numpy()

# Note: this second read overwrites the stationid array loaded just above.
df = pd.read_excel('stationidwan.xls')
stationid = df.to_numpy()
# for chuu in range(1, 17):
# for dic in range(1):
# tic()
# chu = 10
# zhong = 2

# Label the start and end intersections of the roads with sequential ids, e.g. from 1 to 500.
# Because one intersection can be the start or the end of several roads, the same
# intersection receives the same id wherever it appears.
node = np.concatenate((links[:, :2], links[:, 2:4]), axis=0)  # stack the start coordinates on top of the end coordinates
# axis=0 means the two blocks are joined along the first dimension, i.e. vertically (extra rows)

noi = 1
node = np.hstack((node, np.zeros((len(node), 1))))  # append a column of zeros that will hold the intersection id
print(node.shape[0])
for i in range(node.shape[0]):  # node.shape[0] is the number of rows of node
    print(i)
    # node[:i, 0] is the first column of every row before row i;
    # np.where() returns a tuple of index arrays, and [0] takes the array of row indices
    a = np.where(node[:i, 0] == node[i, 0])[0]
    b = np.where(node[:i, 1] == node[i, 1])[0]
    c = np.intersect1d(a, b)  # earlier rows with exactly the same coordinate pair
    if c.size > 0:
        # this intersection was seen before: reuse the id of its first occurrence
        node[i, 2] = node[min(c), 2]
    else:
        # first occurrence of this intersection: assign a fresh id
        node[i, 2] = noi
        noi += 1
# fold the ids back into two columns: start-node id and end-node id of every road
node = np.concatenate((node[:int(len(node)/2), 2].reshape(-1, 1), node[int(len(node)/2):, 2].reshape(-1, 1)), axis=1)
np.savetxt('node.txt', node)
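
# A tiny illustration of the labelling above (hypothetical coordinates, not taken
# from links.xls): if rows 0 and 3 of the stacked endpoint array both contain the
# pair (116.35, 39.90), row 0 is given a new id (say 1) and row 3 reuses that id,
# so the shared intersection appears as one and the same number in node.txt.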

# Extra columns are appended to links here so that yanlinks can be built,
# but that also means yanlinks itself is not quite right.
links = np.hstack((links, np.zeros((len(links), 1))))
links = np.hstack((links, np.zeros((len(links), 1))))
links = np.hstack((links, np.zeros((len(links), 1))))
yanlinks = np.concatenate((node, links[:, [5, 6, 7, 4, 0, 1, 2, 3]], np.zeros((len(links), 4))), axis=1)
yanlinks[:, 4] = np.arange(1, len(yanlinks) + 1)

road = np.arange(1, node.shape[0] + 1)
adjacency = np.zeros((len(road), len(road)))

# initialise the partition

for i in range(len(road)):
    temp1 = np.where(node[:, 0] == node[i, 0])[0]  # roads whose start node equals this road's start node
    temp2 = np.where(node[:, 1] == node[i, 0])[0]  # roads whose end node equals this road's start node
    temp3 = np.where(node[:, 0] == node[i, 1])[0]  # roads whose start node equals this road's end node
    temp4 = np.where(node[:, 1] == node[i, 1])[0]  # roads whose end node equals this road's end node
    temp = np.unique(np.intersect1d(np.arange(i + 1, node.shape[0]), np.concatenate((temp1, temp2, temp3, temp4))))
    if len(temp) > 0:
        adjacency[i, temp] = 1
        adjacency[temp, i] = 1
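
# The loop above builds an undirected road-adjacency matrix: adjacency[i, j] = 1
# whenever roads i and j share an intersection id; only pairs with j > i are
# tested, and the matrix is then symmetrised.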

from sklearn.cluster import KMeans

N = chu  # number of clusters

# Cluster the 7th and 8th columns of yanlinks (longitude and latitude) into N
# groups with K-Means. idx is an N x 2 matrix whose rows are the cluster centres:
# the first column is a centre's longitude and the second its latitude. idx is
# needed below when the distance from every point to each centre is computed.
km = KMeans(n_clusters=N).fit(yanlinks[:, [6, 7]])  # a single fit provides both the labels and the centres
cur, idx = km.labels_, km.cluster_centers_
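
# The same point-to-centre distances (in degrees rather than metres) could also be
# read off the fitted model with km.transform(yanlinks[:, [6, 7]]), which returns an
# (n_points, N) distance matrix; the manual broadcast below is kept because it
# matches the original code.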

# distance from every point to each of the N cluster centres
dis = 111000 * np.sqrt((yanlinks[:, 6] - idx[:, 0].reshape(N, 1)) ** 2 + (yanlinks[:, 7] - idx[:, 1].reshape(N, 1)) ** 2)
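# 111000 is roughly the number of metres in one degree of latitude, so the line
# above turns the degree-space Euclidean distance into an approximate distance in
# metres (longitude degrees shrink with latitude, so this is only a rough figure).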

# for each cluster centre, mm is the smallest distance and nn the index of the nearest point
mm, nn = np.min(dis, axis=1, keepdims=True), np.argmin(dis, axis=1)

data = links[:, 4]  # the 5th column of links holds the road length
if data.size > 0:
    m = data.shape[0]
    n = 1
else:
    m, n = 0, 1

pattern = np.zeros((m, n))  # m-by-n array of zeros
pattern[:, 0] = data  # the first n columns hold the data
pattern = np.hstack((pattern, np.zeros((len(pattern), 1))))  # extra column for the cluster assignment
center = np.zeros((N, n))  # initialise the cluster centres
pattern[:, :n] = data.reshape(-1, n)

# initialise the cluster centres
for x in range(0, N):
    center = np.hstack((center, np.zeros((len(center), 1))))
    center[x, 1] = nn[x]                    # index of the point nearest to K-Means centre x
    center[x, 0] = data[int(center[x, 1])]  # value of that seed point
    pattern[int(center[x, 1]), 1] = x       # mark the seed point as belonging to cluster x
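
# pattern column 0 holds the per-road value taken from links[:, 4] and column 1
# holds the cluster assignment. Note that the seeds are labelled 0..N-1 here while
# the growth loop below searches for labels x + 1 (1..N-1), so the seed of cluster
# 0 is indistinguishable from an unassigned road.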

# initialise the distances and counts
distance = np.zeros(N)
num = np.zeros(N)

# initialise the new cluster centres
new_center = np.zeros((N, n))
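
# Region-growing step: for every cluster, gather the unassigned roads on its
# boundary, score each candidate by (a) a variance term built from pattern[:, 0]
# and the bus-station values collected in bus and (b) its distance to the K-Means
# centre, then rank the candidates with TOPSIS666.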


mb = 10
while mb > 1:
    print(mb)

    for x in range(0, N - 1):  # x is the index of the current cluster
        try:
            yisou = adjacency[np.where(pattern[:, 1] == x + 1)[0], :]  # adjacency rows of the roads already assigned to cluster x + 1
            bound = np.where(np.sum(yisou, axis=0) > 0)[0]             # roads adjacent to that cluster
            yisou = np.where(pattern[:, 1] > 0)[0]                     # roads already assigned to any cluster
            bound = np.setdiff1d(bound, yisou)  # bound: roads adjacent to cluster x + 1 that are still unassigned
            yisou = np.where(pattern[:, 1] == x + 1)[0]  # yisou: roads already assigned to cluster x + 1

            bus = []
            for y in range(len(yisou)):  # walk over every road assigned to cluster x + 1 and pull the matching station data
                # MATLAB original: bus=[bus;stationid(find(stationid(:,6)==yisou(y)),7)];
                for yicifangwen in stationid[:, 5]:  # an array cannot be compared to a scalar in one go, so scan the column
                    if yicifangwen == yisou[y]:
                        yicifangwendesuoying = np.where(stationid[:, 5] == yicifangwen)[0]
                        for dengyuyisourow in yicifangwendesuoying:
                            extendbus = stationid[dengyuyisourow, 6]
                            bus.append(extendbus)
                        break
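
            # bus now holds, for every road already in cluster x + 1, the values
            # from the 7th column of stationid whose 6th column matches that road id.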

            bouvar = np.zeros((len(bound), 2))

            for adad in range(len(bound)):
                # MATLAB original: if var([bus;stationid(find(stationid(:,6)==bound(y)),7)])>0
                # result still needs to be fixed here!
                # the variance cannot be computed when result holds only a single value
                abaac = variancediy((np.concatenate([bus, stationid[stationid[:, 5] == bound[adad], 6]])).tolist())
                if len(np.concatenate([bus, stationid[stationid[:, 5] == bound[adad], 6]])) > 1 and abaac > 0:
                    # np.var of that concatenation can blow up when both inputs are empty
                    # bus and stationid[stationid[:, 5] == bound[adad]] are arrays, so they cannot be compared with zero directly
                    # pattern[yisou, 0] and pattern[bound[adad], 0] have different dtypes, hence the cast below
                    yaozhuanchenfloat = np.float64(pattern[yisou - 1, 0])  # the cast itself is fine
                    bouvar[adad, 0] = variancediy((yaozhuanchenfloat, pattern[bound[adad] - 1, 0])) * variancediy((np.concatenate((bus, stationid[stationid[:, 5] == bound[adad], 6]))).tolist())

                else:
                    print(type(bus))
                    if variancediy(bus) > 0:  # MATLAB: if var(bus)>0, the speed variance of the roads already assigned
                        # print(pattern[yisou, 0])
                        yaozhuanchenfloat1 = np.float64(pattern[yisou - 1, 0])  # the cast itself is fine

                        bouvar[adad, 0] = variancediy((yaozhuanchenfloat1, pattern[bound[adad] - 1, 0])) + variancediy(bus)

                    else:
                        yaozhuanchenfloat2 = np.float64(pattern[yisou - 1, 0])

                        bouvar[adad, 0] = variancediy((yaozhuanchenfloat2, pattern[bound[adad] - 1, 0]))

                bouvar[adad, 1] = 111000 * np.sqrt(np.sum((yanlinks[yanlinks[:, 4] == bound[adad], 6:8] - idx[x - 1, :]) ** 2))
            if bouvar.shape[0] > 1:
                a, b = TOPSIS666(bouvar)  # TOPSIS666 is the corrected version

            else:
                b = 1

        except:
            continue