
Commit fe172e3

Update Linear.py
1 parent 4de97f1 commit fe172e3

File tree

1 file changed: +142 -0 lines changed

models/Linear.py

@@ -1 +1,143 @@
import math

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_dct as dct
# class Model(nn.Module):  # SENet variant for ETTmx
#     """
#     Just one Linear layer
#     """
#     def __init__(self, configs, channel=7, ratio=1):
#         super(Model, self).__init__()
#
#         self.avg_pool = nn.AdaptiveAvgPool1d(1)  # innovation
#         self.fc = nn.Sequential(
#             nn.Linear(7, 14, bias=False),
#             nn.Dropout(p=0.1),
#             nn.ReLU(inplace=True),
#             nn.Linear(14, 7, bias=False),
#             nn.Sigmoid()
#         )
#         self.seq_len = configs.seq_len
#         self.pred_len = configs.pred_len
#
#         self.Linear_More_1 = nn.Linear(self.seq_len, self.pred_len * 2)
#         self.Linear_More_2 = nn.Linear(self.pred_len * 2, self.pred_len)
#         self.relu = nn.ReLU()
#         self.gelu = nn.GELU()
#
#         self.drop = nn.Dropout(p=0.1)
#         # Use this line if you want to visualize the weights
#
#     def forward(self, x):
#         # x: [Batch, Input length, Channel]
#         x = x.permute(0, 2, 1)           # (B, L, C) -> (B, C, L)
#         b, c, l = x.size()               # (B, C, L)
#         y = self.avg_pool(x).view(b, c)  # squeeze: (B, C, L) -> (B, C)
#
#         # np.save('f_weight.npy', f_weight_np)
#         # np.save('%d f_weight.npy' % epoch, f_weight_np)
#         # print("y", y.shape)
#         # return (x * y).permute(0, 2, 1)
#         return z.permute(0, 2, 1)  # NOTE: `z` is undefined here; this class is dead code
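
# The model below relies on the torch_dct package, which applies the
# DCT-II along the last dimension of a tensor of any shape. A minimal
# round-trip sketch (illustrative; assumes torch-dct is installed):
#
#   x = torch.randn(4, 96)      # (batch, seq_len)
#   X = dct.dct(x)              # DCT-II over the last dimension
#   x_rec = dct.idct(X)         # inverse DCT recovers the input
#   assert torch.allclose(x, x_rec, atol=1e-5)
#
# Because dct.dct already handles arbitrary leading dimensions, the
# per-channel loop in Model.forward below is equivalent to a single
# dct.dct(x) call on the (B, C, L) tensor.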
class my_Layernorm(nn.Module):
    """
    Specially designed layernorm for the seasonal part: applies a standard
    LayerNorm, then subtracts the mean over the time dimension so the
    output is re-centered around zero.
    """
    def __init__(self, channels):
        super(my_Layernorm, self).__init__()
        self.layernorm = nn.LayerNorm(channels)

    def forward(self, x):
        x_hat = self.layernorm(x)
        bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)
        return x_hat - bias
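
# A minimal sketch of what my_Layernorm does (illustrative; the shapes
# below are assumptions, not from the original commit):
#
#   norm = my_Layernorm(channels=7)
#   x = torch.randn(2, 96, 7)            # (batch, time, channels)
#   out = norm(x)
#   print(out.mean(dim=1).abs().max())   # ~0: re-centered along time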
class Model(nn.Module):
    """
    Linear forecaster with a DCT-based frequency channel attention:
    each channel's DCT spectrum is fed through a small gating MLP to
    produce per-frequency weights, and the reweighted series is
    projected from seq_len to pred_len.
    """
    def __init__(self, configs, channel=96, ratio=1):
        super(Model, self).__init__()
        # self.avg_pool = nn.AdaptiveAvgPool1d(1)  # innovation
        self.seq_len = configs.seq_len
        self.pred_len = configs.pred_len
        self.channel_num = configs.enc_in
        # squeeze-and-excitation style gate; `channel` is the width of the
        # last (time/frequency) axis, so it should equal configs.seq_len
        self.fc = nn.Sequential(
            nn.Linear(channel, channel * 2, bias=False),
            nn.Dropout(p=0.1),
            nn.ReLU(inplace=True),
            nn.Linear(channel * 2, channel, bias=False),
            nn.Sigmoid()
        )
        self.fc_inverse = nn.Sequential(
            nn.Linear(channel, channel // 2, bias=False),
            nn.Dropout(p=0.1),
            nn.ReLU(inplace=True),
            nn.Linear(channel // 2, channel, bias=False),
            nn.Sigmoid()
        )
        # self.fc_plot = nn.Linear(channel, channel, bias=False)
        self.mid_Linear = nn.Linear(self.seq_len, self.seq_len)

        self.Linear = nn.Linear(self.seq_len, self.pred_len)
        self.Linear_1 = nn.Linear(self.seq_len, self.pred_len)
        # self.dct_norm = nn.LayerNorm([self.channel_num], eps=1e-6)
        self.dct_norm = nn.LayerNorm(self.seq_len, eps=1e-6)
        # self.my_layer_norm = nn.LayerNorm([96], eps=1e-6)
    def forward(self, x):
        x = x.permute(0, 2, 1)  # (B, L, C) -> (B, C, L)

        b, c, l = x.size()  # (B, C, L)

        # per-channel DCT along the time axis
        freq_list = []
        for i in range(c):  # i indexes the channel
            freq = dct.dct(x[:, i, :])  # DCT of one channel
            # print("freq-shape:", freq.shape)
            freq_list.append(freq)

        stack_dct = torch.stack(freq_list, dim=1)  # (B, C, L)

        stack_dct = self.dct_norm(stack_dct)  # matters for traffic
        f_weight = self.fc(stack_dct)
        f_weight = self.dct_norm(f_weight)  # matters for traffic

        # visualization for the fecam tensor
        f_weight_np = f_weight.cpu().detach().numpy()
        np.save('f_weight_weather_wf.npy', f_weight_np)
        # np.save('%d f_weight.npy' % epoch, f_weight_np)

        # f_weight = self.dct_norm(f_weight.permute(0, 2, 1))  # matters for traffic
        # result = self.Linear(x)  # forL
        # f_weight_np = result.cpu().detach().numpy()
        # np.save('f_weight.npy', f_weight_np)
        # x = x.permute(0, 2, 1)
        # result = self.Linear(x * f_weight_inverse)  # forL
        result = self.Linear(x * f_weight)  # reweight channels, project L -> pred_len
        return result.permute(0, 2, 1)  # (B, C, pred_len) -> (B, pred_len, C)
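
# A minimal smoke test, illustrative only: the `configs` namespace below
# is a stand-in for the repo's real argument object, assuming it exposes
# seq_len, pred_len, and enc_in. Note that forward() saves
# f_weight_weather_wf.npy as a side effect.
if __name__ == "__main__":
    from types import SimpleNamespace

    configs = SimpleNamespace(seq_len=96, pred_len=24, enc_in=7)
    model = Model(configs, channel=configs.seq_len)

    x = torch.randn(2, configs.seq_len, configs.enc_in)  # (B, L, C)
    y = model(x)
    print(y.shape)  # expected: torch.Size([2, 24, 7])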
