PyTorch Interview Questions: Implementing Attention Structures

This post was last updated on July 8, 2024 at 3:37 AM.

PyTorch Interview Questions I: Key Modules in the Transformer

The attention mechanism is at the heart of the Transformer, and interviews often ask you to implement it by hand.
This post collects the key Transformer components you are likely to be asked about:

  • Scaled Dot-Product Attention, built by hand in PyTorch;
  • Multi-head attention in PyTorch;
  • Self-attention in PyTorch;
  • Positional encoding implemented with NumPy.

First, import the required libraries:

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

1. Single-Head Attention

Implementing Scaled Dot-Product Attention in PyTorch.
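
For reference, the module below computes the standard formula Attention(Q, K, V) = softmax(Q·K^T / sqrt(d_k))·V; the sqrt(d_k) scaling keeps the pre-softmax logits from growing with the key dimension d_k: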

class ScaledDotProductAttention(nn.Module):
    """ Scaled Dot-Product Attention """

    def __init__(self, scale):
        super().__init__()

        self.scale = scale
        self.softmax = nn.Softmax(dim=2)  # softmax over the key dimension

    def forward(self, q, k, v, mask=None):
        u = torch.bmm(q, k.transpose(1, 2))  # 1. Matmul: (batch, n_q, d_k) x (batch, d_k, n_k) -> (batch, n_q, n_k)
        u = u / self.scale                   # 2. Scale by sqrt(d_k)

        if mask is not None:
            u = u.masked_fill(mask, -np.inf)  # 3. Mask: positions where mask is True are set to -inf

        attn = self.softmax(u)       # 4. Softmax
        output = torch.bmm(attn, v)  # 5. Output: (batch, n_q, n_k) x (batch, n_k, d_v) -> (batch, n_q, d_v)

        return attn, output


if __name__ == "__main__":
    n_q, n_k, n_v = 2, 4, 4
    d_q, d_k, d_v = 128, 128, 64
    batch = 4

    q = torch.randn(batch, n_q, d_q)
    k = torch.randn(batch, n_k, d_k)
    v = torch.randn(batch, n_v, d_v)
    mask = torch.zeros(batch, n_q, n_k).bool()

    attention = ScaledDotProductAttention(scale=np.power(d_k, 0.5))
    attn, output = attention(q, k, v, mask=mask)

    print(attn)
    print(output)
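
As a quick sanity check (not part of the original post), the module above can be compared against PyTorch's built-in torch.nn.functional.scaled_dot_product_attention, available from PyTorch 2.0 onwards. Note the built-in uses the opposite boolean-mask convention (True means "may attend"), so the mask is inverted here; this is a minimal sketch that assumes ScaledDotProductAttention from above is already defined.

import torch
import torch.nn.functional as F

batch, n_q, n_k = 4, 2, 4
d_k, d_v = 128, 64
q = torch.randn(batch, n_q, d_k)
k = torch.randn(batch, n_k, d_k)
v = torch.randn(batch, n_k, d_v)
mask = torch.zeros(batch, n_q, n_k).bool()      # True = masked out, as in the class above

attention = ScaledDotProductAttention(scale=d_k ** 0.5)
_, out_ours = attention(q, k, v, mask=mask)

out_ref = F.scaled_dot_product_attention(q, k, v, attn_mask=~mask)  # invert: True = keep
print(torch.allclose(out_ours, out_ref, atol=1e-5))  # should print True up to numerical tolerance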

2. Multi-Head Attention

class MultiHeadAttention(nn.Module):
    """ Multi-Head Attention """

    def __init__(self, n_head, d_k_, d_v_, d_k, d_v, d_o):
        super().__init__()

        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v

        self.fc_q = nn.Linear(d_k_, n_head * d_k)  # (in_features, out_features)
        self.fc_k = nn.Linear(d_k_, n_head * d_k)
        self.fc_v = nn.Linear(d_v_, n_head * d_v)

        self.attention = ScaledDotProductAttention(scale=np.power(d_k, 0.5))

        self.fc_o = nn.Linear(n_head * d_v, d_o)

    def forward(self, q, k, v, mask=None):

        n_head, d_q, d_k, d_v = self.n_head, self.d_k, self.d_k, self.d_v

        batch, n_q, d_q_ = q.size()  # input q has shape (batch, n_q, d_q_); after fc_q it becomes (batch, n_q, n_head*d_k), with d_q == d_k
        batch, n_k, d_k_ = k.size()
        batch, n_v, d_v_ = v.size()

        q = self.fc_q(q)  # 1. Project the single head into n_head heads
        k = self.fc_k(k)
        v = self.fc_v(v)
        q = q.view(batch, n_q, n_head, d_q).permute(2, 0, 1, 3).contiguous().view(-1, n_q, d_q)
        k = k.view(batch, n_k, n_head, d_k).permute(2, 0, 1, 3).contiguous().view(-1, n_k, d_k)
        v = v.view(batch, n_v, n_head, d_v).permute(2, 0, 1, 3).contiguous().view(-1, n_v, d_v)
        # view reshapes the tensor
        # permute(2, 0, 1, 3) reorders the dims to (n_head, batch, n_q, d_q)
        # contiguous() returns a tensor that is contiguous in memory, so view can flatten it to (n_head*batch, n_q, d_q)

        if mask is not None:
            mask = mask.repeat(n_head, 1, 1)  # -> mask.size() = (n_head*batch, n_q, n_k)
        attn, output = self.attention(q, k, v, mask=mask)  # 2. Run single-head attention on all heads in parallel

        output = output.view(n_head, batch, n_q, d_v).permute(1, 2, 0, 3).contiguous().view(batch, n_q, -1)  # 3. Concat
        output = self.fc_o(output)  # 4. Affine transform to produce the final output
        # permute(1, 2, 0, 3) -> (batch, n_q, n_head, d_v)

        return attn, output


if __name__ == "__main__":
    n_q, n_k, n_v = 2, 4, 4
    d_q_, d_k_, d_v_ = 128, 128, 64
    batch = 4

    q = torch.randn(batch, n_q, d_q_)
    k = torch.randn(batch, n_k, d_k_)
    v = torch.randn(batch, n_v, d_v_)
    mask = torch.zeros(batch, n_q, n_k).bool()

    mha = MultiHeadAttention(n_head=8, d_k_=128, d_v_=64, d_k=256, d_v=128, d_o=128)
    attn, output = mha(q, k, v, mask=mask)

    print(attn.size())
    print(output.size())
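
The view/permute bookkeeping in step 1 above is easy to get wrong in an interview. The following standalone check (illustrative sizes only, not from the original post) confirms that (batch, n, n_head*d) is rearranged into (n_head*batch, n, d), with head h of sample b landing at index h*batch + b of the first dimension:

import torch

batch, n, n_head, d = 2, 3, 4, 5
x = torch.randn(batch, n, n_head * d)

# Same split-heads transform as in MultiHeadAttention.forward
split = x.view(batch, n, n_head, d).permute(2, 0, 1, 3).contiguous().view(-1, n, d)

b, h = 1, 2
print(torch.equal(split[h * batch + b], x[b].view(n, n_head, d)[:, h, :]))  # True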

3. Self-Attention

class SelfAttention(nn.Module):
    """ Self-Attention """

    def __init__(self, n_head, d_k, d_v, d_x, d_o):
        super().__init__()  # required before registering nn.Parameter

        self.wq = nn.Parameter(torch.Tensor(d_x, d_k))
        self.wk = nn.Parameter(torch.Tensor(d_x, d_k))
        self.wv = nn.Parameter(torch.Tensor(d_x, d_v))

        self.mha = MultiHeadAttention(n_head=n_head, d_k_=d_k, d_v_=d_v, d_k=d_k, d_v=d_v, d_o=d_o)

        self.init_parameters()

    def init_parameters(self):
        for param in self.parameters():
            stdv = 1. / np.power(param.size(-1), 0.5)
            param.data.uniform_(-stdv, stdv)

    def forward(self, x, mask=None):
        # q, k, and v are all projections of the same input x
        q = torch.matmul(x, self.wq)
        k = torch.matmul(x, self.wk)
        v = torch.matmul(x, self.wv)

        attn, output = self.mha(q, k, v, mask=mask)

        return attn, output


if __name__ == "__main__":
    n_x = 4
    d_x = 80
    batch = 4

    x = torch.randn(batch, n_x, d_x)
    mask = torch.zeros(batch, n_x, n_x).bool()

    selfattn = SelfAttention(n_head=8, d_k=128, d_v=64, d_x=80, d_o=80)
    attn, output = selfattn(x, mask=mask)

    print(attn.size())
    print(output.size())
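
The demos always pass an all-False mask, i.e. nothing is masked. A common follow-up question is the causal (look-ahead) mask used in decoder self-attention; below is a minimal sketch that builds one in the same True-means-masked convention expected by the classes above (sizes taken from the demo):

import torch

batch, n_x, d_x = 4, 4, 80

# True marks positions that must NOT be attended to: query position i may only
# look at key positions <= i.
causal = torch.triu(torch.ones(n_x, n_x), diagonal=1).bool()
mask = causal.unsqueeze(0).expand(batch, -1, -1)   # broadcast over the batch

selfattn = SelfAttention(n_head=8, d_k=128, d_v=64, d_x=d_x, d_o=80)
x = torch.randn(batch, n_x, d_x)
attn, output = selfattn(x, mask=mask)
print(attn.size(), output.size())   # torch.Size([32, 4, 4]) torch.Size([4, 4, 80])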

4. Positional Encoding
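
The sinusoidal encoding from "Attention Is All You Need" sets PE(pos, 2i) = sin(pos / 10000^(2i/d_model)) and PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model)); the NumPy implementation below follows that convention, with min_freq playing the role of 1/10000.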

def positional_encoding(max_position, d_model, min_freq=1e-4):
    position = np.arange(max_position)
    freqs = min_freq ** (2 * (np.arange(d_model) // 2) / d_model)
    pos_enc = position.reshape(-1, 1) * freqs.reshape(1, -1)
    pos_enc[:, ::2] = np.sin(pos_enc[:, ::2])    # even dimensions get sin, as in the original Transformer
    pos_enc[:, 1::2] = np.cos(pos_enc[:, 1::2])  # odd dimensions get cos
    return pos_enc


### Plotting ###
d_model = 128
max_pos = 256
mat = positional_encoding(max_pos, d_model)
plt.pcolormesh(mat, cmap='copper')
plt.xlabel('Depth')
plt.xlim((0, d_model))
plt.ylabel('Position')
plt.title("PE matrix heat map")
plt.colorbar()
plt.show()
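
A brief sketch (not from the original post) of how the matrix would typically be consumed: convert it to a tensor and add it to a batch of token embeddings, slicing to the actual sequence length.

import torch

d_model, max_pos = 128, 256
pe = torch.from_numpy(positional_encoding(max_pos, d_model)).float()  # (max_pos, d_model)

batch, seq_len = 4, 32
token_emb = torch.randn(batch, seq_len, d_model)   # hypothetical token embeddings
x = token_emb + pe[:seq_len].unsqueeze(0)          # broadcast the encoding over the batch
print(x.shape)                                     # torch.Size([4, 32, 128])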

Reference:

https://www.cnblogs.com/chuqianyu/p/18048501

