"""
From https://github.com/vlukiyanov/pt-dec
"""
from typing import Tuple
from torch.nn.parameter import Parameter
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
import os
from einops import rearrange


class ClusterAssignment(nn.Module):
    def __init__(
        self,
        cluster_number: int,
        embedding_dimension: int,
        alpha: float = 1.0,
        cluster_centers: Optional[torch.Tensor] = None,
        orthogonal: bool = True,
        freeze_center: bool = True,
        project_assignment: bool = True,
    ) -> None:
        """
        Module to handle the soft assignment; for a description see section 3.1.1 in Xie/Girshick/Farhadi,
        where the Student's t-distribution is used to measure similarity between each feature vector and each
        cluster centroid.

        :param cluster_number: number of clusters
        :param embedding_dimension: embedding dimension of feature vectors
        :param alpha: parameter representing the degrees of freedom in the t-distribution, default 1.0
        :param cluster_centers: cluster centers to initialise with; if None, use Xavier uniform initialisation
        :param orthogonal: if True, orthogonalise the initial centers via Gram-Schmidt
        :param freeze_center: if True, keep the cluster centers fixed (no gradient)
        :param project_assignment: if True, compute assignments by projection instead of the t-distribution
        """
        super(ClusterAssignment, self).__init__()
        self.embedding_dimension = embedding_dimension
        self.cluster_number = cluster_number
        self.alpha = alpha
        self.project_assignment = project_assignment
        if cluster_centers is None:
            initial_cluster_centers = torch.zeros(
                self.cluster_number, self.embedding_dimension, dtype=torch.float
            )
            nn.init.xavier_uniform_(initial_cluster_centers)
        else:
            initial_cluster_centers = cluster_centers
        if orthogonal:
            # Gram-Schmidt: subtract from each center its projection onto all
            # previous (already orthogonalised) centers, then L2-normalise.
            orthogonal_cluster_centers = torch.zeros(
                self.cluster_number, self.embedding_dimension, dtype=torch.float
            )
            orthogonal_cluster_centers[0] = initial_cluster_centers[0]
            for i in range(1, cluster_number):
                project = 0
                for j in range(i):
                    project += self.project(
                        initial_cluster_centers[j], initial_cluster_centers[i]
                    )
                initial_cluster_centers[i] -= project
                orthogonal_cluster_centers[i] = initial_cluster_centers[i] / torch.norm(
                    initial_cluster_centers[i], p=2
                )
            initial_cluster_centers = orthogonal_cluster_centers
        self.cluster_centers = Parameter(
            initial_cluster_centers, requires_grad=(not freeze_center)
        )

    @staticmethod
    def project(u, v):
        # Projection of v onto u: (<u, v> / <u, u>) * u
        return (torch.dot(u, v) / torch.dot(u, u)) * u
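
    # A quick hedged sanity check for the orthogonalisation above (not part of
    # the original repository; the sizes are illustrative assumptions):
    #
    #   ca = ClusterAssignment(cluster_number=3, embedding_dimension=8,
    #                          cluster_centers=torch.randn(3, 8), orthogonal=True)
    #   gram = ca.cluster_centers @ ca.cluster_centers.T
    #   # Off-diagonal entries of `gram` should be ~0 after Gram-Schmidt.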

    def forward(self, batch: torch.Tensor) -> torch.Tensor:
        """
        Compute the soft assignment for a batch of feature vectors, returning a batch of assignments
        for each cluster.

        :param batch: FloatTensor of [batch size, embedding dimension]
        :return: FloatTensor of [batch size, number of clusters]
        """
        if self.project_assignment:
            # Projection-based assignment: squared dot product with each
            # center, scaled by the center norms, then softmax over clusters.
            assignment = batch @ self.cluster_centers.T
            assignment = torch.pow(assignment, 2)
            norm = torch.norm(self.cluster_centers, p=2, dim=-1)
            soft_assign = assignment / norm
            return F.softmax(soft_assign, dim=-1)
        else:
            # Student's t-distribution assignment, Equation 1 of Xie/Girshick/Farhadi:
            # q_ij = (1 + ||z_i - mu_j||^2 / alpha)^(-(alpha + 1) / 2), normalised over j.
            norm_squared = torch.sum(
                (batch.unsqueeze(1) - self.cluster_centers) ** 2, 2
            )
            numerator = 1.0 / (1.0 + (norm_squared / self.alpha))
            power = float(self.alpha + 1) / 2
            numerator = numerator ** power
            return numerator / torch.sum(numerator, dim=1, keepdim=True)

    def get_cluster_centers(self) -> torch.Tensor:
        """
        Get the cluster centers.

        :return: FloatTensor of [number of clusters, embedding dimension]
        """
        return self.cluster_centers
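
# Minimal usage sketch for ClusterAssignment (a hedged illustration; the
# tensor sizes are assumptions, not from the original repository):
#
#   assigner = ClusterAssignment(cluster_number=4, embedding_dimension=16)
#   z = torch.randn(32, 16)   # [batch size, embedding dimension]
#   q = assigner(z)           # [batch size, number of clusters]
#   assert torch.allclose(q.sum(-1), torch.ones(32), atol=1e-5)  # rows sum to 1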


class StateEx(nn.Module):
    def __init__(
        self,
        hidden_dim: int,
        num_states: int,
        alpha: float = 1.0,
        orthogonal: bool = False,
        freeze_center: bool = True,
        project_assignment: bool = True,
        cluster_centers: Optional[torch.Tensor] = None,
    ):
        """
        Module which holds the clustering part of the DEC algorithm, as described in
        Xie/Girshick/Farhadi; here only the ClusterAssignment stage is used, applied
        to externally provided features.

        :param hidden_dim: hidden dimension of the input features
        :param num_states: number of states (clusters)
        :param alpha: parameter representing the degrees of freedom in the t-distribution, default 1.0
        :param cluster_centers: cluster centers to initialise with; if None, use Xavier uniform initialisation
        """
        super(StateEx, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_states = num_states
        self.alpha = alpha
        self.assignment = ClusterAssignment(
            num_states,
            self.hidden_dim,
            alpha,
            cluster_centers=cluster_centers,
            orthogonal=orthogonal,
            freeze_center=freeze_center,
            project_assignment=project_assignment,
        )
        # size_average is deprecated; reduction="sum" is the equivalent setting.
        self.loss_fn = nn.KLDivLoss(reduction="sum")

    def forward(self, batch: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Compute the cluster assignment for each time point and pool the input
        features into per-state representations.

        :param batch: FloatTensor of [batch size, time points, hidden dimension]
        :return: tuple of state representations [batch size, number of states, hidden dimension]
                 and assignments [batch size, time points, number of states]
        """
        batch_size = batch.size(0)
        num_time_points = batch.size(1)
        # (b, t, c) --> (b*t, c)
        flattened_batch = rearrange(batch, 'b t c -> (b t) c')
        # (b*t, num_states) --> (b, t, num_states)
        assignment = self.assignment(flattened_batch).view(batch_size, num_time_points, -1)
        # Weight the input features by the cluster assignment to get the state representations
        state_repr = torch.bmm(assignment.transpose(1, 2), batch)  # (b, num_states, c)
        return state_repr, assignment

    def target_distribution(self, batch: torch.Tensor) -> torch.Tensor:
        """
        Compute the target distribution p_ij from the soft assignments q_ij, as in
        section 3.1.3, Equation 3 of Xie/Girshick/Farhadi:
        p_ij = (q_ij^2 / f_j) / sum_j' (q_ij'^2 / f_j'), where f_j = sum_i q_ij.
        This is used in the KL-divergence loss function.

        :param batch: [batch size, number of clusters] Tensor of dtype float
        :return: [batch size, number of clusters] Tensor of dtype float
        """
        weight = (batch ** 2) / torch.sum(batch, 0)
        return (weight.t() / torch.sum(weight, 1)).t()

    def loss(self, assignment):
        # Flatten to (b*t, num_states); a small epsilon keeps log() finite.
        flattened_assignment = assignment.view(-1, assignment.size(-1)) + 1e-10
        target = self.target_distribution(flattened_assignment).detach()
        # KL(P || Q), averaged over all assignments in the batch.
        return self.loss_fn(flattened_assignment.log(), target) / flattened_assignment.size(0)
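
    # Hedged usage note: in DEC-style training the assignments returned by
    # forward() are typically fed straight into loss(), e.g.
    #
    #   state_repr, assignment = model(features)   # features: (b, t, c)
    #   kl = model.loss(assignment)                # scalar KL(P || Q)
    #   kl.backward()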

    def get_cluster_centers(self) -> torch.Tensor:
        """
        Get the cluster centers.

        :return: [number of clusters, hidden dimension] Tensor of dtype float
        """
        return self.assignment.get_cluster_centers()
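

if __name__ == "__main__":
    # Smoke test added for illustration (a hedged sketch; the shapes below are
    # assumptions based on the docstrings, not part of the original repository).
    b, t, c, k = 2, 100, 32, 6   # batch, time points, channels, states
    model = StateEx(hidden_dim=c, num_states=k)
    features = torch.randn(b, t, c)
    state_repr, assignment = model(features)
    print(state_repr.shape)               # torch.Size([2, 6, 32])
    print(assignment.shape)               # torch.Size([2, 100, 6])
    print(model.loss(assignment).item())  # scalar KL loss value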