PyTorch matrix factorization with fixed item matrix

I estimate ratings in a user-item matrix by decomposing the matrix X into two matrices P and Q using matrix factorization in PyTorch, with a loss function L(X - PQ).

Say the rows of X correspond to users and x is a new user's row, so the new matrix X' is X with x appended as an extra row. I now want to minimize L(X' - P'Q) = L(X - PQ) + L(x - x_pQ), where P' is P with a new row x_p appended. Since P and Q are already trained, I only want to train x_p, the new user's latent row, while leaving Q fixed.

So my question is: is there a way in PyTorch to train the matrix factorization model for P while keeping Q fixed?
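
In plain autograd terms, what I'm after looks something like this sketch (names and sizes here are purely illustrative; x_p is the only trainable tensor and Q is treated as a constant):

import torch
import torch.nn.functional as F

emb_size, num_partners = 100, 500                 # illustrative sizes
Q = torch.randn(emb_size, num_partners)           # trained item factors, kept fixed
x = torch.rand(num_partners)                      # the new user's observed ratings row
x_p = torch.randn(emb_size, requires_grad=True)   # the new user's latent row to fit

optimizer = torch.optim.Adam([x_p], lr=0.01)
for _ in range(100):
    loss = F.mse_loss(x_p @ Q, x)                 # L(x - x_p Q)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()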

Code I'm working with:

import torch
import torch.nn as nn
import torch.nn.functional as F

# Use the GPU when available, otherwise fall back to the CPU.
dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MatrixFactorizationWithBiasXavier(nn.Module):
    def __init__(self, num_people, num_partners, bias=(-0.01, 0.01), emb_size=100):
        super(MatrixFactorizationWithBiasXavier, self).__init__()
        self.person_emb = nn.Embedding(num_people, emb_size)
        self.person_bias = nn.Embedding(num_people, 1)
        self.partner_emb = nn.Embedding(num_partners, emb_size)
        self.partner_bias = nn.Embedding(num_partners, 1)
        torch.nn.init.xavier_uniform_(self.person_emb.weight)
        torch.nn.init.xavier_uniform_(self.partner_emb.weight)
        self.person_bias.weight.data.uniform_(bias[0], bias[1])
        self.partner_bias.weight.data.uniform_(bias[0], bias[1])

    def forward(self, u, v):
        # Look up the biases while u and v are still index tensors.
        bias_u = self.person_bias(u).squeeze()
        bias_v = self.partner_bias(v).squeeze()
        u = self.person_emb(u)
        v = self.partner_emb(v)
        # u*v is an element-wise multiplication; summing over the embedding
        # dimension gives the dot product for each (person, partner) pair.
        return torch.sigmoid((u * v).sum(1) + bias_u + bias_v)

def test(model, df_test, verbose=False):
    model.eval()
    # .to(dev) moves the tensors to whichever device (GPU or CPU) dev points to.
    people = torch.LongTensor(df_test.id.values).to(dev)
    partners = torch.LongTensor(df_test.pid.values).to(dev)
    decision = torch.FloatTensor(df_test.decision.values).to(dev)
    with torch.no_grad():  # no gradients needed during evaluation
        y_hat = model(people, partners)
        loss = F.mse_loss(y_hat, decision)
    if verbose:
        print('test loss %.3f ' % loss.item())
    return loss.item()

def train(model, df_train, epochs=100, learning_rate=0.01, weight_decay=1e-5, verbose=False):
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    model.train()

    for epoch in range(epochs):
        # From numpy arrays to PyTorch tensors;
        # .to(dev) moves them to whichever device (GPU or CPU) dev points to.
        people = torch.LongTensor(df_train.id.values).to(dev)
        partners = torch.LongTensor(df_train.pid.values).to(dev)
        decision = torch.FloatTensor(df_train.decision.values).to(dev)

        # Calls the forward method of the model.
        y_hat = model(people, partners)
        # Mean squared error loss.
        loss = F.mse_loss(y_hat, decision)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if verbose and epoch % 100 == 0:
            print(loss.item())
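
For context, a minimal usage sketch, assuming df_train and df_test DataFrames with the integer-encoded id and pid columns and the decision column that the functions above expect (the sizes are placeholders):

# Illustrative sizes; use the real number of distinct ids/pids in your data.
mf_model = MatrixFactorizationWithBiasXavier(num_people=1000, num_partners=1000).to(dev)
train(mf_model, df_train, epochs=1000, learning_rate=0.01, verbose=True)
test(mf_model, df_test, verbose=True)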

1 Answer

Found a solution. It turns out I can register a hook on the embedding I want to keep fixed (partner_emb, which is my Q) so that its gradient is zeroed:

# Zero the gradient flowing into Q so the optimizer leaves partner_emb unchanged.
mask = torch.zeros_like(mf_model.partner_emb.weight)
mf_model.partner_emb.weight.register_hook(lambda grad: grad * mask)
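
An alternative, in case it helps (this is a standard PyTorch freezing pattern, not part of the original answer): disable gradients for Q and hand the optimizer only the parameters that should still be trained.

# Freeze Q (and, if desired, its bias) so autograd computes no gradients for them.
mf_model.partner_emb.weight.requires_grad_(False)
mf_model.partner_bias.weight.requires_grad_(False)

# Optimize only the remaining (person-side) parameters.
optimizer = torch.optim.Adam(
    [p for p in mf_model.parameters() if p.requires_grad],
    lr=0.01, weight_decay=1e-5)

One caveat with the mask-hook version: if the optimizer was built with a nonzero weight_decay, Adam adds weight_decay * weight to the gradient inside step(), after the hook has already zeroed it, so the "fixed" Q still drifts slowly toward zero. Leaving the frozen parameters out of the optimizer, as above, avoids that.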