Graph Spectral Clustering for Codenames

Codenames is a party game in which players give one-word clues that relate to several target words at once. Can we use Graph Spectral Clustering to find groups of similar words?

We use GloVe embeddings trained on Wikipedia as vectorized representations of words.

In [2]:
from lib.datasets import load_wikipedia_wordvecs
from lib.spectral_clustering import similarity_matrix, laplacian_matrix, spectral_clustering
import numpy as np
import matplotlib.pyplot as plt
In [3]:
# Unpack the vocabulary (list of word strings) and their embedding vectors
# from the loader's returned dict.
# NOTE(review): relies on the dict's insertion order being "words" then
# "vecs" — confirm against load_wikipedia_wordvecs before trusting this.
words, vecs = load_wikipedia_wordvecs().values()
In [4]:
def similarity_matrix(data, s=1):
    """Pairwise scaled-distance matrix for the rows of ``data``.

    Entry ``[i, j]`` is ``s * ||data[i] - data[j]|| / (scale[i] + scale[j])``,
    where ``scale[i]`` is the total Euclidean distance from point ``i`` to
    every point in ``data``.

    NOTE(review): despite the name, larger entries mean *less* similar —
    this is a normalized distance. The name is kept because this cell
    deliberately overrides the version imported from lib.spectral_clustering.

    Parameters
    ----------
    data : (n, d) array-like of row vectors.
    s : float, uniform scale factor applied to every entry.

    Returns
    -------
    (n, n) symmetric ndarray with a zero diagonal.
    """
    data = np.asarray(data)
    # All pairwise distances in one vectorized pass (replaces the original
    # O(n^2) Python double loop with identical values).
    dists = np.linalg.norm(data[:, None, :] - data[None, :, :], axis=2)
    # scale[i] = sum of distances from point i to all points.
    # BUGFIX: the original used axis=0 inside the norm, which summed
    # column-wise norms of the difference matrix instead of per-row
    # point-to-point distances.
    scale = dists.sum(axis=1)
    return s * dists / (scale[:, None] + scale[None, :])
In [5]:
# NOTE(review): this cell appears truncated by extraction — the codenames
# string literal is cut off mid-literal (the trailing backslash continues
# the string onto the next line) and its closing quote plus the split into
# a list are missing. Recover the full cell from the original notebook
# before running.
codenames = "penguin,Germany,spy,battery,stadium,opera,shop,ambulance,brush,forest,Mexico,beat,fire,whip,\
indices = np.array([words.index(c) for c in codenames])

# Select the embedding rows for the board words.
# Assumes vecs is an ndarray indexable by an integer array — TODO confirm.
word_vecs = vecs[indices]

# Pairwise scaled-distance matrix over the board words (uses the
# similarity_matrix defined in the cell above, which overrides the
# lib import).
s = similarity_matrix(word_vecs)
In [6]:
# Number of clusters to extract from the board words.
k = 14
# Cluster the board-word vectors. lform="rw" presumably selects the
# random-walk normalized Laplacian and metric="g" a Gaussian similarity
# with scale s=0.5 — TODO confirm against lib.spectral_clustering.
# with_eigen=True also returns the eigenvalues/eigenvectors used.
assns, (evals, evecs) = spectral_clustering(word_vecs, k=k, lform = "rw", metric="g", s=0.5, with_eigen=True)
In [7]:
# Group the codenames by cluster assignment: one sublist per cluster id.
[[codenames[j] for j in range(len(codenames)) if assns[j] == i] for i in range(k)]
 ['ambulance', 'fire']]
In [8]:
def avg_vec(words, candidates, vocabulary):
    """Return the vocabulary word whose vector is nearest the mean of *words*.

    Parameters
    ----------
    words : iterable of word strings to average (each must be in vocabulary).
    candidates : (N, d) array of candidate word vectors, one row per word.
    vocabulary : list of N word strings, one per row of candidates.

    Returns
    -------
    The word in vocabulary whose vector has the smallest Euclidean distance
    to the mean of the given words' vectors. NOTE: the input words are
    themselves candidates, so the result may be one of them.
    """
    mean = np.mean([candidates[vocabulary.index(word)] for word in words], axis=0).reshape((1, -1))
    # BUGFIX: axis=1 yields one distance per candidate row (length N).
    # The original axis=0 produced d column-wise norms, so argmin returned
    # an index into the wrong range entirely and indexed vocabulary
    # incorrectly.
    diff = np.linalg.norm(candidates - mean, axis=1)
    return vocabulary[np.argmin(diff)]
In [9]:
# Example: find the vocabulary word closest to the mean of 'fly' and 'Moscow'.
avg_vec(('fly', 'Moscow'), vecs, words)