problem saving pre-trained fasttext vectors in "word2vec" format with _save_word2vec_format()

1k views

For a list of words I want to get their fasttext vectors and save them to a file in the same "word2vec" .txt format (word+space+vector in txt format).

This is what I did:

# Read the word list, stripping the trailing newline from each line
# (iterating a file yields lines that still end in "\n", which would
# otherwise become part of each looked-up word).
# NOTE: the original bound the file handle to `dict`, shadowing the builtin
# and never closing the file — a context manager avoids both problems.
with open("word_list.txt", "r") as word_file:  # the list of words I have
    word_list = [line.strip() for line in word_file]

path = "cc.en.300.bin"

model = load_facebook_model(path)

vectors = []
words = []

for word in word_list:
    # model.wv[...] avoids the deprecated model.__getitem__
    # (the DeprecationWarning in the quoted log).
    vectors.append(model.wv[word])
    words.append(word)

vectors_array = np.array(vectors)


I want to take the list `words` and the `numpy.ndarray` `vectors_array` and save them in the original .txt format.

I try to use the function from gensim "_save_word2vec_format":

def _save_word2vec_format(fname, vocab, vectors, fvocab=None, binary=False, total_vec=None):
    """Write the input-hidden weight matrix in the format used by the original
    C word2vec tool, for compatibility.

    Parameters
    ----------
    fname : str
        Path of the output vector file.
    vocab : dict
        Vocabulary; each value must expose ``count`` and ``index`` attributes.
    vectors : numpy.array
        Matrix of word vectors, one row per vocabulary entry.
    fvocab : str, optional
        If given, also write the vocabulary (word + count) to this path.
    binary : bool, optional
        If True, write binary word2vec format; otherwise plain text.
    total_vec : int, optional
        Explicit total number of vectors to declare in the header
        (for when document vectors are appended after the word vectors).
    """
    if not (vocab or vectors):
        raise RuntimeError("no input")
    if total_vec is None:
        total_vec = len(vocab)
    vector_size = vectors.shape[1]

    # Sort key shared by both output files: most frequent words first,
    # matching the ordering of the original C tool.
    def most_frequent_first(item):
        return -item[1].count

    if fvocab is not None:
        logger.info("storing vocabulary in %s", fvocab)
        with utils.open(fvocab, 'wb') as vout:
            for word, entry in sorted(iteritems(vocab), key=most_frequent_first):
                vout.write(utils.to_utf8("%s %s\n" % (word, entry.count)))
    logger.info("storing %sx%s projection weights into %s", total_vec, vector_size, fname)
    assert (len(vocab), vector_size) == vectors.shape
    with utils.open(fname, 'wb') as fout:
        # Header line: "<vector count> <vector size>".
        fout.write(utils.to_utf8("%s %s\n" % (total_vec, vector_size)))
        for word, entry in sorted(iteritems(vocab), key=most_frequent_first):
            row = vectors[entry.index]
            if binary:
                fout.write(utils.to_utf8(word) + b" " + row.astype(REAL).tostring())
            else:
                fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join(repr(val) for val in row))))

but I get the error:

INFO:gensim.models._fasttext_bin:loading 2000000 words for fastText model from cc.en.300.bin
INFO:gensim.models.word2vec:resetting layer weights
INFO:gensim.models.word2vec:Updating model with new vocabulary
INFO:gensim.models.word2vec:New added 2000000 unique words (50% of original 4000000) and increased the count of 2000000 pre-existing words (50% of original 4000000)
INFO:gensim.models.word2vec:deleting the raw counts dictionary of 2000000 items
INFO:gensim.models.word2vec:sample=1e-05 downsamples 6996 most-common words
INFO:gensim.models.word2vec:downsampling leaves estimated 390315457935 word corpus (70.7% of prior 552001338161)
INFO:gensim.models.fasttext:loaded (4000000, 300) weight matrix for fastText model from cc.en.300.bin
trials.py:42: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).
  vectors.append(model[word])
INFO:__main__:storing 8664x300 projection weights into arrays_to_txt_oct3.txt
loading the model for: en
finish loading the model for: en
len(vectors): 8664
len(words):  8664
shape of vectors_array (8664, 300)
mission launched!
Traceback (most recent call last):
  File "trials.py", line 102, in <module>
    _save_word2vec_format(YOUR_VEC_FILE_PATH, words, vectors_array, fvocab=None, binary=False, total_vec=None)
  File "trials.py", line 89, in _save_word2vec_format
    for word, vocab_ in sorted(iteritems(vocab), key=lambda item: -item[1].count):
  File "/cs/snapless/oabend/tailin/transdiv/lib/python3.7/site-packages/six.py", line 589, in iteritems
    return iter(d.items(**kw))
AttributeError: 'list' object has no attribute 'items'

I understand that it has to do with the second argument of the function, but I don't understand how I should turn a list of words into a dictionary object.

I tried doing that with:

#convert list of words into a dictionary
# NOTE(review): this maps index -> word ({int: str}), but the function
# iterates the vocab's values and reads .count and .index on them, so a
# plain dict of strings will still fail — the values must be vocab entries.
words_dict = {i:x for i,x in enumerate(words)}

But still got the error message:

Traceback (most recent call last):
  File "trials.py", line 99, in <module>
    _save_word2vec_format(YOUR_VEC_FILE_PATH, dict, vectors_array, fvocab=None, binary=False, total_vec=None)
  File "trials.py", line 77, in _save_word2vec_format
    total_vec = len(vocab)
TypeError: object of type '_io.TextIOWrapper' has no len()

I don't understand how to pass the word list in the format that the function expects...

1

There are 1 answers

1
gojomo On

You can directly import & re-use the Gensim KeyedVectors class to assemble your own (sub)set of word-vectors as one instance of KeyedVectors, then use its .save_word2vec_format() method.

For example, roughly this should work:

from gensim.models import KeyedVectors

# Read the word list, stripping the trailing newline each line carries
# (otherwise the "\n" becomes part of every key), and close the file
# promptly via a context manager.
with open("word_list.txt", "r") as words_file:  # your word-list as a text file
    words_list = [line.strip() for line in words_file]

fasttext_path = "cc.en.300.bin"
# Fix: the original called load_facebook_model(path), but the variable
# defined above is fasttext_path — that raises a NameError.
model = load_facebook_model(fasttext_path)

kv = KeyedVectors(vector_size=model.wv.vector_size)  # new empty KV object

# Vectors for words_list, in the same order; use .wv to avoid the
# deprecated model.__getitem__.
vectors = [model.wv[word] for word in words_list]

kv.add(words_list, vectors)  # adds those keys (words) & vectors as a batch

kv.save_word2vec_format('my_kv.vec', binary=False)