"model.layers[4].set_weights([embedding_matrix])" code answer

model.layers[4].set_weights([embedding_matrix])

import numpy as np


def load_embedding_matrix(embedding_file):
    """Read a whitespace-separated embedding file and build a weight matrix."""
    print("read embedding from: %s" % embedding_file)
    d = {}
    n = 0
    with open(embedding_file, "r") as f:
        for line in f:
            n += 1
            # each line looks like "w123 0.1 0.2 ...": a token id, then its vector
            w, v = line.strip().split(" ", 1)
            d[int(w[1:])] = v
    # embedding dimension inferred from the last vector read
    dim = len(v.split(" "))

    # reserve two extra rows: one for padding and one for missing (unknown) words
    emb_matrix = np.zeros((n + 2, dim), dtype=np.float32)
    for key, val in d.items():
        emb_matrix[key] = np.asarray(val.split(" "), dtype=np.float32)
    return emb_matrix
Colorful Caribou
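
For context, the matrix returned by the loader above is what the query line feeds into set_weights. A minimal sketch of that wiring with TensorFlow Keras, assuming a hypothetical embeddings.txt in the expected "w<id> v1 v2 ..." format and a toy model in which the Embedding layer sits at index 0 rather than 4:

from tensorflow import keras

embedding_matrix = load_embedding_matrix("embeddings.txt")  # hypothetical path
vocab_size, dim = embedding_matrix.shape

# toy classifier; the pretrained matrix is frozen inside the Embedding layer
model = keras.Sequential([
    keras.Input(shape=(100,)),
    keras.layers.Embedding(vocab_size, dim, trainable=False),
    keras.layers.GlobalAveragePooling1D(),
    keras.layers.Dense(1, activation="sigmoid"),
])

# same call as in the query; in this toy model the Embedding layer is layers[0], not layers[4]
model.layers[0].set_weights([embedding_matrix])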

model.layers[4].set_weights([embedding_matrix])

import os
import json

import numpy as np
from gensim import models


def loadDict(path):
    # helper assumed by the original snippet: load a dict from a JSON file
    with open(path, "r") as f:
        return json.load(f)


def load_embedding_matrix(wv_path, int2vocabPath="dataset/training_i2v.json"):
    int2vocab = loadDict(int2vocabPath)
    vocab2int = loadDict(int2vocabPath.replace("i2v", "v2i"))
    vocab_size = vocab2int["<unk>"] + 1
    assert vocab_size == len(int2vocab.keys()), "Here must be a global dict, no matter static or nonstatic!"
    # the embedding size is encoded as the last dash-separated field of the file name
    embedding_size = int(wv_path.split("-")[-1])
    # initialise every row randomly; known words are overwritten below
    embeddings = np.random.uniform(low=-0.05, high=0.05, size=(vocab_size, embedding_size))
    if "glove" in wv_path.split("/"):
        # GloVe vectors stored in word2vec text format
        model = models.KeyedVectors.load_word2vec_format(wv_path, binary=False)
        embeddings[vocab_size - 1] = model['<unk>']
    else:
        # trained gensim Word2Vec model; use the KeyedVectors view so model[word]
        # indexing also works on gensim 4
        model = models.Word2Vec.load(wv_path).wv
        # build the <unk> vector as the average of the infrequent words
        infrequentWords = loadDict(os.path.dirname(wv_path) + "/infrequent.json")
        tmp = np.zeros([embedding_size, ])
        for w in infrequentWords[str(2)]:
            tmp += model[w]
        embeddings[vocab_size - 1] = tmp / len(infrequentWords[str(2)])
    # index 0 is left as the random padding row; fill the remaining rows from the model
    for i in range(1, vocab_size - 1):
        word = int2vocab[str(i)]
        embeddings[i] = model[word]
    return embeddings
Colorful Caribou
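
As an alternative to calling set_weights on an already built model, the matrix returned by this loader can be handed to the Embedding layer at construction time through a constant initializer (the usual TF 2.x Keras pattern for pretrained embeddings). A minimal sketch; the vector file path is a hypothetical example whose dash-suffixed name matches what the loader parses:

from tensorflow import keras

embedding_matrix = load_embedding_matrix("wordvectors/w2v-300")  # hypothetical path
vocab_size, embedding_size = embedding_matrix.shape

embedding_layer = keras.layers.Embedding(
    vocab_size,
    embedding_size,
    embeddings_initializer=keras.initializers.Constant(embedding_matrix),
    trainable=False,  # keep the pretrained vectors fixed during training
)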
