"""This module contains classes for analyzing the texts of a corpus to accumulate
statistical information about word occurrences."""

import itertools
import logging
import multiprocessing as mp
import sys
from collections import Counter

import numpy as np
import scipy.sparse as sps

from gensim import utils
from gensim.models.word2vec import Word2Vec

logger = logging.getLogger(__name__)


def _ids_to_words(ids, dictionary):
    """Convert an iterable of ids to their corresponding words using a dictionary.
    Abstract away the differences between the HashDictionary and the standard one.

    Parameters
    ----------
    ids: dict
        Dictionary of ids and their words.
    dictionary: :class:`~gensim.corpora.dictionary.Dictionary`
        Input gensim dictionary

    Returns
    -------
    set
        Corresponding words.

    Examples
    --------
    .. sourcecode:: pycon

        >>> from gensim.corpora.dictionary import Dictionary
        >>> from gensim.topic_coherence import text_analysis
        >>>
        >>> dictionary = Dictionary()
        >>> ids = {1: 'fake', 4: 'cats'}
        >>> dictionary.id2token = {1: 'fake', 2: 'tokens', 3: 'rabbids', 4: 'cats'}
        >>>
        >>> text_analysis._ids_to_words(ids, dictionary)
        {'cats', 'fake'}

    """
    if not dictionary.id2token:  # may not be initialized in the standard gensim.corpora.Dictionary
        setattr(dictionary, 'id2token', {v: k for k, v in dictionary.token2id.items()})

    top_words = set()
    for word_id in ids:
        word = dictionary.id2token[word_id]
        if isinstance(word, set):
            top_words = top_words.union(word)
        else:
            top_words.add(word)

    return top_words
r   c                   @   sd   e Zd ZdZdd Zedd Zejdd Zddd	Zd
d Z	dd Z
dd Zdd Zdd ZdS )BaseAnalyzeraG  Base class for corpus and text analyzers.

    Attributes
    ----------
    relevant_ids : dict
        Mapping
    _vocab_size : int
        Size of vocabulary.
    id2contiguous : dict
        Mapping word_id -> number.
    log_every : int
        Interval for logging.
    _num_docs : int
        Number of documents.

    c                 C   s8   || _ t| j | _dd t| j D | _d| _d| _dS )a=  

        Parameters
        ----------
        relevant_ids : dict
            Mapping

        Examples
        --------
        .. sourcecode:: pycon

            >>> from gensim.topic_coherence import text_analysis
            >>> ids = {1: 'fake', 4: 'cats'}
            >>> base = text_analysis.BaseAnalyzer(ids)
            >>> # should return {1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0
            >>> print(base.relevant_ids, base._vocab_size, base.id2contiguous, base.log_every, base._num_docs)
            {1: 'fake', 4: 'cats'} 2 {1: 0, 4: 1} 1000 0

        c                 S   s   i | ]\}}||qS r   r   )r   nr   r   r   r
   r   m   r   z)BaseAnalyzer.__init__.<locals>.<dictcomp>i  r   N)relevant_idslen_vocab_size	enumerateid2contiguous	log_every	_num_docs)selfr   r   r   r
   __init__W   s
    zBaseAnalyzer.__init__c                 C   s   | j S N)r!   r"   r   r   r
   num_docsq   s    zBaseAnalyzer.num_docsc                 C   s.   || _ | j | j dkr*td| jj| j  d S )Nr   z&%s accumulated stats from %d documents)r!   r    loggerinfo	__class____name__)r"   Znumr   r   r
   r&   u   s    
Nc                 C   s   t dd S )Nz+Base classes should implement analyze_text.NotImplementedError)r"   textdoc_numr   r   r
   analyze_text}   s    zBaseAnalyzer.analyze_textc                 C   s,   t |tst|ds| |S | j| S d S )N__iter__)r   strhasattrget_occurrencesget_co_occurrences)r"   Zword_or_wordsr   r   r
   __getitem__   s    
zBaseAnalyzer.__getitem__c                 C   s   |  | j| S LReturn number of docs the word occurs in, once `accumulate` has been called.)_get_occurrencesr   r"   r   r   r   r
   r3      s    zBaseAnalyzer.get_occurrencesc                 C   s   t dd S )Nz)Base classes should implement occurrencesr+   r9   r   r   r
   r8      s    zBaseAnalyzer._get_occurrencesc                 C   s   |  | j| | j| S OReturn number of docs the words co-occur in, once `accumulate` has been called.)_get_co_occurrencesr   r"   word_id1word_id2r   r   r
   r4      s    zBaseAnalyzer.get_co_occurrencesc                 C   s   t dd S )Nz,Base classes should implement co_occurrencesr+   r=   r   r   r
   r<      s    z BaseAnalyzer._get_co_occurrences)N)r*   
__module____qualname____doc__r#   propertyr&   setterr/   r5   r3   r8   r4   r<   r   r   r   r
   r   F   s   


r   c                       s8   e Zd ZdZ fddZdd Zdd Zdd	 Z  ZS )


class UsesDictionary(BaseAnalyzer):
    """A BaseAnalyzer that uses a Dictionary, hence can translate tokens to counts.
    The standard BaseAnalyzer can only deal with token ids since it doesn't have the token2id
    mapping.

    Attributes
    ----------
    relevant_words : set
        Set of words that occurrences should be accumulated for.
    dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
        Dictionary based on text
    token2id : dict
        Mapping from :class:`~gensim.corpora.dictionary.Dictionary`

    """

    def __init__(self, relevant_ids, dictionary):
        """

        Parameters
        ----------
        relevant_ids : dict
            Mapping
        dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
            Dictionary based on text

        Examples
        --------
        .. sourcecode:: pycon

            >>> from gensim.topic_coherence import text_analysis
            >>> from gensim.corpora.dictionary import Dictionary
            >>>
            >>> ids = {1: 'foo', 2: 'bar'}
            >>> dictionary = Dictionary([['foo', 'bar', 'baz'], ['foo', 'bar', 'bar', 'baz']])
            >>> udict = text_analysis.UsesDictionary(ids, dictionary)
            >>>
            >>> print(udict.relevant_words)
            {'foo', 'baz'}

        """
        super(UsesDictionary, self).__init__(relevant_ids)
        self.relevant_words = _ids_to_words(self.relevant_ids, dictionary)
        self.dictionary = dictionary
        self.token2id = dictionary.token2id

    def get_occurrences(self, word):
        """Return number of docs the word occurs in, once `accumulate` has been called."""
        try:
            word_id = self.token2id[word]
        except KeyError:
            word_id = word
        return self._get_occurrences(self.id2contiguous[word_id])

    def _word2_contiguous_id(self, word):
        try:
            word_id = self.token2id[word]
        except KeyError:
            word_id = word
        return self.id2contiguous[word_id]

    def get_co_occurrences(self, word1, word2):
        """Return number of docs the words co-occur in, once `accumulate` has been called."""
        word_id1 = self._word2_contiguous_id(word1)
        word_id2 = self._word2_contiguous_id(word2)
        return self._get_co_occurrences(word_id1, word_id2)


class InvertedIndexBased(BaseAnalyzer):
    """Analyzer that builds up an inverted index to accumulate stats."""

    def __init__(self, *args):
        """

        Parameters
        ----------
        args : dict
            Look at :class:`~gensim.topic_coherence.text_analysis.BaseAnalyzer`

        Examples
        --------
        .. sourcecode:: pycon

            >>> from gensim.topic_coherence import text_analysis
            >>>
            >>> ids = {1: 'fake', 4: 'cats'}
            >>> ininb = text_analysis.InvertedIndexBased(ids)
            >>>
            >>> print(ininb._inverted_index)
            [set() set()]

        """
        super(InvertedIndexBased, self).__init__(*args)
        self._inverted_index = np.array([set() for _ in range(self._vocab_size)])

    def _get_occurrences(self, word_id):
        return len(self._inverted_index[word_id])

    def _get_co_occurrences(self, word_id1, word_id2):
        s1 = self._inverted_index[word_id1]
        s2 = self._inverted_index[word_id2]
        return len(s1.intersection(s2))

    def index_to_dict(self):
        contiguous2id = {n: word_id for word_id, n in self.id2contiguous.items()}
        return {contiguous2id[n]: doc_id_set for n, doc_id_set in enumerate(self._inverted_index)}


class CorpusAccumulator(InvertedIndexBased):
    """Gather word occurrence stats from a corpus by iterating over its BoW representation."""

    def analyze_text(self, text, doc_num=None):
        """Build an inverted index from a sequence of corpus texts."""
        doc_words = frozenset(x[0] for x in text)
        top_ids_in_doc = self.relevant_ids.intersection(doc_words)
        for word_id in top_ids_in_doc:
            self._inverted_index[self.id2contiguous[word_id]].add(self._num_docs)

    def accumulate(self, corpus):
        for document in corpus:
            self.analyze_text(document)
            self.num_docs += 1
        return self


class WindowedTextsAnalyzer(UsesDictionary):
    """Gather some stats about relevant terms of a corpus by iterating over windows of texts."""

    def __init__(self, relevant_ids, dictionary):
        """

        Parameters
        ----------
        relevant_ids : set of int
            Relevant id
        dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
            Dictionary instance with mappings for the relevant_ids.

        """
        super(WindowedTextsAnalyzer, self).__init__(relevant_ids, dictionary)
        self._none_token = self._vocab_size  # see _iter_texts for use of none token

    def accumulate(self, texts, window_size):
        relevant_texts = self._iter_texts(texts)
        windows = utils.iter_windows(
            relevant_texts, window_size, ignore_below_size=False, include_doc_num=True)

        for doc_num, virtual_document in windows:
            self.analyze_text(virtual_document, doc_num)
            self.num_docs += 1
        return self

    def _iter_texts(self, texts):
        dtype = np.uint16 if np.iinfo(np.uint16).max >= self._vocab_size else np.uint32
        for text in texts:
            if self.text_is_relevant(text):
                yield np.fromiter((
                    self.id2contiguous[self.token2id[w]] if w in self.relevant_words
                    else self._none_token
                    for w in text), dtype=dtype, count=len(text))

    def text_is_relevant(self, text):
        """Check if the text has any relevant words."""
        for word in text:
            if word in self.relevant_words:
                return True
        return False


class InvertedIndexAccumulator(WindowedTextsAnalyzer, InvertedIndexBased):
    """Build an inverted index from a sequence of corpus texts."""

    def analyze_text(self, window, doc_num=None):
        for word_id in window:
            if word_id != self._none_token:
                self._inverted_index[word_id].add(self._num_docs)
dd Zdd Zdd Z  ZS )WordOccurrenceAccumulatorzOAccumulate word occurrences and co-occurrences from a sequence of corpus texts.c                    s^   t t| j|  tj| jdd| _tj| j| jfdd| _	tj| jd ft
d| _t | _d S )Nrm   )rj   rb   )rF   rq   r#   rS   Zzerosr   _occurrencesspsZ
lil_matrix_co_occurrencesbool_uniq_wordsr   _counterrV   rI   r   r
   r#   I  s
    z"WordOccurrenceAccumulator.__init__c                 C   s   | j jS r$   )r)   r*   r%   r   r   r
   __str__Q  s    z!WordOccurrenceAccumulator.__str__c                 C   s$   | j  | _ | || |   | S r$   )rt   Ztolilpartial_accumulate_symmetrize)r"   rg   rh   r   r   r
   rc   T  s    z$WordOccurrenceAccumulator.accumulatec                    sR   d| _ d| _| j  tt| || | j D ]\}}| j|  |7  < q2| S )aS  Meant to be called several times to accumulate partial results.

        Notes
        -----
        The final accumulation should be performed with the `accumulate` method as opposed to this one.
        This method does not ensure the co-occurrence matrix is in lil format and does not
        symmetrize it after accumulation.

        N)	_current_doc_num_token_at_edgerw   clearrF   rq   rc   r   rt   )r"   rg   rh   Zcombork   rI   r   r
   ry   Z  s    

z,WordOccurrenceAccumulator.partial_accumulateNc                 C   sV   |  || | jd d }| rR| j|  d7  < | jtt	|d d d S )Nr{   rb   r      )
_slide_windowrv   anyrr   rw   update	itertoolscombinationsrS   Znonzero)r"   rp   r.   maskr   r   r
   r/   n  s
    z&WordOccurrenceAccumulator.analyze_textc                 C   sX   || j kr0d| jd d < d| jt|< || _ nd| j| j< d| j|d < |d | _d S )NFTr{   r   )r|   rv   rS   uniquer}   )r"   rp   r.   r   r   r
   r   u  s    
z'WordOccurrenceAccumulator._slide_windowc                 C   s6   | j }|| j ||j tj| ddd | _ dS )a  Word pairs may have been encountered in (i, j) and (j, i) order.

        Notes
        -----
        Rather than enforcing a particular ordering during the update process,
        we choose to symmetrize the co-occurrence matrix after accumulation has completed.

        r   rm   )Zoffsetsrj   N)rt   Zsetdiagrr   Trs   ZdiagsZdiagonal)r"   Zco_occr   r   r
   rz     s    	z%WordOccurrenceAccumulator._symmetrizec                 C   s
   | j | S r$   )rr   r9   r   r   r
   r8     s    z*WordOccurrenceAccumulator._get_occurrencesc                 C   s   | j ||f S r$   )rt   r=   r   r   r
   r<     s    z-WordOccurrenceAccumulator._get_co_occurrencesc                 C   s4   |  j |j 7  _ |  j|j7  _|  j|j7  _d S r$   )rr   rt   r!   )r"   otherr   r   r
   merge  s    zWordOccurrenceAccumulator.merge)N)r*   r@   rA   rB   r#   rx   rc   ry   r/   r   rz   r8   r<   r   rO   r   r   rI   r
   rq   F  s   
rq   c                   @   s   e Zd ZdZdd ZdS ) PatchedWordOccurrenceAccumulatorzaMonkey patched for multiprocessing worker usage, to move some of the logic to the master process.c                 C   s   |S r$   r   )r"   rg   r   r   r
   rf     s    z,PatchedWordOccurrenceAccumulator._iter_textsN)r*   r@   rA   rB   rf   r   r   r   r
   r     s   r   c                       sZ   e Zd ZdZ fddZdd Zdd Zdd	 Zd
d Zdd Z	dddZ
dd Z  ZS )!ParallelWordOccurrenceAccumulatora  Accumulate word occurrences in parallel.

    Attributes
    ----------
    processes : int
        Number of processes to use; must be at least two.
    args :
        Should include `relevant_ids` and `dictionary` (see :class:`~UsesDictionary.__init__`).
    kwargs :
        Can include `batch_size`, which is the number of docs to send to a worker at a time.
        If not included, it defaults to 64.
    c                    s<   t t| j|  |dk r$td| || _|dd| _d S )Nr   z9Must have at least 2 processes to run in parallel; got %d
batch_size@   )rF   r   r#   
ValueError	processesgetr   )r"   r   rW   kwargsrI   r   r
   r#     s    z*ParallelWordOccurrenceAccumulator.__init__c                 C   s   d| j j| j| jf S )Nz%s(processes=%s, batch_size=%s))r)   r*   r   r   r%   r   r   r
   rx     s    z)ParallelWordOccurrenceAccumulator.__str__c                 C   sf   |  |\}}}z| ||| d}W n$ tyJ   td| j d}Y n0 | ||||}| |S )NFz9stats accumulation interrupted; <= %d documents processedT)start_workersqueue_all_textsKeyboardInterruptr'   warnr!   terminate_workersmerge_accumulators)r"   rg   rh   workersinput_qoutput_qinterruptedaccumulatorsr   r   r
   rc     s    
z,ParallelWordOccurrenceAccumulator.accumulatec                 C   sb   t j| jd}t  }g }t| jD ]2}t| j| j}t||||}|  |	| q$|||fS )a  Set up an input and output queue and start processes for each worker.

        Notes
        -----
        The input queue is used to transmit batches of documents to the workers.
        The output queue is used by workers to transmit the WordOccurrenceAccumulator instances.

        Parameters
        ----------
        window_size : int

        Returns
        -------
        (list of lists)
            Tuple of (list of workers, input queue, output queue).
        )maxsize)
mpZQueuer   rT   r   r   r   AccumulatingWorkerstartappend)r"   rh   r   r   r   rQ   accumulatorworkerr   r   r
   r     s    z/ParallelWordOccurrenceAccumulator.start_workersc                 c   sD   g }|  |D ]&}|| t|| jkr|V  g }q|r@|V  dS )z\Return a generator over the given texts that yields batches of `batch_size` texts at a time.N)rf   r   r   r   )r"   rg   batchr-   r   r   r
   yield_batches  s    
z/ParallelWordOccurrenceAccumulator.yield_batchesc                    s   t | |D ]r\}}|j|dd | j| j }|  jt fdd|D 7  _|| j| j k rtd|d |d | j | j qdS )zSequentially place batches of texts on the given queue until `texts` is consumed.
        The texts are filtered so that only those with at least one relevant token are queued.
        Tblockc                 3   s   | ]}t |  d  V  qdS )rb   N)r   )r   docrh   r   r
   r`     r   zDParallelWordOccurrenceAccumulator.queue_all_texts.<locals>.<genexpr>zG%d batches submitted to accumulate stats from %d documents (%d virtual)rb   N)	r   r   putr!   r    sumr'   r(   r   )r"   qrg   rh   	batch_numr   Zbeforer   r   r
   r     s     z1ParallelWordOccurrenceAccumulator.queue_all_textsFc                 C   s~   |s|D ]}|j ddd qg }t|t|kr@||  q tdt| |D ]}| rT|  qT|  |  |S )a_  Wait until all workers have transmitted their WordOccurrenceAccumulator instances, then terminate each.

        Warnings
        --------
        We do not use join here because it has been shown to have some issues
        in Python 2.7 (and even in later versions). This method also closes both the input and output queue.
        If `interrupted` is False (normal execution), a None value is placed on the input queue for
        each worker. The workers are looking for this sentinel value and interpret it as a signal to
        terminate themselves. If `interrupted` is True, a KeyboardInterrupt occurred. The workers are
        programmed to recover from this and continue on to transmit their results before terminating.
        So in this instance, the sentinel values are not queued, but the rest of the execution
        continues as usual.

        NTr   z+%d accumulators retrieved from output queue)	r   r   r   r   r'   r(   Zis_aliveZ	terminateclose)r"   r   r   r   r   rQ   r   r   r   r   r
   r     s    
z3ParallelWordOccurrenceAccumulator.terminate_workersc                 C   s<   t | j| j}|D ]}|| q|  td|j |S )zMerge the list of accumulators into a single `WordOccurrenceAccumulator` with all
        occurrence and co-occurrence counts, and a `num_docs` that reflects the total observed
        by all the individual accumulators.

        z:accumulated word occurrence stats for %d virtual documents)rq   r   r   r   rz   r'   r(   r&   )r"   r   r   Zother_accumulatorr   r   r
   r     s    z4ParallelWordOccurrenceAccumulator.merge_accumulators)F)r*   r@   rA   rB   r#   rx   rc   r   r   r   r   r   rO   r   r   rI   r
   r     s   
 r   c                       s8   e Zd ZdZ fddZdd Zdd Zdd	 Z  ZS )
r   z.Accumulate stats from texts fed in from queue.c                    s4   t t|   || _|| _|| _tj| j_|| _	d S r$   )
rF   r   r#   r   r   r   sysr   r    rh   )r"   r   r   r   rh   rI   r   r
   r#   .  s    
zAccumulatingWorker.__init__c                 C   sl   z\z|    W nB ty6   td| jj| jj Y n tyP   t	d Y n0 W | 
  n
| 
  0 d S )Nz,%s interrupted after processing %d documentsz'worker encountered unexpected exception)_runr   r'   r(   r)   r*   r   r&   	ExceptionZ	exceptionreply_to_masterr%   r   r   r
   run6  s    zAccumulatingWorker.runc                 C   sz   d}d}|d7 }| j jdd}|d u r2td qd| j|| j |t|7 }td||| jj qtd|| jj d S )	Nr{   r   rb   Tr   z$observed sentinel value; terminatingz7completed batch %d; %d documents processed (%d virtual)z9finished all batches; %d documents processed (%d virtual))	r   r   r'   debugr   ry   rh   r   r&   )r"   r   Zn_docsZdocsr   r   r
   r   B  s"    

zAccumulatingWorker._runc                 C   s*   t d | jj| jdd t d d S )Nz.serializing accumulator to return to master...Fr   zaccumulator serialized)r'   r(   r   r   r   r%   r   r   r
   r   V  s    
z"AccumulatingWorker.reply_to_master)	r*   r@   rA   rB   r#   r   r   r   rO   r   r   rI   r
   r   +  s
   r   c                       sR   e Zd ZdZd fdd	Zdd Zdd Zd	d
 Zdd Zdd Z	dd Z
  ZS )WordVectorsAccumulatora  Accumulate context vectors for words using word vector embeddings.

    Attributes
    ----------
    model: Word2Vec (:class:`~gensim.models.keyedvectors.KeyedVectors`)
        If None, a new Word2Vec model is trained on the given text corpus. Otherwise,
        it should be a pre-trained Word2Vec context vectors.
    model_kwargs:
        if model is None, these keyword arguments will be passed through to the Word2Vec constructor.
    Nc                    s"   t t| || || _|| _d S r$   )rF   r   r#   modelmodel_kwargs)r"   r   r   r   r   rI   r   r
   r#   h  s    zWordVectorsAccumulator.__init__c                    s$   t t|}t  fdd|D S )Nc                 3   s   | ]}| j vr|V  qd S r$   r   r   r   r%   r   r
   r`   o  r   z6WordVectorsAccumulator.not_in_vocab.<locals>.<genexpr>)r   r   Zflatten)r"   wordsZ
uniq_wordsr   r%   r
   not_in_vocabm  s    z#WordVectorsAccumulator.not_in_vocabc                 C   s<   z| j |  W n ty,   | jj| }Y n0 | j|dS )r7   rk   )r   rJ   r   r   r   Zget_vecattr)r"   r   r   r   r
   r3   q  s
    z&WordVectorsAccumulator.get_occurrencesc                 C   s   t ddS )r;   z6Word2Vec model does not support co-occurrence countingNr+   )r"   rM   rN   r   r   r
   r4   y  s    z)WordVectorsAccumulator.get_co_occurrencesc                 C   s   | j d urtd | S | j }|d ur2||d< |dd|d< |dd|d< |dd|d< tf i || _ | j | | j j|| j j	| j j
d	 | j j| _ | S )
Nz3model is already trained; no accumulation necessaryrp   Z	min_countrb   ZsgZhwr   Zhs)Ztotal_examplesepochs)r   r'   r   r   copyr   r   Zbuild_vocabZtrainZcorpus_countr   Zwv)r"   rg   rh   r   r   r   r
   rc   }  s    



z!WordVectorsAccumulator.accumulatec                 C   s"   |  |}|  |}| j||S r$   )_words_with_embeddingsr   Zn_similarity)r"   Zids1Zids2Zwords1Zwords2r   r   r
   ids_similarity  s    

z%WordVectorsAccumulator.ids_similarityc                    s4   t |ds|g} fdd|D } fdd|D S )Nr0   c                    s   g | ]} j j| qS r   )r   r   )r   r   r%   r   r
   rR     r   zAWordVectorsAccumulator._words_with_embeddings.<locals>.<listcomp>c                    s   g | ]}| j v r|qS r   r   r   r%   r   r
   rR     r   )r2   )r"   r   r   r   r%   r
   r     s    
z-WordVectorsAccumulator._words_with_embeddings)N)r*   r@   rA   rB   r#   r   r3   r4   rc   r   r   rO   r   r   rI   r
   r   \  s   r   )!rB   r   ZloggingZmultiprocessingr   r   collectionsr   ZnumpyrS   Zscipy.sparseZsparsers   Zgensimr   Zgensim.models.word2vecr   Z	getLoggerr*   r'   r   r   rE   rP   r]   rd   ro   rq   r   r   ZProcessr   r   r   r   r   r
   <module>   s.   
-OC(,	T 1