Commit 60f16c1

deploy: adf6dc7

Demirrr committed Nov 26, 2024
1 parent 8b5935d commit 60f16c1
Showing 12 changed files with 54 additions and 23 deletions.
11 changes: 6 additions & 5 deletions _modules/dicee/models/ensemble.html
@@ -91,11 +91,6 @@ Source code for dicee.models.ensemble

 import torch
 import copy

-import torch._dynamo
-
-torch._dynamo.config.suppress_errors = True
-
-
 class EnsembleKGE:
@@ -209,6 +204,12 @@ Source code for dicee.models.ensemble

         for opt in self.optimizers:
             opt.step()

+
+    def get_embeddings(self):
+        raise NotImplementedError("Not yet Implemented")
+

    """
    def __getattr__(self, name):
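Note: the commit adds get_embeddings() to EnsembleKGE only as a NotImplementedError stub. Purely as an illustration of what such a method could eventually return — not dicee's actual implementation — the sketch below averages the member models' embedding tables; the attribute names models, entity_embeddings, and relation_embeddings are assumptions about the wrapped models.

    import torch

    def get_embeddings_sketch(ensemble):
        # Hypothetical helper: average entity/relation embeddings across ensemble members.
        # Assumes each member exposes nn.Embedding tables named entity_embeddings / relation_embeddings.
        entity = torch.stack(
            [m.entity_embeddings.weight.detach().cpu() for m in ensemble.models]
        ).mean(dim=0)
        relation = torch.stack(
            [m.relation_embeddings.weight.detach().cpu() for m in ensemble.models]
        ).mean(dim=0)
        return entity, relation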
31 changes: 18 additions & 13 deletions _modules/dicee/trainer/model_parallelism.html
@@ -118,11 +118,15 @@ Source code for dicee.trainer.model_parallelism


-def find_good_batch_size(train_loader,ensemble_model,max_available_gpu_memory:float=0.05):
+def find_good_batch_size(train_loader,ensemble_model, max_available_gpu_memory:float=0.1):
     # () Initial batch size
     batch_size=train_loader.batch_size
+    if batch_size >= len(train_loader.dataset):
+        return batch_size
+    first_batch_size = train_loader.batch_size
+
     print("Automatic batch size finding")
-    for n in range(200):
+    while True:
         # () Initialize a dataloader with a current batch_size
         train_dataloaders = torch.utils.data.DataLoader(train_loader.dataset,
                                                         batch_size=batch_size,
@@ -136,25 +140,26 @@ Source code for dicee.trainer.model_parallelism

                                                         worker_init_fn=None,
                                                         persistent_workers=False)
         loss=None
+        avg_global_free_memory=[]
         for i, z in enumerate(train_dataloaders):
             loss = forward_backward_update_loss(z,ensemble_model)
-            break
-        global_free_memory, total_memory = torch.cuda.mem_get_info()
-        available_gpu_memory = global_free_memory / total_memory
-        print(f"Random Batch Loss: {loss}\tFree/Total GPU Memory: {available_gpu_memory}\tBatch Size:{batch_size}")
+            global_free_memory, total_memory = torch.cuda.mem_get_info()
+            avg_global_free_memory.append(global_free_memory / total_memory)
+            if i==3:
+                break
+
+        avg_global_free_memory=sum(avg_global_free_memory)/len(avg_global_free_memory)
+        print(f"Random Batch Loss: {loss}\tFree/Total GPU Memory: {avg_global_free_memory}\tBatch Size:{batch_size}")
         # () Stepping criterion
-        if available_gpu_memory > max_available_gpu_memory and batch_size < len(train_loader.dataset) :
+        if avg_global_free_memory > max_available_gpu_memory and batch_size < len(train_loader.dataset) :
             # Increment the current batch size
-            batch_size+=batch_size
+            batch_size+=first_batch_size
         else:
             if batch_size >= len(train_loader.dataset):
                 print("Batch size equals to the training dataset size")
             else:
-                print(f"Max GPU memory used\tFree/Total GPU Memory:{available_gpu_memory}")
+                print(f"Max GPU memory used\tFree/Total GPU Memory:{avg_global_free_memory}")
             return batch_size
-    raise RuntimeError("The computation should be here!")
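Note: in the revised find_good_batch_size above, the heuristic now probes up to four batches per candidate batch size, averages the free/total GPU-memory ratio reported by torch.cuda.mem_get_info(), and grows the batch size linearly by the initial batch size (instead of doubling) until that average free fraction drops to max_available_gpu_memory, whose default rises from 0.05 to 0.1. A minimal, self-contained sketch of the same search strategy follows; train_step is an assumed callable that runs one forward/backward/update pass at a given batch size and is not part of the dicee API.

    import torch

    def find_batch_size_sketch(train_step, dataset_size, initial_batch_size,
                               min_free_fraction=0.1, probes=4):
        # Sketch of the search loop described above, not dicee's implementation.
        batch_size = initial_batch_size
        if batch_size >= dataset_size:
            return batch_size
        while True:
            free_fractions = []
            for _ in range(probes):
                train_step(batch_size)                    # one forward/backward/update at this batch size
                free, total = torch.cuda.mem_get_info()   # free and total GPU memory in bytes
                free_fractions.append(free / total)
            avg_free = sum(free_fractions) / len(free_fractions)
            if avg_free > min_free_fraction and batch_size < dataset_size:
                batch_size += initial_batch_size          # linear growth, replacing the old doubling
            else:
                return batch_size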
5 changes: 5 additions & 0 deletions _sources/autoapi/dicee/index.rst.txt
@@ -2047,6 +2047,11 @@ Package Contents

 .. py:method:: step()

+.. py:method:: get_embeddings()
+   :abstractmethod:
+
+
 .. py:method:: __str__()
5 changes: 5 additions & 0 deletions _sources/autoapi/dicee/models/ensemble/index.rst.txt
@@ -73,6 +73,11 @@ Module Contents

 .. py:method:: step()

+.. py:method:: get_embeddings()
+   :abstractmethod:
+
+
 .. py:method:: __str__()
_sources/autoapi/dicee/trainer/model_parallelism/index.rst.txt
@@ -27,7 +27,7 @@ Module Contents

 .. py:function:: extract_input_outputs(z: list, device=None)

-.. py:function:: find_good_batch_size(train_loader, ensemble_model, max_available_gpu_memory: float = 0.05)
+.. py:function:: find_good_batch_size(train_loader, ensemble_model, max_available_gpu_memory: float = 0.1)

 .. py:function:: forward_backward_update_loss(z: Tuple, ensemble_model)
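Note: this stub only records the changed default of find_good_batch_size; the neighbouring forward_backward_update_loss(z: Tuple, ensemble_model) is what that function calls on each probed batch. Based solely on that signature and on the EnsembleKGE.__call__ and step() methods documented in this commit, a plausible, purely illustrative shape is sketched below; the binary-cross-entropy loss and the (inputs, targets) batch layout are assumptions, not dicee's confirmed implementation.

    import torch

    def forward_backward_update_loss_sketch(z, ensemble_model):
        # Hypothetical shape: unpack a batch, score it, backpropagate, and step all optimizers.
        # (A full implementation would also zero gradients before the next backward pass.)
        x, y = z                                  # assumed (inputs, targets) layout of the batch tuple
        yhat = ensemble_model(x)                  # EnsembleKGE defines __call__
        loss = torch.nn.functional.binary_cross_entropy_with_logits(yhat, y)
        loss.backward()
        ensemble_model.step()                     # EnsembleKGE.step() steps every member optimizer
        return loss.item()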
6 changes: 6 additions & 0 deletions autoapi/dicee/index.html
@@ -401,6 +401,7 @@

 EnsembleKGE.mem_of_model()
 EnsembleKGE.__call__()
 EnsembleKGE.step()
+EnsembleKGE.get_embeddings()
 EnsembleKGE.__str__()
@@ -3259,6 +3260,11 @@ Output

 step()

+abstract get_embeddings()

 __str__()
5 changes: 5 additions & 0 deletions autoapi/dicee/models/ensemble/index.html
@@ -243,6 +243,11 @@ Module Contents

 step()

+abstract get_embeddings()

 __str__()
2 changes: 1 addition & 1 deletion autoapi/dicee/trainer/model_parallelism/index.html
@@ -174,7 +174,7 @@ Module Contents

-dicee.trainer.model_parallelism.find_good_batch_size(train_loader, ensemble_model, max_available_gpu_memory: float = 0.05) [source]
+dicee.trainer.model_parallelism.find_good_batch_size(train_loader, ensemble_model, max_available_gpu_memory: float = 0.1) [source]
Binary file modified diceembeddings.pdf
Binary file not shown.