From a7ecfc7d878874763de989b9a070dfe23e7cb3ac Mon Sep 17 00:00:00 2001 From: husein zolkepli Date: Fri, 10 Jul 2020 12:57:08 +0800 Subject: [PATCH] fix pagerank --- docs/GPU.rst | 4 +- docs/gpu-environment.rst | 155 +++++++------- docs/running-on-windows.ipynb | 8 +- docs/running-on-windows.rst | 6 + example/gpu-environment/README.rst | 155 +++++++------- example/gpu-environment/gpu-environment.ipynb | 190 +++++++++--------- malaya/graph/pagerank.py | 11 +- 7 files changed, 269 insertions(+), 260 deletions(-) diff --git a/docs/GPU.rst b/docs/GPU.rst index 156f7f6c..300cf632 100644 --- a/docs/GPU.rst +++ b/docs/GPU.rst @@ -16,9 +16,11 @@ After that simply install gpu version, GPU Version Benefit -------------------- -1. Different models different GPUs. +1. Limit GPU memory. 2. Automatically try to use cugraph for any networkx functions. +**We will add more GPU benefits in the future**. + Different models different GPUs ---------------------------------- diff --git a/docs/gpu-environment.rst b/docs/gpu-environment.rst index b6769baf..a2161275 100644 --- a/docs/gpu-environment.rst +++ b/docs/gpu-environment.rst @@ -1,26 +1,27 @@ -One model always consumed one unit of gpu. For now we do not support -distributed batch processing to multiple GPUs from one model. But we can -initiate multiple models to multiple GPUs. +.. code:: python -model_emotion -> GPU0 + %%time + + import malaya -model_sentiment -> GPU1 -model_translation -> GPU2 +.. parsed-literal:: + + CPU times: user 5.79 s, sys: 2.45 s, total: 8.24 s + Wall time: 3.63 s -and so on. .. code:: python - %%time - - import malaya + malaya.gpu_available() + + .. parsed-literal:: - CPU times: user 5.94 s, sys: 2.35 s, total: 8.29 s - Wall time: 4.01 s + True + .. code:: python @@ -30,7 +31,7 @@ and so on. .. parsed-literal:: - Tue Jul 7 21:32:37 2020 + Fri Jul 10 12:39:26 2020 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 410.129 Driver Version: 410.129 CUDA Version: 10.0 | |-------------------------------+----------------------+----------------------+ @@ -38,25 +39,23 @@ and so on. | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | |===============================+======================+======================| | 0 Tesla V100-DGXS... On | 00000000:07:00.0 On | 0 | - | N/A 55C P0 42W / 300W | 0MiB / 32475MiB | 0% Default | + | N/A 43C P0 39W / 300W | 0MiB / 32475MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 1 Tesla V100-DGXS... On | 00000000:08:00.0 Off | 0 | - | N/A 65C P0 250W / 300W | 31452MiB / 32478MiB | 89% Default | + | N/A 45C P0 39W / 300W | 0MiB / 32478MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 2 Tesla V100-DGXS... On | 00000000:0E:00.0 Off | 0 | - | N/A 63C P0 270W / 300W | 31452MiB / 32478MiB | 92% Default | + | N/A 44C P0 38W / 300W | 0MiB / 32478MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 3 Tesla V100-DGXS... 
On | 00000000:0F:00.0 Off |                    0 |
 -   | N/A   63C    P0   252W / 300W |  31452MiB / 32478MiB |     77%      Default |
 +   | N/A   44C    P0    40W / 300W |      0MiB / 32478MiB |      0%      Default |
     +-------------------------------+----------------------+----------------------+

     +-----------------------------------------------------------------------------+
     | Processes:                                                       GPU Memory |
     |  GPU       PID   Type   Process name                             Usage      |
     |=============================================================================|
 -   |    1     11646      C   python3                                    31431MiB |
 -   |    2     11646      C   python3                                    31431MiB |
 -   |    3     11646      C   python3                                    31431MiB |
 +   |  No running processes found                                                 |
     +-----------------------------------------------------------------------------+


@@ -65,18 +64,17 @@ Right now all the GPUs are idle; no computation is happening.
 GPU Rules
 ---------
 
-1. By default, all models will initiate in first GPU, unless override
-   ``gpu`` parameter in any load model API. Example as below.
-2. Malaya will not consumed all available GPU memory, but slowly grow
-   based on batch size. This growth only towards positive (use more GPU
-   memory) dynamically, but will not reduce GPU memory if feed small
-   batch size.
-3. Use ``malaya.clear_session`` to clear session from unused models but
-   this will not free GPU memory.
-4. By default Malaya will not set max cap for GPU memory, to put a cap,
-   override ``gpu_limit`` parameter in any load model API. ``gpu_limit``
-   should 0 < ``gpu_limit`` < 1. If ``gpu_limit = 0.3``, it means the
-   model will not use more than 30% of GPU memory.
+1. Malaya will not consume all available GPU memory, but grows slowly
+   based on the batch size. This growth is only upward (using more GPU
+   memory); memory is not released when you feed a smaller batch size.
+2. Use ``malaya.clear_session`` to clear the sessions of unused models,
+   but this will not free GPU memory.
+3. By default Malaya does not cap GPU memory. To set a cap, override the
+   ``gpu_limit`` parameter in any load model API, where
+   0 < ``gpu_limit`` < 1. If ``gpu_limit = 0.3``, the model will not use
+   more than 30% of GPU memory.
+4. Even if you installed the Malaya CPU version, it will always try to
+   load models on GPU first; if that fails, it will fall back to CPU.
 
 .. code:: python
 
@@ -89,20 +88,20 @@
 .. code:: python
 
-    model = malaya.emotion.transformer(model = 'bert', gpu = '0')
+    model = malaya.emotion.transformer(model = 'bert', gpu_limit = 0.5)
 
 
 .. parsed-literal::
 
-    WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:61: The name tf.gfile.GFile is deprecated. Please use tf.io.gfile.GFile instead.
+    WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:72: The name tf.gfile.GFile is deprecated. Please use tf.io.gfile.GFile instead.
    
-    WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:62: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.
+    WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:73: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.
    
-    WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:50: The name tf.GPUOptions is deprecated. Please use tf.compat.v1.GPUOptions instead.
+    WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:58: The name tf.GPUOptions is deprecated. Please use tf.compat.v1.GPUOptions instead.
    
-    WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:51: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.
+    WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:61: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.
    
-    WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:53: The name tf.InteractiveSession is deprecated. 
Please use tf.compat.v1.InteractiveSession instead. + WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:63: The name tf.InteractiveSession is deprecated. Please use tf.compat.v1.InteractiveSession instead. @@ -117,50 +116,50 @@ GPU Rules .. parsed-literal:: - CPU times: user 1.94 s, sys: 541 ms, total: 2.48 s - Wall time: 2.52 s + CPU times: user 1.8 s, sys: 504 ms, total: 2.3 s + Wall time: 2.3 s .. parsed-literal:: - [{'anger': 0.9998965, - 'fear': 1.7692768e-05, - 'happy': 1.8747674e-05, - 'love': 1.656881e-05, - 'sadness': 3.130815e-05, - 'surprise': 1.9183277e-05}, - {'anger': 7.4469484e-05, - 'fear': 0.99977416, - 'happy': 6.824215e-05, - 'love': 2.773282e-05, - 'sadness': 1.9767067e-05, - 'surprise': 3.5663204e-05}, - {'anger': 0.99963737, - 'fear': 3.931449e-05, - 'happy': 0.0001562279, - 'love': 3.3580774e-05, - 'sadness': 0.00011328616, - 'surprise': 2.0134145e-05}, - {'anger': 3.1319763e-05, - 'fear': 1.7286226e-05, - 'happy': 2.9899325e-05, - 'love': 0.99987257, - 'sadness': 2.7867774e-05, - 'surprise': 2.096328e-05}, - {'anger': 8.965934e-05, - 'fear': 1.8196944e-05, - 'happy': 2.9275663e-05, - 'love': 1.7211949e-05, - 'sadness': 0.9998247, - 'surprise': 2.0944033e-05}, - {'anger': 4.132152e-05, - 'fear': 6.202527e-05, - 'happy': 3.1012056e-05, - 'love': 5.3896296e-05, - 'sadness': 6.202101e-05, - 'surprise': 0.9997497}] + [{'anger': 0.99989223, + 'fear': 1.5843118e-05, + 'happy': 1.660186e-05, + 'love': 1.9634477e-05, + 'sadness': 3.827092e-05, + 'surprise': 1.7427232e-05}, + {'anger': 4.894743e-05, + 'fear': 0.999795, + 'happy': 6.764499e-05, + 'love': 3.6289443e-05, + 'sadness': 1.9702624e-05, + 'surprise': 3.2430926e-05}, + {'anger': 0.9997905, + 'fear': 2.5795038e-05, + 'happy': 6.7572015e-05, + 'love': 2.6636817e-05, + 'sadness': 6.734582e-05, + 'surprise': 2.2285754e-05}, + {'anger': 2.4449551e-05, + 'fear': 2.6033362e-05, + 'happy': 3.1518703e-05, + 'love': 0.9998758, + 'sadness': 1.895303e-05, + 'surprise': 2.326243e-05}, + {'anger': 8.095824e-05, + 'fear': 2.3824483e-05, + 'happy': 2.1045413e-05, + 'love': 1.6150812e-05, + 'sadness': 0.99983835, + 'surprise': 1.9708685e-05}, + {'anger': 4.470948e-05, + 'fear': 0.00010641558, + 'happy': 2.9055469e-05, + 'love': 4.5270677e-05, + 'sadness': 5.7159534e-05, + 'surprise': 0.9997173}] @@ -171,7 +170,7 @@ GPU Rules .. parsed-literal:: - Tue Jul 7 21:32:57 2020 + Fri Jul 10 12:39:56 2020 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 410.129 Driver Version: 410.129 CUDA Version: 10.0 | |-------------------------------+----------------------+----------------------+ @@ -179,26 +178,26 @@ GPU Rules | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | |===============================+======================+======================| | 0 Tesla V100-DGXS... On | 00000000:07:00.0 On | 0 | - | N/A 56C P0 58W / 300W | 1099MiB / 32475MiB | 0% Default | + | N/A 44C P0 54W / 300W | 1099MiB / 32475MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 1 Tesla V100-DGXS... On | 00000000:08:00.0 Off | 0 | - | N/A 64C P0 219W / 300W | 31452MiB / 32478MiB | 99% Default | + | N/A 45C P0 52W / 300W | 418MiB / 32478MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 2 Tesla V100-DGXS... 
On | 00000000:0E:00.0 Off |                    0 |
 -   | N/A   62C    P0   248W / 300W |  31452MiB / 32478MiB |     99%      Default |
 +   | N/A   44C    P0    51W / 300W |    418MiB / 32478MiB |      0%      Default |
     +-------------------------------+----------------------+----------------------+
     |   3  Tesla V100-DGXS...  On   | 00000000:0F:00.0 Off |                    0 |
 -   | N/A   62C    P0   236W / 300W |  31452MiB / 32478MiB |     76%      Default |
 +   | N/A   45C    P0    54W / 300W |    418MiB / 32478MiB |      0%      Default |
     +-------------------------------+----------------------+----------------------+

     +-----------------------------------------------------------------------------+
     | Processes:                                                       GPU Memory |
     |  GPU       PID   Type   Process name                             Usage      |
     |=============================================================================|
 -   |    0      2536      C   /usr/bin/python3                            1087MiB |
 -   |    1     11646      C   python3                                    31431MiB |
 -   |    2     11646      C   python3                                    31431MiB |
 -   |    3     11646      C   python3                                    31431MiB |
 +   |    0     35310      C   /usr/bin/python3                            1087MiB |
 +   |    1     35310      C   /usr/bin/python3                             407MiB |
 +   |    2     35310      C   /usr/bin/python3                             407MiB |
 +   |    3     35310      C   /usr/bin/python3                             407MiB |
     +-----------------------------------------------------------------------------+


diff --git a/docs/running-on-windows.ipynb b/docs/running-on-windows.ipynb
index e8da1337..f1861e3a 100644
--- a/docs/running-on-windows.ipynb
+++ b/docs/running-on-windows.ipynb
@@ -23,7 +23,13 @@
    "source": [
     "## Unable to use any T5 models\n",
     "\n",
-    "T5 depends on tensorflow-text, currently there is no official tensorflow-text binary released for Windows. So no T5 model for Windows users."
+    "T5 depends on tensorflow-text, and currently there is no official tensorflow-text binary released for Windows, so there are no T5 models for Windows users.\n",
+    "\n",
+    "List of T5 models:\n",
+    "\n",
+    "1. [malaya.summarization.abstractive.t5](https://malaya.readthedocs.io/en/latest/Abstractive.html#load-t5)\n",
+    "2. [malaya.generator.t5](https://malaya.readthedocs.io/en/latest/Generator.html#load-t5)\n",
+    "3. [malaya.paraphrase.t5](https://malaya.readthedocs.io/en/latest/Paraphrase.html#load-t5-models)"
   ]
  },
 {
diff --git a/docs/running-on-windows.rst b/docs/running-on-windows.rst
index 843994c8..810b0d43 100644
--- a/docs/running-on-windows.rst
+++ b/docs/running-on-windows.rst
@@ -20,6 +20,12 @@
 T5 depends on tensorflow-text, and currently there is no official
 tensorflow-text binary released for Windows, so there are no T5 models
 for Windows users.
 
+List of T5 models:
+
+1. `malaya.summarization.abstractive.t5 <https://malaya.readthedocs.io/en/latest/Abstractive.html#load-t5>`__
+2. `malaya.generator.t5 <https://malaya.readthedocs.io/en/latest/Generator.html#load-t5>`__
+3. `malaya.paraphrase.t5 <https://malaya.readthedocs.io/en/latest/Paraphrase.html#load-t5-models>`__
+
 Lack development on Windows
 ---------------------------
 
diff --git a/example/gpu-environment/README.rst b/example/gpu-environment/README.rst
index e711ba0b..80692867 100644
--- a/example/gpu-environment/README.rst
+++ b/example/gpu-environment/README.rst
@@ -1,30 +1,27 @@
-One model always consumed one unit of gpu. For now we do not support
-distributed batch processing to multiple GPUs from one model. But we can
-initiate multiple models to multiple GPUs.
+.. code:: ipython3
+
+    %%time
+    
+    import malaya
 
-model_emotion -> GPU0
 
-model_sentiment -> GPU1
 
+.. parsed-literal::
 
-model_translation -> GPU2
+    CPU times: user 5.79 s, sys: 2.45 s, total: 8.24 s
+    Wall time: 3.63 s
 
-and so on.
 
 .. code:: ipython3
 
-    !git pull
+    malaya.gpu_available()
 
-.. code:: ipython3
 
-    %%time
-    
-    import malaya
 
 .. parsed-literal::
 
-    CPU times: user 5.94 s, sys: 2.35 s, total: 8.29 s
-    Wall time: 4.01 s
+    True
+
 
 .. code:: ipython3
 
@@ -34,7 +31,7 @@
 .. 
parsed-literal::
 
-    Tue Jul  7 21:32:37 2020       
+    Fri Jul 10 12:39:26 2020       
     +-----------------------------------------------------------------------------+
     | NVIDIA-SMI 410.129                Driver Version: 410.129      CUDA Version: 10.0  |
     |-------------------------------+----------------------+----------------------+
     | GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
     | Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
     |===============================+======================+======================|
     |   0  Tesla V100-DGXS...  On   | 00000000:07:00.0  On |                    0 |
 -   | N/A   55C    P0    42W / 300W |      0MiB / 32475MiB |      0%      Default |
 +   | N/A   43C    P0    39W / 300W |      0MiB / 32475MiB |      0%      Default |
     +-------------------------------+----------------------+----------------------+
     |   1  Tesla V100-DGXS...  On   | 00000000:08:00.0 Off |                    0 |
 -   | N/A   65C    P0   250W / 300W |  31452MiB / 32478MiB |     89%      Default |
 +   | N/A   45C    P0    39W / 300W |      0MiB / 32478MiB |      0%      Default |
     +-------------------------------+----------------------+----------------------+
     |   2  Tesla V100-DGXS...  On   | 00000000:0E:00.0 Off |                    0 |
 -   | N/A   63C    P0   270W / 300W |  31452MiB / 32478MiB |     92%      Default |
 +   | N/A   44C    P0    38W / 300W |      0MiB / 32478MiB |      0%      Default |
     +-------------------------------+----------------------+----------------------+
     |   3  Tesla V100-DGXS...  On   | 00000000:0F:00.0 Off |                    0 |
 -   | N/A   63C    P0   252W / 300W |  31452MiB / 32478MiB |     77%      Default |
 +   | N/A   44C    P0    40W / 300W |      0MiB / 32478MiB |      0%      Default |
     +-------------------------------+----------------------+----------------------+

     +-----------------------------------------------------------------------------+
     | Processes:                                                       GPU Memory |
     |  GPU       PID   Type   Process name                             Usage      |
     |=============================================================================|
 -   |    1     11646      C   python3                                    31431MiB |
 -   |    2     11646      C   python3                                    31431MiB |
 -   |    3     11646      C   python3                                    31431MiB |
 +   |  No running processes found                                                 |
     +-----------------------------------------------------------------------------+


@@ -69,18 +64,17 @@ Right now all the GPUs are idle; no computation is happening.
 GPU Rules
 ---------
 
-1. By default, all models will initiate in first GPU, unless override
-   ``gpu`` parameter in any load model API. Example as below.
-2. Malaya will not consumed all available GPU memory, but slowly grow
-   based on batch size. This growth only towards positive (use more GPU
-   memory) dynamically, but will not reduce GPU memory if feed small
-   batch size.
-3. Use ``malaya.clear_session`` to clear session from unused models but
-   this will not free GPU memory.
-4. By default Malaya will not set max cap for GPU memory, to put a cap,
-   override ``gpu_limit`` parameter in any load model API. ``gpu_limit``
-   should 0 < ``gpu_limit`` < 1. If ``gpu_limit = 0.3``, it means the
-   model will not use more than 30% of GPU memory.
+1. Malaya will not consume all available GPU memory, but grows slowly
+   based on the batch size. This growth is only upward (using more GPU
+   memory); memory is not released when you feed a smaller batch size.
+2. Use ``malaya.clear_session`` to clear the sessions of unused models,
+   but this will not free GPU memory.
+3. By default Malaya does not cap GPU memory. To set a cap, override the
+   ``gpu_limit`` parameter in any load model API, where
+   0 < ``gpu_limit`` < 1. If ``gpu_limit = 0.3``, the model will not use
+   more than 30% of GPU memory.
+4. Even if you installed the Malaya CPU version, it will always try to
+   load models on GPU first; if that fails, it will fall back to CPU.
 
 .. code:: ipython3
 
@@ -93,20 +88,20 @@
 .. code:: ipython3
 
-    model = malaya.emotion.transformer(model = 'bert', gpu = '0')
+    model = malaya.emotion.transformer(model = 'bert', gpu_limit = 0.5)
 
 
 .. parsed-literal::
 
-    WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:61: The name tf.gfile.GFile is deprecated. Please use tf.io.gfile.GFile instead. 
+ WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:72: The name tf.gfile.GFile is deprecated. Please use tf.io.gfile.GFile instead. - WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:62: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead. + WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:73: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead. - WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:50: The name tf.GPUOptions is deprecated. Please use tf.compat.v1.GPUOptions instead. + WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:58: The name tf.GPUOptions is deprecated. Please use tf.compat.v1.GPUOptions instead. - WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:51: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead. + WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:61: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead. - WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:53: The name tf.InteractiveSession is deprecated. Please use tf.compat.v1.InteractiveSession instead. + WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:63: The name tf.InteractiveSession is deprecated. Please use tf.compat.v1.InteractiveSession instead. @@ -121,50 +116,50 @@ GPU Rules .. parsed-literal:: - CPU times: user 1.94 s, sys: 541 ms, total: 2.48 s - Wall time: 2.52 s + CPU times: user 1.8 s, sys: 504 ms, total: 2.3 s + Wall time: 2.3 s .. parsed-literal:: - [{'anger': 0.9998965, - 'fear': 1.7692768e-05, - 'happy': 1.8747674e-05, - 'love': 1.656881e-05, - 'sadness': 3.130815e-05, - 'surprise': 1.9183277e-05}, - {'anger': 7.4469484e-05, - 'fear': 0.99977416, - 'happy': 6.824215e-05, - 'love': 2.773282e-05, - 'sadness': 1.9767067e-05, - 'surprise': 3.5663204e-05}, - {'anger': 0.99963737, - 'fear': 3.931449e-05, - 'happy': 0.0001562279, - 'love': 3.3580774e-05, - 'sadness': 0.00011328616, - 'surprise': 2.0134145e-05}, - {'anger': 3.1319763e-05, - 'fear': 1.7286226e-05, - 'happy': 2.9899325e-05, - 'love': 0.99987257, - 'sadness': 2.7867774e-05, - 'surprise': 2.096328e-05}, - {'anger': 8.965934e-05, - 'fear': 1.8196944e-05, - 'happy': 2.9275663e-05, - 'love': 1.7211949e-05, - 'sadness': 0.9998247, - 'surprise': 2.0944033e-05}, - {'anger': 4.132152e-05, - 'fear': 6.202527e-05, - 'happy': 3.1012056e-05, - 'love': 5.3896296e-05, - 'sadness': 6.202101e-05, - 'surprise': 0.9997497}] + [{'anger': 0.99989223, + 'fear': 1.5843118e-05, + 'happy': 1.660186e-05, + 'love': 1.9634477e-05, + 'sadness': 3.827092e-05, + 'surprise': 1.7427232e-05}, + {'anger': 4.894743e-05, + 'fear': 0.999795, + 'happy': 6.764499e-05, + 'love': 3.6289443e-05, + 'sadness': 1.9702624e-05, + 'surprise': 3.2430926e-05}, + {'anger': 0.9997905, + 'fear': 2.5795038e-05, + 'happy': 6.7572015e-05, + 'love': 2.6636817e-05, + 'sadness': 6.734582e-05, + 'surprise': 2.2285754e-05}, + {'anger': 2.4449551e-05, + 'fear': 2.6033362e-05, + 'happy': 3.1518703e-05, + 'love': 0.9998758, + 'sadness': 1.895303e-05, + 'surprise': 2.326243e-05}, + {'anger': 8.095824e-05, + 'fear': 2.3824483e-05, + 'happy': 2.1045413e-05, + 'love': 1.6150812e-05, + 'sadness': 0.99983835, + 'surprise': 1.9708685e-05}, + {'anger': 4.470948e-05, + 'fear': 0.00010641558, + 'happy': 2.9055469e-05, + 'love': 
4.5270677e-05, + 'sadness': 5.7159534e-05, + 'surprise': 0.9997173}] @@ -175,7 +170,7 @@ GPU Rules .. parsed-literal:: - Tue Jul 7 21:32:57 2020 + Fri Jul 10 12:39:56 2020 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 410.129 Driver Version: 410.129 CUDA Version: 10.0 | |-------------------------------+----------------------+----------------------+ @@ -183,26 +178,26 @@ GPU Rules | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | |===============================+======================+======================| | 0 Tesla V100-DGXS... On | 00000000:07:00.0 On | 0 | - | N/A 56C P0 58W / 300W | 1099MiB / 32475MiB | 0% Default | + | N/A 44C P0 54W / 300W | 1099MiB / 32475MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 1 Tesla V100-DGXS... On | 00000000:08:00.0 Off | 0 | - | N/A 64C P0 219W / 300W | 31452MiB / 32478MiB | 99% Default | + | N/A 45C P0 52W / 300W | 418MiB / 32478MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 2 Tesla V100-DGXS... On | 00000000:0E:00.0 Off | 0 | - | N/A 62C P0 248W / 300W | 31452MiB / 32478MiB | 99% Default | + | N/A 44C P0 51W / 300W | 418MiB / 32478MiB | 0% Default | +-------------------------------+----------------------+----------------------+ | 3 Tesla V100-DGXS... On | 00000000:0F:00.0 Off | 0 | - | N/A 62C P0 236W / 300W | 31452MiB / 32478MiB | 76% Default | + | N/A 45C P0 54W / 300W | 418MiB / 32478MiB | 0% Default | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: GPU Memory | | GPU PID Type Process name Usage | |=============================================================================| - | 0 2536 C /usr/bin/python3 1087MiB | - | 1 11646 C python3 31431MiB | - | 2 11646 C python3 31431MiB | - | 3 11646 C python3 31431MiB | + | 0 35310 C /usr/bin/python3 1087MiB | + | 1 35310 C /usr/bin/python3 407MiB | + | 2 35310 C /usr/bin/python3 407MiB | + | 3 35310 C /usr/bin/python3 407MiB | +-----------------------------------------------------------------------------+ diff --git a/example/gpu-environment/gpu-environment.ipynb b/example/gpu-environment/gpu-environment.ipynb index a4e49a8d..45cb7fdc 100644 --- a/example/gpu-environment/gpu-environment.ipynb +++ b/example/gpu-environment/gpu-environment.ipynb @@ -1,29 +1,5 @@ { "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "One model always consumed one unit of gpu. For now we do not support distributed batch processing to multiple GPUs from one model. But we can initiate multiple models to multiple GPUs.\n", - "\n", - "model_emotion -> GPU0\n", - "\n", - "model_sentiment -> GPU1\n", - "\n", - "model_translation -> GPU2\n", - "\n", - "and so on." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!git pull" - ] - }, { "cell_type": "code", "execution_count": 1, @@ -33,8 +9,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "CPU times: user 5.94 s, sys: 2.35 s, total: 8.29 s\n", - "Wall time: 4.01 s\n" + "CPU times: user 5.79 s, sys: 2.45 s, total: 8.24 s\n", + "Wall time: 3.63 s\n" ] } ], @@ -47,6 +23,26 @@ { "cell_type": "code", "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "malaya.gpu_available()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, "metadata": { "scrolled": true }, @@ -55,7 +51,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Tue Jul 7 21:32:37 2020 \n", + "Fri Jul 10 12:39:26 2020 \n", "+-----------------------------------------------------------------------------+\n", "| NVIDIA-SMI 410.129 Driver Version: 410.129 CUDA Version: 10.0 |\n", "|-------------------------------+----------------------+----------------------+\n", @@ -63,25 +59,23 @@ "| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n", "|===============================+======================+======================|\n", "| 0 Tesla V100-DGXS... On | 00000000:07:00.0 On | 0 |\n", - "| N/A 55C P0 42W / 300W | 0MiB / 32475MiB | 0% Default |\n", + "| N/A 43C P0 39W / 300W | 0MiB / 32475MiB | 0% Default |\n", "+-------------------------------+----------------------+----------------------+\n", "| 1 Tesla V100-DGXS... On | 00000000:08:00.0 Off | 0 |\n", - "| N/A 65C P0 250W / 300W | 31452MiB / 32478MiB | 89% Default |\n", + "| N/A 45C P0 39W / 300W | 0MiB / 32478MiB | 0% Default |\n", "+-------------------------------+----------------------+----------------------+\n", "| 2 Tesla V100-DGXS... On | 00000000:0E:00.0 Off | 0 |\n", - "| N/A 63C P0 270W / 300W | 31452MiB / 32478MiB | 92% Default |\n", + "| N/A 44C P0 38W / 300W | 0MiB / 32478MiB | 0% Default |\n", "+-------------------------------+----------------------+----------------------+\n", "| 3 Tesla V100-DGXS... On | 00000000:0F:00.0 Off | 0 |\n", - "| N/A 63C P0 252W / 300W | 31452MiB / 32478MiB | 77% Default |\n", + "| N/A 44C P0 40W / 300W | 0MiB / 32478MiB | 0% Default |\n", "+-------------------------------+----------------------+----------------------+\n", " \n", "+-----------------------------------------------------------------------------+\n", "| Processes: GPU Memory |\n", "| GPU PID Type Process name Usage |\n", "|=============================================================================|\n", - "| 1 11646 C python3 31431MiB |\n", - "| 2 11646 C python3 31431MiB |\n", - "| 3 11646 C python3 31431MiB |\n", + "| No running processes found |\n", "+-----------------------------------------------------------------------------+\n" ] } @@ -103,15 +97,15 @@ "source": [ "## GPU Rules\n", "\n", - "1. By default, all models will initiate in first GPU, unless override `gpu` parameter in any load model API. Example as below.\n", - "2. Malaya will not consumed all available GPU memory, but slowly grow based on batch size. This growth only towards positive (use more GPU memory) dynamically, but will not reduce GPU memory if feed small batch size.\n", - "3. Use `malaya.clear_session` to clear session from unused models but this will not free GPU memory.\n", - "4. 
By default Malaya will not set max cap for GPU memory, to put a cap, override `gpu_limit` parameter in any load model API. `gpu_limit` should 0 < `gpu_limit` < 1. If `gpu_limit = 0.3`, it means the model will not use more than 30% of GPU memory."
+    "1. Malaya will not consume all available GPU memory, but grows slowly based on the batch size. This growth is only upward (using more GPU memory); memory is not released when you feed a smaller batch size.\n",
+    "2. Use `malaya.clear_session` to clear the sessions of unused models, but this will not free GPU memory.\n",
+    "3. By default Malaya does not cap GPU memory. To set a cap, override the `gpu_limit` parameter in any load model API, where 0 < `gpu_limit` < 1. If `gpu_limit = 0.3`, the model will not use more than 30% of GPU memory.\n",
+    "4. Even if you installed the Malaya CPU version, it will always try to load models on GPU first; if that fails, it will fall back to CPU."
   ]
  },
 {
   "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -125,85 +119,85 @@
  },
 {
   "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:61: The name tf.gfile.GFile is deprecated. Please use tf.io.gfile.GFile instead.\n",
+     "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:72: The name tf.gfile.GFile is deprecated. Please use tf.io.gfile.GFile instead.\n",
      "\n",
-     "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:62: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n",
+     "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:73: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n",
      "\n",
-     "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:50: The name tf.GPUOptions is deprecated. Please use tf.compat.v1.GPUOptions instead.\n",
+     "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:58: The name tf.GPUOptions is deprecated. Please use tf.compat.v1.GPUOptions instead.\n",
      "\n",
-     "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:51: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n",
+     "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:61: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n",
      "\n",
-     "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:53: The name tf.InteractiveSession is deprecated. Please use tf.compat.v1.InteractiveSession instead.\n",
+     "WARNING:tensorflow:From /home/husein/malaya/Malaya/malaya/function/__init__.py:63: The name tf.InteractiveSession is deprecated. 
Please use tf.compat.v1.InteractiveSession instead.\n", "\n" ] } ], "source": [ - "model = malaya.emotion.transformer(model = 'bert', gpu = '0')" + "model = malaya.emotion.transformer(model = 'bert', gpu_limit = 0.5)" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "CPU times: user 1.94 s, sys: 541 ms, total: 2.48 s\n", - "Wall time: 2.52 s\n" + "CPU times: user 1.8 s, sys: 504 ms, total: 2.3 s\n", + "Wall time: 2.3 s\n" ] }, { "data": { "text/plain": [ - "[{'anger': 0.9998965,\n", - " 'fear': 1.7692768e-05,\n", - " 'happy': 1.8747674e-05,\n", - " 'love': 1.656881e-05,\n", - " 'sadness': 3.130815e-05,\n", - " 'surprise': 1.9183277e-05},\n", - " {'anger': 7.4469484e-05,\n", - " 'fear': 0.99977416,\n", - " 'happy': 6.824215e-05,\n", - " 'love': 2.773282e-05,\n", - " 'sadness': 1.9767067e-05,\n", - " 'surprise': 3.5663204e-05},\n", - " {'anger': 0.99963737,\n", - " 'fear': 3.931449e-05,\n", - " 'happy': 0.0001562279,\n", - " 'love': 3.3580774e-05,\n", - " 'sadness': 0.00011328616,\n", - " 'surprise': 2.0134145e-05},\n", - " {'anger': 3.1319763e-05,\n", - " 'fear': 1.7286226e-05,\n", - " 'happy': 2.9899325e-05,\n", - " 'love': 0.99987257,\n", - " 'sadness': 2.7867774e-05,\n", - " 'surprise': 2.096328e-05},\n", - " {'anger': 8.965934e-05,\n", - " 'fear': 1.8196944e-05,\n", - " 'happy': 2.9275663e-05,\n", - " 'love': 1.7211949e-05,\n", - " 'sadness': 0.9998247,\n", - " 'surprise': 2.0944033e-05},\n", - " {'anger': 4.132152e-05,\n", - " 'fear': 6.202527e-05,\n", - " 'happy': 3.1012056e-05,\n", - " 'love': 5.3896296e-05,\n", - " 'sadness': 6.202101e-05,\n", - " 'surprise': 0.9997497}]" + "[{'anger': 0.99989223,\n", + " 'fear': 1.5843118e-05,\n", + " 'happy': 1.660186e-05,\n", + " 'love': 1.9634477e-05,\n", + " 'sadness': 3.827092e-05,\n", + " 'surprise': 1.7427232e-05},\n", + " {'anger': 4.894743e-05,\n", + " 'fear': 0.999795,\n", + " 'happy': 6.764499e-05,\n", + " 'love': 3.6289443e-05,\n", + " 'sadness': 1.9702624e-05,\n", + " 'surprise': 3.2430926e-05},\n", + " {'anger': 0.9997905,\n", + " 'fear': 2.5795038e-05,\n", + " 'happy': 6.7572015e-05,\n", + " 'love': 2.6636817e-05,\n", + " 'sadness': 6.734582e-05,\n", + " 'surprise': 2.2285754e-05},\n", + " {'anger': 2.4449551e-05,\n", + " 'fear': 2.6033362e-05,\n", + " 'happy': 3.1518703e-05,\n", + " 'love': 0.9998758,\n", + " 'sadness': 1.895303e-05,\n", + " 'surprise': 2.326243e-05},\n", + " {'anger': 8.095824e-05,\n", + " 'fear': 2.3824483e-05,\n", + " 'happy': 2.1045413e-05,\n", + " 'love': 1.6150812e-05,\n", + " 'sadness': 0.99983835,\n", + " 'surprise': 1.9708685e-05},\n", + " {'anger': 4.470948e-05,\n", + " 'fear': 0.00010641558,\n", + " 'happy': 2.9055469e-05,\n", + " 'love': 4.5270677e-05,\n", + " 'sadness': 5.7159534e-05,\n", + " 'surprise': 0.9997173}]" ] }, - "execution_count": 5, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -218,14 +212,14 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Tue Jul 7 21:32:57 2020 \n", + "Fri Jul 10 12:39:56 2020 \n", "+-----------------------------------------------------------------------------+\n", "| NVIDIA-SMI 410.129 Driver Version: 410.129 CUDA Version: 10.0 |\n", "|-------------------------------+----------------------+----------------------+\n", @@ -233,26 +227,26 @@ "| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. 
|\n", "|===============================+======================+======================|\n", "| 0 Tesla V100-DGXS... On | 00000000:07:00.0 On | 0 |\n", - "| N/A 56C P0 58W / 300W | 1099MiB / 32475MiB | 0% Default |\n", + "| N/A 44C P0 54W / 300W | 1099MiB / 32475MiB | 0% Default |\n", "+-------------------------------+----------------------+----------------------+\n", "| 1 Tesla V100-DGXS... On | 00000000:08:00.0 Off | 0 |\n", - "| N/A 64C P0 219W / 300W | 31452MiB / 32478MiB | 99% Default |\n", + "| N/A 45C P0 52W / 300W | 418MiB / 32478MiB | 0% Default |\n", "+-------------------------------+----------------------+----------------------+\n", "| 2 Tesla V100-DGXS... On | 00000000:0E:00.0 Off | 0 |\n", - "| N/A 62C P0 248W / 300W | 31452MiB / 32478MiB | 99% Default |\n", + "| N/A 44C P0 51W / 300W | 418MiB / 32478MiB | 0% Default |\n", "+-------------------------------+----------------------+----------------------+\n", "| 3 Tesla V100-DGXS... On | 00000000:0F:00.0 Off | 0 |\n", - "| N/A 62C P0 236W / 300W | 31452MiB / 32478MiB | 76% Default |\n", + "| N/A 45C P0 54W / 300W | 418MiB / 32478MiB | 0% Default |\n", "+-------------------------------+----------------------+----------------------+\n", " \n", "+-----------------------------------------------------------------------------+\n", "| Processes: GPU Memory |\n", "| GPU PID Type Process name Usage |\n", "|=============================================================================|\n", - "| 0 2536 C /usr/bin/python3 1087MiB |\n", - "| 1 11646 C python3 31431MiB |\n", - "| 2 11646 C python3 31431MiB |\n", - "| 3 11646 C python3 31431MiB |\n", + "| 0 35310 C /usr/bin/python3 1087MiB |\n", + "| 1 35310 C /usr/bin/python3 407MiB |\n", + "| 2 35310 C /usr/bin/python3 407MiB |\n", + "| 3 35310 C /usr/bin/python3 407MiB |\n", "+-----------------------------------------------------------------------------+\n" ] } @@ -263,7 +257,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -272,7 +266,7 @@ "True" ] }, - "execution_count": 7, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } diff --git a/malaya/graph/pagerank.py b/malaya/graph/pagerank.py index d6e18316..01c07680 100644 --- a/malaya/graph/pagerank.py +++ b/malaya/graph/pagerank.py @@ -6,6 +6,7 @@ def pagerank(array, retry = 5): cpu = False + fail = True if gpu_available(): try: import cugraph @@ -27,8 +28,14 @@ def pagerank(array, retry = 5): for _ in range(retry): try: scores = nx.pagerank(nx_graph, max_iter = 10000) + fail = False break - except: - pass + except Exception as e: + logging.warning(e) + + if fail: + raise Exception( + 'pagerank not able to converge, rerun may able to solve it.' + ) return scores