Commit

update docs

shenweichen authored Apr 4, 2021
1 parent ac9ea22 commit ea6bc38
Showing 32 changed files with 76 additions and 36 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -18,7 +18,7 @@ jobs:
strategy:
matrix:
python-version: [3.6,3.7]
torch-version: [1.1.0,1.2.0,1.3.0,1.4.0,1.5.0,1.6.0,1.7.0]
torch-version: [1.1.0,1.2.0,1.3.0,1.4.0,1.5.0,1.6.0,1.7.0,1.8.1]

# exclude:
# - python-version: 3.5
10 changes: 5 additions & 5 deletions README.md
@@ -84,6 +84,11 @@ Let's [**Get Started!**](https://deepctr-torch.readthedocs.io/en/latest/Quick-St
​ <a href="https://github.com/shenweichen">Shen Weichen</a> ​
<p>Core Dev<br> Zhejiang University <br> <br> </p>​
</td>
<td>
​ <a href="https://github.com/zanshuxun"><img width="70" height="70" src="https://github.com/zanshuxun.png?s=40" alt="pic"></a><br>
​ <a href="https://github.com/zanshuxun">Zan Shuxun</a>
<p>Core Dev<br> Beijing University <br> of Posts and <br> Telecommunications</p>​
</td>
<td>
<a href="https://github.com/weberrr"><img width="70" height="70" src="https://github.com/weberrr.png?s=40" alt="pic"></a><br>
<a href="https://github.com/weberrr">Wang Ze</a> ​
@@ -94,11 +99,6 @@ Let's [**Get Started!**](https://deepctr-torch.readthedocs.io/en/latest/Quick-St
<a href="https://github.com/wutongzhang">Zhang Wutong</a>
<p>Core Dev<br> Beijing University <br> of Posts and <br> Telecommunications</p>​
</td>
<td>
​ <a href="https://github.com/zanshuxun"><img width="70" height="70" src="https://github.com/zanshuxun.png?s=40" alt="pic"></a><br>
​ <a href="https://github.com/zanshuxun">Zan Shuxun</a>
<p>Core Dev<br> Beijing University <br> of Posts and <br> Telecommunications</p>​
</td>
<td>
​ <a href="https://github.com/ZhangYuef"><img width="70" height="70" src="https://github.com/ZhangYuef.png?s=40" alt="pic"></a><br>
​ <a href="https://github.com/ZhangYuef">Zhang Yuefeng</a>
2 changes: 1 addition & 1 deletion deepctr_torch/__init__.py
@@ -2,5 +2,5 @@
from . import models
from .utils import check_version

__version__ = '0.2.5'
__version__ = '0.2.6'
check_version(__version__)
2 changes: 1 addition & 1 deletion deepctr_torch/layers/activation.py
@@ -12,7 +12,7 @@ class Dice(nn.Module):
Output shape:
- Same shape as input.
References
- [Zhou G, Zhu X, Song C, et al. Deep interest network for click-through rate prediction[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. ACM, 2018: 1059-1068.](https://arxiv.org/pdf/1706.06978.pdf)
- https://github.com/zhougr1993/DeepInterestNetwork, https://github.com/fanoping/DIN-pytorch
2 changes: 1 addition & 1 deletion deepctr_torch/layers/interaction.py
@@ -126,7 +126,7 @@ def __init__(self, filed_size, embedding_size, bilinear_type="interaction", seed
self.bilinear = nn.Linear(
embedding_size, embedding_size, bias=False)
elif self.bilinear_type == "each":
for i in range(filed_size):
for _ in range(filed_size):
self.bilinear.append(
nn.Linear(embedding_size, embedding_size, bias=False))
elif self.bilinear_type == "interaction":
2 changes: 1 addition & 1 deletion deepctr_torch/models/autoint.py
@@ -34,7 +34,7 @@ class AutoInt(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self, linear_feature_columns, dnn_feature_columns, att_layer_num=3, att_embedding_size=8, att_head_num=2,
4 changes: 2 additions & 2 deletions deepctr_torch/models/basemodel.py
@@ -234,7 +234,7 @@ def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, initial_epoc
train_result = {}
try:
with tqdm(enumerate(train_loader), disable=verbose != 1) as t:
for index, (x_train, y_train) in t:
for _, (x_train, y_train) in t:
x = x_train.to(self.device).float()
y = y_train.to(self.device).float()

@@ -333,7 +333,7 @@ def predict(self, x, batch_size=256):

pred_ans = []
with torch.no_grad():
for index, x_test in enumerate(test_loader):
for _, x_test in enumerate(test_loader):
x = x_test[0].to(self.device).float()

y_pred = model(x).cpu().data.numpy() # .squeeze()
2 changes: 1 addition & 1 deletion deepctr_torch/models/dcn.py
@@ -38,7 +38,7 @@ class DCN(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self, linear_feature_columns, dnn_feature_columns, cross_num=2, cross_parameterization='vector',
2 changes: 1 addition & 1 deletion deepctr_torch/models/dcnmix.py
@@ -38,7 +38,7 @@ class DCNMix(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self, linear_feature_columns,
2 changes: 1 addition & 1 deletion deepctr_torch/models/deepfm.py
@@ -32,7 +32,7 @@ class DeepFM(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self,
8 changes: 4 additions & 4 deletions deepctr_torch/models/difm.py
@@ -3,7 +3,7 @@
Author:
zanshuxun, [email protected]
Reference:
Lu W, Yu Y, Chang Y, et al. A Dual Input-aware Factorization Machine for CTR Prediction[C]//IJCAI. 2020: 3139-3145.
[1] Lu W, Yu Y, Chang Y, et al. A Dual Input-aware Factorization Machine for CTR Prediction[C]//IJCAI. 2020: 3139-3145.(https://www.ijcai.org/Proceedings/2020/0434.pdf)
"""
import torch
import torch.nn as nn
@@ -29,9 +29,9 @@ class DIFM(BaseModel):
:param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:param gpus: list of int or torch.device for multiple gpus. If None, run on ``device`` . ``gpus[0]`` should be the same gpu with ``device`` .
:return: A PyTorch model instance.
"""

def __init__(self,
@@ -79,7 +79,7 @@ def __init__(self,
self.to(device)

def forward(self, X):
sparse_embedding_list, dense_value_list = self.input_from_feature_columns(X, self.dnn_feature_columns,
sparse_embedding_list, _ = self.input_from_feature_columns(X, self.dnn_feature_columns,
self.embedding_dict)
if not len(sparse_embedding_list) > 0:
raise ValueError("there are no sparse features")
2 changes: 1 addition & 1 deletion deepctr_torch/models/fibinet.py
@@ -33,7 +33,7 @@ class FiBiNET(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self, linear_feature_columns, dnn_feature_columns, bilinear_type='interaction',
6 changes: 3 additions & 3 deletions deepctr_torch/models/ifm.py
@@ -3,7 +3,7 @@
Author:
zanshuxun, [email protected]
Reference:
Yu Y, Wang Z, Yuan B. An Input-aware Factorization Machine for Sparse Prediction[C]//IJCAI. 2019: 1466-1472.
[1] Yu Y, Wang Z, Yuan B. An Input-aware Factorization Machine for Sparse Prediction[C]//IJCAI. 2019: 1466-1472.(https://www.ijcai.org/Proceedings/2019/0203.pdf)
"""
import torch
import torch.nn as nn
@@ -29,9 +29,9 @@ class IFM(BaseModel):
:param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:param gpus: list of int or torch.device for multiple gpus. If None, run on ``device`` . ``gpus[0]`` should be the same gpu with ``device`` .
:return: A PyTorch model instance.
"""

def __init__(self,
2 changes: 1 addition & 1 deletion deepctr_torch/models/mlr.py
@@ -28,7 +28,7 @@ class MLR(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self, region_feature_columns, base_feature_columns=None, bias_feature_columns=None,
2 changes: 1 addition & 1 deletion deepctr_torch/models/nfm.py
@@ -31,7 +31,7 @@ class NFM(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self,
2 changes: 1 addition & 1 deletion deepctr_torch/models/onn.py
@@ -52,7 +52,7 @@ class ONN(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self, linear_feature_columns, dnn_feature_columns,
2 changes: 1 addition & 1 deletion deepctr_torch/models/pnn.py
@@ -32,7 +32,7 @@ class PNN(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self, dnn_feature_columns, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-5, l2_reg_dnn=0,
2 changes: 1 addition & 1 deletion deepctr_torch/models/wdl.py
@@ -30,7 +30,7 @@ class WDL(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self,
2 changes: 1 addition & 1 deletion deepctr_torch/models/xdeepfm.py
@@ -36,7 +36,7 @@ class xDeepFM(BaseModel):
:param device: str, ``"cpu"`` or ``"cuda:0"``
:param gpus: list of int or torch.device for multiple gpus. If None, run on `device`. `gpus[0]` should be the same gpu with `device`.
:return: A PyTorch model instance.
"""

def __init__(self, linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 256),
Binary file added docs/pics/DIFM.png
Binary file added docs/pics/IFM.png
2 changes: 1 addition & 1 deletion docs/source/FAQ.md
@@ -75,5 +75,5 @@ model = DeepFM(...,device=device)
## 5. How to run the demo with multiple GPUs ?

```python
model = DeepFM(..., device=device, gpus=[0, 1])
model = DeepFM(..., device=device, gpus=[0, 1])
```
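For context, a fuller multi-GPU sketch around the call above. The feature columns and toy data are illustrative placeholders (not part of this commit); only `device` and `gpus` come from the API documented here, the rest follows the library's standard quick-start pattern:

```python
import torch
import pandas as pd
from deepctr_torch.inputs import SparseFeat, get_feature_names
from deepctr_torch.models import DeepFM

# Toy data with two sparse features and a binary label (placeholder values).
data = pd.DataFrame({'user_id': [0, 1, 2, 1], 'item_id': [3, 2, 1, 0], 'label': [1, 0, 0, 1]})
feature_columns = [SparseFeat('user_id', vocabulary_size=3, embedding_dim=4),
                   SparseFeat('item_id', vocabulary_size=4, embedding_dim=4)]
model_input = {name: data[name] for name in get_feature_names(feature_columns)}

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# gpus[0] must match `device`; fall back to a single device when fewer than two GPUs are visible.
gpus = [0, 1] if torch.cuda.device_count() > 1 else None

model = DeepFM(feature_columns, feature_columns, task='binary', device=device, gpus=gpus)
model.compile('adam', 'binary_crossentropy', metrics=['auc'])
model.fit(model_input, data['label'].values, batch_size=2, epochs=1, verbose=1)
```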
21 changes: 21 additions & 0 deletions docs/source/Features.md
@@ -241,6 +241,27 @@ Feature Importance and Bilinear feature Interaction NETwork is proposed to dynam

[Huang T, Zhang Z, Zhang J. FiBiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1905.09433, 2019.](https://arxiv.org/pdf/1905.09433.pdf)

### IFM(Input-aware Factorization Machine)

Input-aware Factorization Machine (IFM) learns a unique input-aware factor for the same feature in different instances via a neural network.

[**IFM Model API**](./deepctr_torch.models.ifm.html)

![IFM](../pics/IFM.png)

[Yu Y, Wang Z, Yuan B. An Input-aware Factorization Machine for Sparse Prediction[C]//IJCAI. 2019: 1466-1472.](https://www.ijcai.org/Proceedings/2019/0203.pdf)
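A minimal usage sketch for IFM (not part of the diff above). It assumes, as with the other models in this release, that the two feature-column lists are the only required constructor arguments; `feature_columns` is an illustrative placeholder built with `SparseFeat`/`DenseFeat` as in the quick-start:

```python
from deepctr_torch.models import IFM

# IFM shares the BaseModel interface; `task`, `device` and `gpus`
# behave as described in the docstrings elsewhere in this commit.
model = IFM(linear_feature_columns=feature_columns,
            dnn_feature_columns=feature_columns,
            task='binary',
            device='cpu')
model.compile('adam', 'binary_crossentropy', metrics=['auc'])
```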

### DIFM(Dual Input-aware Factorization Machine)

Dual Input-aware Factorization Machines (DIFM) can adaptively reweight the original feature representations at the bit-wise and vector-wise levels simultaneously. Furthermore, DIFMs strategically integrate various components including Multi-Head Self-Attention, Residual Networks and DNNs into a unified end-to-end model.

[**DIFM Model API**](./deepctr_torch.models.difm.html)

![DIFM](../pics/DIFM.png)

[Lu W, Yu Y, Chang Y, et al. A Dual Input-aware Factorization Machine for CTR Prediction[C]//IJCAI. 2020: 3139-3145.](https://www.ijcai.org/Proceedings/2020/0434.pdf)
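DIFM follows the same constructor pattern; a brief sketch under the same assumptions (placeholder feature columns, default hyper-parameters):

```python
from deepctr_torch.models import DIFM

# The multi-head self-attention and input-aware reweighting are internal to the
# model, so construction looks the same as for IFM above.
model = DIFM(linear_feature_columns=feature_columns,
             dnn_feature_columns=feature_columns,
             task='binary',
             device='cpu')
```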



## Layers

1 change: 1 addition & 0 deletions docs/source/History.md
@@ -1,4 +1,5 @@
# History
- 04/04/2021 : [v0.2.6](https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.2.6) released. Add [IFM](./Features.html#ifm-input-aware-factorization-machine) and [DIFM](./Features.html#difm-dual-input-aware-factorization-machine); support multi-GPU running ([example](./FAQ.html#how-to-run-the-demo-with-multiple-gpus)).
- 02/12/2021 : [v0.2.5](https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.2.5) released. Fix bug in DCN-M.
- 12/05/2020 : [v0.2.4](https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.2.4) released. Improve compatibility & fix issues. Add History callback ([example](https://deepctr-torch.readthedocs.io/en/latest/FAQ.html#set-learning-rate-and-use-earlystopping)).
- 10/18/2020 : [v0.2.3](https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.2.3) released. Add [DCN-M](./Features.html#dcn-deep-cross-network) & [DCN-Mix](./Features.html#dcn-mix-improved-deep-cross-network-with-mix-of-experts-and-matrix-kernel). Add EarlyStopping and ModelCheckpoint callbacks ([example](https://deepctr-torch.readthedocs.io/en/latest/FAQ.html#set-learning-rate-and-use-earlystopping)).
2 changes: 2 additions & 0 deletions docs/source/Models.rst
@@ -21,3 +21,5 @@ DeepCTR-Torch Models API
ONN<deepctr_torch.models.onn>
FGCNN<deepctr_torch.models.fgcnn>
FiBiNET<deepctr_torch.models.fibinet>
IFM<deepctr_torch.models.ifm>
DIFM<deepctr_torch.models.difm>
2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -26,7 +26,7 @@
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.2.5'
release = '0.2.6'


# -- General configuration ---------------------------------------------------
7 changes: 7 additions & 0 deletions docs/source/deepctr_torch.models.difm.rst
@@ -0,0 +1,7 @@
deepctr\_torch.models.difm module
=================================

.. automodule:: deepctr_torch.models.difm
:members:
:no-undoc-members:
:no-show-inheritance:
7 changes: 7 additions & 0 deletions docs/source/deepctr_torch.models.ifm.rst
@@ -0,0 +1,7 @@
deepctr\_torch.models.ifm module
================================

.. automodule:: deepctr_torch.models.ifm
:members:
:no-undoc-members:
:no-show-inheritance:
3 changes: 3 additions & 0 deletions docs/source/deepctr_torch.models.rst
@@ -10,6 +10,7 @@ Submodules
deepctr_torch.models.autoint
deepctr_torch.models.basemodel
deepctr_torch.models.dcn
deepctr_torch.models.dcnmix
deepctr_torch.models.deepfm
deepctr_torch.models.fibinet
deepctr_torch.models.mlr
@@ -20,6 +21,8 @@ Submodules
deepctr_torch.models.xdeepfm
deepctr_torch.models.din
deepctr_torch.models.dien
deepctr_torch.models.ifm
deepctr_torch.models.difm

Module contents
---------------
5 changes: 2 additions & 3 deletions docs/source/index.rst
@@ -34,13 +34,12 @@ You can read the latest code at https://github.com/shenweichen/DeepCTR-Torch and

News
-----
04/04/2021 : Add `IFM <./Features.html#ifm-input-aware-factorization-machine>`_ and `DIFM <./Features.html#difm-dual-input-aware-factorization-machine>`_. Support multi-GPU running (`example <./FAQ.html#how-to-run-the-demo-with-multiple-gpus>`_). `Changelog <https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.2.6>`_

02/12/2021 : Fix bug in DCN-M. `Changelog <https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.2.4>`_

12/05/2020 : Improve compatibility & fix issues. Add History callback (`example <https://deepctr-torch.readthedocs.io/en/latest/FAQ.html#set-learning-rate-and-use-earlystopping>`_). `Changelog <https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.2.3>`_

10/18/2020 : Add `DCN-M <./Features.html#dcn-deep-cross-network>`_ and `DCN-Mix <./Features.html#dcn-mix-improved-deep-cross-network-with-mix-of-experts-and-matrix-kernel>`_ . Add EarlyStopping and ModelCheckpoint callbacks(`example <https://deepctr-torch.readthedocs.io/en/latest/FAQ.html#set-learning-rate-and-use-earlystopping>`_). `Changelog <https://github.com/shenweichen/DeepCTR-Torch/releases/tag/v0.2.3>`_


DisscussionGroup
-----------------------

2 changes: 1 addition & 1 deletion setup.py
@@ -9,7 +9,7 @@

setuptools.setup(
name="deepctr-torch",
version="0.2.5",
version="0.2.6",
author="Weichen Shen",
author_email="[email protected]",
description="Easy-to-use,Modular and Extendible package of deep learning based CTR(Click Through Rate) prediction models with PyTorch",
2 changes: 1 addition & 1 deletion tests/utils.py
@@ -70,7 +70,7 @@ def layer_test(layer_cls, kwargs = {}, input_shape=None,
input_dtype=torch.float32, input_data=None, expected_output=None,
expected_output_shape=None, expected_output_dtype=None, fixed_batch_size=False):
'''check layer is valid or not
:param layer_cls:
:param input_shape:
:param input_dtype:
