use numpydoc instead of napoleon
commit 49c9cb38be (parent bbc50faef2)
@@ -34,7 +34,8 @@ extensions = [
     'sphinx.ext.autodoc',
     'sphinx.ext.viewcode',
     "sphinx_rtd_theme",
-    'sphinx.ext.napoleon',
+    'sphinx.ext.mathjax',
+    'numpydoc',
 ]

 # Add any paths that contain templates here, relative to this directory.
@@ -60,3 +61,4 @@ html_static_path = ['_static']
 source_suffix = ['.rst', '.md']

 # -- Extension configuration -------------------------------------------------
+numpydoc_show_class_members = False
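Context on the switch: numpydoc parses NumPy-format docstrings directly, whereas napoleon translates Google/NumPy style into reStructuredText before autodoc sees it, so numpydoc is stricter about section layout. A minimal sketch of the layout numpydoc expects (the function and names here are illustrative, not from this repository):

def example(x, y=None):
    """One-line summary ending with a period.

    Parameters
    ----------
    x : Tensor [shape=(*, T, d)]
        the input tensor.
    y : Tensor, optional
        an optional tensor. Defaults to None.

    Returns
    -------
    out : Tensor [shape=(*, T, d)]
        the output tensor.
    """
    return x

`numpydoc_show_class_members = False` is a standard numpydoc option: it suppresses the automatically generated Methods/Attributes listings for classes, which would otherwise duplicate members that autodoc already documents.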
@@ -36,10 +36,13 @@ def scaled_dot_product_attention(q,

     q: Tensor [shape=(*, T_q, d)]
         the query tensor.
+
     k: Tensor [shape=(*, T_k, d)]
         the key tensor.
+
     v: Tensor [shape=(*, T_k, d_v)]
         the value tensor.
+
     mask: Tensor [shape=(*, T_q, T_k) or broadcastable], optional
         the mask tensor; zeros correspond to paddings. Defaults to None.

@@ -47,6 +50,7 @@ def scaled_dot_product_attention(q,
 ----------
     out: Tensor [shape=(*, T_q, d_v)]
         the context vector.
+
     attn_weights: Tensor [shape=(*, T_q, T_k)]
         the attention weights.
     """
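For reference, a minimal sketch of an attention implementation consistent with the shapes documented above (PyTorch is an assumption here; the diff does not show which tensor library the project uses):

import math
import torch

def scaled_dot_product_attention(q, k, v, mask=None):
    # scores: (*, T_q, d) @ (*, d, T_k) -> (*, T_q, T_k), scaled by sqrt(d)
    d = q.shape[-1]
    scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d)
    if mask is not None:
        # zeros in the mask mark padding positions; drop them from the softmax
        scores = scores.masked_fill(mask == 0, float("-inf"))
    # attn_weights: (*, T_q, T_k); each row sums to 1 over the key axis
    attn_weights = torch.softmax(scores, dim=-1)
    # out: (*, T_q, T_k) @ (*, T_k, d_v) -> (*, T_q, d_v)
    out = torch.matmul(attn_weights, v)
    return out, attn_weights

For example, `out, w = scaled_dot_product_attention(torch.randn(2, 5, 8), torch.randn(2, 7, 8), torch.randn(2, 7, 16))` yields `out.shape == (2, 5, 16)` and `w.shape == (2, 5, 7)`.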