% lab-paper.bib
@inproceedings{Ding_SSII2024,
author = {丁 寧 and 武田 一哉 and Jin Wenhui and Bei Yingjiu and 藤井 慶輔},
booktitle = {SSII2024},
title = {多視点ドローン映像からの姿勢情報を用いたバドミントンダブルスの最適ポジショニングの推定},
year = {2024},
yomi = {Ning Ding and Kazuya Takeda and Jin Wenhui and Bei Yingjiu and Keisuke Fujii}
}
@inproceedings{Fukuzawa_MMM2025_Zero_shot,
author = {Takumi Fukuzawa and Kensho Hara and Hirokatsu Kataoka and Toru Tamaki},
booktitle = {International Conference on Multimedia Modeling (MMM2025)},
title = {Can masking background and object reduce static bias for zero-shot action recognition?},
year = {2025}
}
@inproceedings{Fukuzawa_SSII2024_Zero_shot,
author = {福沢 匠 and 原 健翔 and 片岡 裕雄 and 玉木 徹},
booktitle = {SSII2024},
title = {静的バイアスの強弱に関わらないZero-shot動作認識手法の検討},
year = {2024},
yomi = {Takumi Fukuzawa and Kensho Hara and Hirokatsu Kataoka and Toru Tamaki}
}
@inproceedings{Hashiguchi_2022_ACCVW_MSCA,
author = {Hashiguchi, Ryota and Tamaki, Toru},
booktitle = {Proceedings of the Asian Conference on Computer Vision (ACCV) Workshops},
month = {December},
pages = {276-288},
title = {Temporal Cross-attention for Action Recognition},
url = {https://openaccess.thecvf.com/content/ACCV2022W/TCV/html/Hashiguchi_Temporal_Cross-attention_for_Action_Recognition_ACCVW_2022_paper.html},
year = {2022}
}
@article{Hashiguchi_arxiv2022_MSCA,
author = {Ryota Hashiguchi and Toru Tamaki},
doi = {10.48550/arXiv.2204.00452},
eprint = {2204.00452},
eprinttype = {arXiv},
journal = {CoRR},
title = {Vision Transformer with Cross-attention by Temporal Shift for Efficient Action Recognition},
url = {https://doi.org/10.48550/arXiv.2204.00452},
volume = {abs/2204.00452},
year = {2022}
}
@inproceedings{Hashiguchi_MIRU2024_TAL,
author = {橋口 凌大 and 玉木 徹},
booktitle = {MIRU2024},
title = {動作区間検出データセットの動作区間長に応じたノイズ除去},
year = {2024},
yomi = {Ryota Hashiguchi and Toru Tamaki}
}
@misc{Hori_arXiv2024_STAD_query_matching,
archiveprefix = {arXiv},
author = {Shimon Hori and Kazuki Omi and Toru Tamaki},
eprint = {2409.18408},
primaryclass = {cs.CV},
title = {Query matching for spatio-temporal action detection with query-based object detector},
url = {https://arxiv.org/abs/2409.18408},
year = {2024}
}
@inproceedings{Hori_SSII2024_STAD_query_matching,
author = {堀 史門 and 大見 一樹 and 玉木 徹},
booktitle = {SSII2024},
title = {フレーム間のクエリマッチングを用いた物体検出モデルの時空間動作検出への拡張},
year = {2024},
yomi = {Shimon Hori and Kazuki Omi and Toru Tamaki}
}
@inproceedings{Kamiya_IWFCV2024_Multi_Model,
author = {Kodai Kamiya and Toru Tamaki},
booktitle = {The 30th International Workshop on Frontiers of Computer Vision (IW-FCV2024)},
organization = {The Institute of Electrical Engineers of Japan},
title = {Multi-model learning by sequential reading of untrimmed videos for action recognition},
url = {https://arxiv.org/abs/2401.14675},
year = {2024}
}
@inproceedings{Kato_GCCE2024_online_video,
author = {Kato, Itsuki and Kamiya, Kodai and Tamaki, Toru},
booktitle = {2024 IEEE 13th Global Conference on Consumer Electronics (GCCE)},
doi = {10.1109/GCCE62371.2024.10760638},
keywords = {Image recognition;Correlation;Contrastive learning;Consumer electronics;untrimmed video;pre-training;action recognition},
pages = {462-463},
title = {Online pre-training with long-form videos},
year = {2024}
}
@inproceedings{Kato_SSII2024_online_video,
author = {加藤 樹 and 玉木 徹},
booktitle = {SSII2024},
title = {長時間動画のオンライン事前学習方法の検討},
year = {2024},
yomi = {Itsuki Kato and Toru Tamaki}
}
@inproceedings{Kimata_MIRU2024_shared_LoRA,
author = {木全 潤 and 玉木 徹},
booktitle = {MIRU2024},
title = {動作認識における複数データセット学習のための固有LoRAと共有LoRA},
year = {2024},
yomi = {Jun Kimata and Toru Tamaki}
}
@inproceedings{Kimata_MMAsia2022_ObjectMix,
address = {New York, NY, USA},
articleno = {26},
author = {Kimata, Jun and Nitta, Tomoya and Tamaki, Toru},
booktitle = {Proceedings of the 4th ACM International Conference on Multimedia in Asia},
doi = {10.1145/3551626.3564941},
isbn = {9781450394789},
keywords = {action recognition, instance segmentation, data augmentation},
location = {Tokyo, Japan},
numpages = {7},
publisher = {Association for Computing Machinery},
series = {MMAsia '22},
title = {ObjectMix: Data Augmentation by Copy-Pasting Objects in Videos for Action Recognition},
url = {https://doi.org/10.1145/3551626.3564941},
year = {2022}
}
@misc{Mizuno_arXiv2024_VideoSegmentation,
archiveprefix = {arXiv},
author = {Tsubasa Mizuno and Toru Tamaki},
eprint = {2410.07635},
primaryclass = {cs.CV},
title = {Shift and matching queries for video semantic segmentation},
url = {https://arxiv.org/abs/2410.07635},
year = {2024}
}
@inproceedings{Mizuno_SSII2024_VideoSegmentation,
author = {水野 翼 and 玉木 徹},
booktitle = {SSII2024},
title = {画像セマンティックセグメンテーションの動画像への効率的な拡張},
year = {2024},
yomi = {Tsubasa Mizuno and Toru Tamaki}
}
@article{Nitta_IEEE_Access_2024_captioning,
author = {Nitta, Tomoya and Fukuzawa, Takumi and Tamaki, Toru},
doi = {10.1109/ACCESS.2024.3506751},
journal = {IEEE Access},
pages = {1-1},
title = {Fine-grained length controllable video captioning with ordinal embeddings},
year = {2024}
}
@article{Nitta_IEICEED_2023_Object-ABN,
author = {Tomoya Nitta and
Tsubasa Hirakawa and
Hironobu Fujiyoshi and
Toru Tamaki},
doi = {10.1587/transinf.2022EDP7138},
journal = {IEICE Transactions on Information and Systems},
number = {3},
pages = {391-400},
title = {Object-ABN: Learning to Generate Sharp Attention Maps for Action Recognition},
url = {https://doi.org/10.1587/transinf.2022EDP7138},
volume = {E106-D},
year = {2023}
}
@inproceedings{Nitta_MIRU2024_Length_Embedding,
author = {仁田 智也 and 玉木 徹},
booktitle = {MIRU2024},
title = {動画の説明文生成における長さ制御のための埋め込みとその解析},
year = {2024},
yomi = {Tomoya Nitta and Toru Tamaki}
}
@article{Omi_IEICE-ED2022_MDL,
author = {Kazuki Omi and Jun Kimata and Toru Tamaki},
doi = {10.1587/transinf.2022EDP7058},
journal = {IEICE Transactions on Information and Systems},
number = {12},
pages = {2119-2126},
title = {Model-Agnostic Multi-Domain Learning with Domain-Specific Adapters for Action Recognition},
url = {https://doi.org/10.1587/transinf.2022EDP7058},
volume = {E105-D},
year = {2022}
}
@inproceedings{Omi_IWAIT2022_ADDA,
author = {Kazuki Omi and Toru Tamaki},
booktitle = {International Workshop on Advanced Imaging Technology (IWAIT) 2022},
doi = {10.1117/12.2625953},
month = {January},
pages = {121771X},
title = {On the instability of unsupervised domain adaptation with ADDA},
url = {https://doi.org/10.1117/12.2625953},
volume = {12177},
year = {2022}
}
@inproceedings{Omi_MIRU2024_ActionTube,
author = {大見 一樹 and 玉木 徹},
booktitle = {MIRU2024},
title = {時空間アクション検出のための人物クエリのマッチングによるアクションチューブ生成},
year = {2024},
yomi = {Kazuki Omi and Toru Tamaki}
}
@inproceedings{Oshima_MIRU2024_AVA_Query_Matching,
author = {大島 慈温 and 堀 史門 and 玉木 徹},
booktitle = {MIRU2024},
title = {時空間動作検出のための人物クエリマッチングのAVAデータセットに対する性能評価},
year = {2024},
yomi = {Jion Oshima and Shimon Hori and Toru Tamaki}
}
@article{Otani_IEEEAccess2022_MPEG_JPEG,
author = {Aoi Otani and Ryota Hashiguchi and Kazuki Omi and Norishige Fukushima and Toru Tamaki},
doi = {10.1109/ACCESS.2022.3204755},
journal = {IEEE Access},
pages = {94898-94907},
title = {Performance Evaluation of Action Recognition Models on Low Quality Videos},
url = {https://doi.org/10.1109/ACCESS.2022.3204755},
volume = {10},
year = {2022}
}
@inproceedings{Shimizu_MVA2023_IV_ViT,
author = {Shimizu, Shuki and Tamaki, Toru},
booktitle = {2023 18th International Conference on Machine Vision and Applications (MVA)},
doi = {10.23919/MVA57639.2023.10215661},
keywords = {Training;Image recognition;Machine vision;Transformers;Tuning;Videos},
pages = {1-6},
title = {Joint learning of images and videos with a single Vision Transformer},
url = {https://ieeexplore.ieee.org/document/10215661},
year = {2023}
}
@inproceedings{Sugiura_VISAPP2024_S3Aug,
author = {Taiki Sugiura and Toru Tamaki},
booktitle = {Proceedings of the 19th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications - Volume 2: VISAPP},
doi = {10.5220/0012310400003660},
isbn = {978-989-758-679-8},
issn = {2184-4321},
organization = {INSTICC},
pages = {71-79},
publisher = {SciTePress},
title = {S3Aug: Segmentation, Sampling, and Shift for Action Recognition},
url = {https://www.scitepress.org/Link.aspx?doi=10.5220/0012310400003660},
year = {2024}
}