diff --git a/_config.yml b/_config.yml
index 12bf649a7ba..ad503ff6c73 100644
--- a/_config.yml
+++ b/_config.yml
@@ -6,13 +6,13 @@
# `jekyll serve`. If you change this file, please restart the server process.
# Site Settings
-title : "Lorem ipsum"
-description : "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet. "
-repository : "RayeRen/acad-homepage.github.io"
+title : "Welcome to Yining Pan's Homepage"
+description : "Always be curious, always be exploring."
+repository : "pynsigrid/pynsigrid.github.io"
google_scholar_stats_use_cdn : true
# google analytics
-google_analytics_id : # get google_analytics_id from https://analytics.google.com/analytics/
+google_analytics_id : G-6ZMFJEP46X
# SEO Related
google_site_verification : # get google_site_verification from https://search.google.com/search-console/about
@@ -21,14 +21,14 @@ baidu_site_verification : # get baidu_site_verification from https://ziyuan.ba
# Site Author
author:
- name : "Lorem ipsum"
- avatar : "images/android-chrome-512x512.png"
- bio : "Lorem ipsum College"
- location : "Beijing, China"
+ name : "Yining Pan"
+ avatar : "images/android-chrome-192x192.png"
+ bio : "PhD student in SUTD | A*STAR, focusing on multi-modal perception and generation"
+ location : "Singapore"
employer :
pubmed :
- googlescholar : "https://scholar.google.com/citations?user=YOUR_GOOGLE_SCHOLAR_ID"
- email : "Lorem@ipsum.com"
+ googlescholar : "https://scholar.google.com/citations?user=l_6n20kAAAAJ"
+ email : "firstname_secondname@mymail.sutd.edu.sg"
researchgate : # e.g., "https://www.researchgate.net/profile/yourprofile"
uri :
bitbucket :
@@ -37,13 +37,13 @@ author:
flickr :
facebook :
foursquare :
- github : # e.g., "github username"
+ github : "pynsigrid"
google_plus :
keybase :
- instagram :
+ instagram : "@sigridpan"
impactstory : # e.g., "https://profiles.impactstory.org/u/xxxx-xxxx-xxxx-xxxx"
lastfm :
- linkedin : # e.g., "linkedin username"
+ linkedin : yining-pan-187333287
dblp : # e.g., "https://dblp.org/pid/xx/xxxx.html"
orcid : # e.g., "https://orcid.org/xxxx"
pinterest :
diff --git a/_includes/author-profile.html b/_includes/author-profile.html
index 9f0d208e6ff..410aaa045d0 100644
--- a/_includes/author-profile.html
+++ b/_includes/author-profile.html
@@ -56,7 +56,7 @@
{{ author.name }}
XING
{% endif %}
{% if author.instagram %}
- Instagram
+ Unsplash
{% endif %}
{% if author.tumblr %}
Tumblr
diff --git a/_pages/about.md b/_pages/about.md
index 1e8935ec9ca..666f0f75f8f 100644
--- a/_pages/about.md
+++ b/_pages/about.md
@@ -16,43 +16,12 @@ redirect_from:
{% assign url = gsDataBaseUrl | append: "google-scholar-stats/gs_data_shieldsio.json" %}
+{% include_relative includes/intro.md %}
-Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet. Suspendisse condimentum, libero vel tempus mattis, risus risus vulputate libero, elementum fermentum mi neque vel nisl. Maecenas facilisis maximus dignissim. Curabitur mattis vulputate dui, tincidunt varius libero luctus eu. Mauris mauris nulla, scelerisque eget massa id, tincidunt congue felis. Sed convallis tempor ipsum rhoncus viverra. Pellentesque nulla orci, accumsan volutpat fringilla vitae, maximus sit amet tortor. Aliquam ultricies odio ut volutpat scelerisque. Donec nisl nisl, porttitor vitae pharetra quis, fringilla sed mi. Fusce pretium dolor ut aliquam consequat. Cras volutpat, tellus accumsan mattis molestie, nisl lacus tempus massa, nec malesuada tortor leo vel quam. Aliquam vel ex consectetur, vehicula leo nec, efficitur eros. Donec convallis non urna quis feugiat.
+{% include_relative includes/news.md %}
-My research interest includes neural machine translation and computer vision. I have published more than 100 papers at the top international AI conferences with total google scholar citations 260000+ (You can also use google scholar badge
).
+{% include_relative includes/pub.md %}
+{% include_relative includes/honers.md %}
-# π₯ News
-- *2022.02*: ππ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet.
-- *2022.02*: ππ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet.
-
-# π Publications
-
-CVPR 2016

-
-
-[Deep Residual Learning for Image Recognition](https://openaccess.thecvf.com/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf)
-
-**Kaiming He**, Xiangyu Zhang, Shaoqing Ren, Jian Sun
-
-[**Project**](https://scholar.google.com/citations?view_op=view_citation&hl=zh-CN&user=DhtAFkwAAAAJ&citation_for_view=DhtAFkwAAAAJ:ALROH1vI_8AC)
-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet.
-
-
-
-- [Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet](https://github.com), A, B, C, **CVPR 2020**
-
-# π Honors and Awards
-- *2021.10* Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet.
-- *2021.09* Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet.
-
-# π Educations
-- *2019.06 - 2022.04 (now)*, Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet.
-- *2015.09 - 2019.06*, Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet.
-
-# π¬ Invited Talks
-- *2021.06*, Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet.
-- *2021.03*, Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ornare aliquet ipsum, ac tempus justo dapibus sit amet. \| [\[video\]](https://github.com/)
-
-# π» Internships
-- *2019.05 - 2020.02*, [Lorem](https://github.com/), China.
\ No newline at end of file
+{% include_relative includes/others.md %}
diff --git a/_pages/includes/honers.md b/_pages/includes/honers.md
new file mode 100644
index 00000000000..55ba661c249
--- /dev/null
+++ b/_pages/includes/honers.md
@@ -0,0 +1,6 @@
+# 🎖 Honors and Awards
+- Singapore International Graduate Award (SINGA) from A*STAR.
+- ZJU Graduate of Merit, Triple-A Graduate.
+- National Undergraduate Electronics Design Contest, First Prize.
+- China College Students’ “Internet+” Innovation and Entrepreneurship Competition, First Prize.
+- Won the People Scholarship for three consecutive years (Top 1%).
diff --git a/_pages/includes/intro.md b/_pages/includes/intro.md
new file mode 100644
index 00000000000..0bf19c795db
--- /dev/null
+++ b/_pages/includes/intro.md
@@ -0,0 +1,8 @@
+I am now a second-year PhD student in [IMPL Lab](https://impl2023.github.io/), Singapore University of Technology and Design (SUTD), fortunately supervised by Prof. [Na Zhao](https://na-z.github.io/).
+I am also supported by the Agency for Science, Technology and Research (A*STAR) [SINGA](https://www.a-star.edu.sg/Scholarships/for-graduate-studies/singapore-international-graduate-award-singa) scholarship and am grateful to be supervised by Prof. [Xulei Yang](https://www.google.com/search?q=xulei+yang&oq=xu&gs_lcrp=EgZjaHJvbWUqEAgCEEUYExgnGDsYgAQYigUyBggAEEUYOTIGCAEQRRhAMhAIAhBFGBMYJxg7GIAEGIoFMggIAxBFGCcYOzIGCAQQRRg7MgoIBRAAGLEDGIAEMgYIBhBFGD0yBggHEEUYPdIBCDMyMjVqMGo3qAIAsAIA&sourceid=chrome&ie=UTF-8#:~:text=Xulei%20Yang%20%2D%20Singapore,%E6%82%A8%E7%BB%8F%E5%B8%B8%E8%AE%BF%E9%97%AE).
+Prior to this, I obtained my Master’s degree from Zhejiang University in 2023 and worked as a research intern at Alibaba DAMO Academy.
+
+My research interests include multi-modal scene understanding and generation. Currently, I focus on building a comprehensive understanding of complex scenes by leveraging multi-modal features (e.g., LiDAR and RGB images). I am also interested in transferring learned knowledge to address real-world challenges such as domain shift.
+
+
+
\ No newline at end of file
diff --git a/_pages/includes/news.md b/_pages/includes/news.md
new file mode 100644
index 00000000000..a715558cea1
--- /dev/null
+++ b/_pages/includes/news.md
@@ -0,0 +1,3 @@
+# 🔥 News
+- *2025.05*: 🎉🎉🎉 One paper is accepted by ICML 2025!
+- *2025.05*: My new homepage is now live!
diff --git a/_pages/includes/others.md b/_pages/includes/others.md
new file mode 100644
index 00000000000..453c552cf8f
--- /dev/null
+++ b/_pages/includes/others.md
@@ -0,0 +1,13 @@
+
\ No newline at end of file
diff --git a/_pages/includes/pub.md b/_pages/includes/pub.md
new file mode 100644
index 00000000000..7af82035dc4
--- /dev/null
+++ b/_pages/includes/pub.md
@@ -0,0 +1,49 @@
+# 📝 Publications
+
+A full publication list is available on my [Google Scholar](https://scholar.google.com/citations?user=l_6n20kAAAAJ) page.
+
+
+ICML 2025

+
+
+[**ICML 2025**] [How Do Images Align and Complement LiDAR? Towards a Harmonized Multi-modal 3D Panoptic Segmentation](https://arxiv.org/abs/2505.18956) \\
+**Yining Pan**, Qiongjie Cui, Xulei Yang, Na Zhao \\
+[[Project page]](https://github.com/IMPL-Lab/IAL)
+
+- This paper proposes the Image-Assists-LiDAR (IAL) model, which harmonizes LiDAR and images through synchronized augmentation, token fusion, and prior query generation.
+- IAL achieves SOTA performance on 3D panoptic benchmarks, outperforming baseline methods by over 4%.
+
+
+
+
+
+
+
+CVPR 2024

+
+
+[**CVPR 2024**] [InstructVideo: Instructing Video Diffusion Models with Human Feedback](https://arxiv.org/abs/2312.12490) \\
+H. Yuan, S. Zhang, X. Wang, Y. Wei, T. Feng, **Yining Pan**, Y. Zhang, Z. Liu, S. Albanie, D. Ni \\
+[](https://github.com/damo-vilab/i2vgen-xl)
+[](https://github.com/damo-vilab/i2vgen-xl)
+[[Project page]](https://instructvideo.github.io/)
+
+- InstructVideo is the first research attempt that instructs video diffusion models with human feedback.
+- InstructVideo significantly enhances the visual quality of generated videos without compromising generalization capabilities, with merely 0.1% of the parameters being fine-tuned.
+
+
+
+
+
+ICCV 2023

+
+
+[**ICCV 2023**] [RLIPv2: Fast Scaling of Relational Language-Image Pre-training](https://arxiv.org/abs/2308.09351) \\
+H. Yuan, S. Zhang, X. Wang, S. Albanie, **Yining Pan**, T. Feng, J. Jiang, D. Ni, Y. Zhang, D. Zhao \\
+[](https://github.com/JacobYuan7/RLIPv2)
+[](https://github.com/JacobYuan7/RLIPv2)
+
+- RLIPv2 elevates [RLIP](https://arxiv.org/abs/2209.01814) by leveraging a new language-image fusion mechanism, designed for expansive data scales.
+
+
+
diff --git a/_sass/_utilities.scss b/_sass/_utilities.scss
index 97ef7fb127f..75c6396686c 100644
--- a/_sass/_utilities.scss
+++ b/_sass/_utilities.scss
@@ -220,7 +220,7 @@ body:hover .visually-hidden button {
}
.fa-instagram {
- color: $instagram-color;
+ color: $github-color;
}
.fa-lastfm,
diff --git a/history/about_.md b/history/about_.md
new file mode 100644
index 00000000000..a1b368fcd98
--- /dev/null
+++ b/history/about_.md
@@ -0,0 +1,108 @@
+---
+permalink: /
+title: ""
+excerpt: ""
+author_profile: true
+redirect_from:
+ - /about/
+ - /about.html
+---
+
+{% if site.google_scholar_stats_use_cdn %}
+{% assign gsDataBaseUrl = "https://cdn.jsdelivr.net/gh/" | append: site.repository | append: "@" %}
+{% else %}
+{% assign gsDataBaseUrl = "https://raw.githubusercontent.com/" | append: site.repository | append: "/" %}
+{% endif %}
+{% assign url = gsDataBaseUrl | append: "google-scholar-stats/gs_data_shieldsio.json" %}
+
+
+
+I am now a second-year PhD student in [IMPL Lab](https://impl2023.github.io/), Singapore University of Technology and Design (SUTD), fortunately supervised by Prof. [Na Zhao](https://na-z.github.io/).
+I am also supported by the Agency for Science, Technology and Research (A*STAR) [SINGA](https://www.a-star.edu.sg/Scholarships/for-graduate-studies/singapore-international-graduate-award-singa) scholarship and am grateful to be supervised by Prof. [Xulei Yang](https://www.google.com/search?q=xulei+yang&oq=xu&gs_lcrp=EgZjaHJvbWUqEAgCEEUYExgnGDsYgAQYigUyBggAEEUYOTIGCAEQRRhAMhAIAhBFGBMYJxg7GIAEGIoFMggIAxBFGCcYOzIGCAQQRRg7MgoIBRAAGLEDGIAEMgYIBhBFGD0yBggHEEUYPdIBCDMyMjVqMGo3qAIAsAIA&sourceid=chrome&ie=UTF-8#:~:text=Xulei%20Yang%20%2D%20Singapore,%E6%82%A8%E7%BB%8F%E5%B8%B8%E8%AE%BF%E9%97%AE).
+Prior to this, I obtained my Master’s degree from Zhejiang University in 2023 and worked as a research intern at Alibaba DAMO Academy.
+
+My research interests include multi-modal scene understanding and generation. Currently, I focus on building a comprehensive understanding of complex scenes by leveraging multi-modal features (e.g., LiDAR and RGB images). I am also interested in transferring learned knowledge to address real-world challenges such as domain shift.
+
+
+
+
+
+# 🔥 News
+- *2025.05*: 🎉🎉🎉 One paper is accepted by ICML 2025!
+- *2025.05*: My new homepage is now live!
+
+# 📝 Publications
+
+A full publication list is available on my [Google Scholar](https://scholar.google.com/citations?user=l_6n20kAAAAJ) page.
+
+
+ICML 2025

+
+
+[**ICML 2025**] [How Do Images Align and Complement LiDAR? Towards a Harmonized Multi-modal 3D Panoptic Segmentation](https://arxiv.org/abs/2505.18956) \\
+**Yining Pan**, Qiongjie Cui, Xulei Yang, Na Zhao \\
+[[Project page]](https://github.com/IMPL-Lab/IAL)
+
+- This paper proposes the Image-Assists-LiDAR (IAL) model, which harmonizes LiDAR and images through synchronized augmentation, token fusion, and prior query generation.
+- IAL achieves SOTA performance on 3D panoptic benchmarks, outperforming baseline methods by over 4%.
+
+
+
+
+
+
+
+CVPR 2024

+
+
+[**CVPR 2024**] [InstructVideo: Instructing Video Diffusion Models with Human Feedback](https://arxiv.org/abs/2312.12490) \\
+H. Yuan, S. Zhang, X. Wang, Y. Wei, T. Feng, **Yining Pan**, Y. Zhang, Z. Liu, S. Albanie, D. Ni \\
+[](https://github.com/damo-vilab/i2vgen-xl)
+[](https://github.com/damo-vilab/i2vgen-xl)
+[[Project page]](https://instructvideo.github.io/)
+
+- InstructVideo is the first research attempt that instructs video diffusion models with human feedback.
+- InstructVideo significantly enhances the visual quality of generated videos without compromising generalization capabilities, with merely 0.1% of the parameters being fine-tuned.
+
+
+
+
+
+
+
+ICCV 2023

+
+
+[**ICCV 2023**] [RLIPv2: Fast Scaling of Relational Language-Image Pre-training](https://arxiv.org/abs/2308.09351) \\
+H. Yuan, S. Zhang, X. Wang, S. Albanie, **Yining Pan**, T. Feng, J. Jiang, D. Ni, Y. Zhang, D. Zhao \\
+[](https://github.com/JacobYuan7/RLIPv2)
+[](https://github.com/JacobYuan7/RLIPv2)
+
+- RLIPv2 elevates [RLIP](https://arxiv.org/abs/2209.01814) by leveraging a new language-image fusion mechanism, designed for expansive data scales.
+
+
+
+
+
+
+
+# 🎖 Honors and Awards
+- Singapore International Graduate Award (SINGA) from A*STAR.
+- ZJU Graduate of Merit, Triple-A Graduate.
+- National Undergraduate Electronics Design Contest, First Prize.
+- China College Students’ “Internet+” Innovation and Entrepreneurship Competition, First Prize.
+- Won the People Scholarship for three consecutive years (Top 1%).
+
+
\ No newline at end of file
diff --git a/images/android-chrome-192x192.png b/images/android-chrome-192x192.png
old mode 100755
new mode 100644
index 5e8d9034999..dc47c151c56
Binary files a/images/android-chrome-192x192.png and b/images/android-chrome-192x192.png differ
diff --git a/images/apple-touch-icon.png b/images/apple-touch-icon.png
old mode 100755
new mode 100644
index 9de4c24bdd3..7d3b9142f12
Binary files a/images/apple-touch-icon.png and b/images/apple-touch-icon.png differ
diff --git a/images/favicon-16x16.png b/images/favicon-16x16.png
old mode 100755
new mode 100644
index 59307a0cede..96cef293f64
Binary files a/images/favicon-16x16.png and b/images/favicon-16x16.png differ
diff --git a/images/favicon-32x32.png b/images/favicon-32x32.png
old mode 100755
new mode 100644
index 1af5b2e6f6b..58835a053e3
Binary files a/images/favicon-32x32.png and b/images/favicon-32x32.png differ
diff --git a/images/favicon.ico b/images/favicon.ico
old mode 100755
new mode 100644
index 83aa24b99f2..8168ba90fae
Binary files a/images/favicon.ico and b/images/favicon.ico differ
diff --git a/images/papers/1-RLIPv2-ICCV23.png b/images/papers/1-RLIPv2-ICCV23.png
new file mode 100644
index 00000000000..1d851bab4be
Binary files /dev/null and b/images/papers/1-RLIPv2-ICCV23.png differ
diff --git a/images/papers/2-InstructVideo-CVPR24.png b/images/papers/2-InstructVideo-CVPR24.png
new file mode 100644
index 00000000000..90f7f0fed7c
Binary files /dev/null and b/images/papers/2-InstructVideo-CVPR24.png differ
diff --git a/images/papers/3-IAL-ICML25.png b/images/papers/3-IAL-ICML25.png
new file mode 100644
index 00000000000..692044b158a
Binary files /dev/null and b/images/papers/3-IAL-ICML25.png differ
diff --git a/images/site.webmanifest b/images/site.webmanifest
old mode 100755
new mode 100644
index 1e4db685f49..a9434a3e284
--- a/images/site.webmanifest
+++ b/images/site.webmanifest
@@ -6,12 +6,7 @@
"src": "/android-chrome-192x192.png",
"sizes": "192x192",
"type": "image/png"
- },
- {
- "src": "/android-chrome-512x512.png",
- "sizes": "512x512",
- "type": "image/png"
- }
+ }
],
"theme_color": "#FFFFFF",
"background_color": "#FFFFFF",