Graphs play a significant role in representing and analyzing complex relationships in real-world applications such as citation networks, social networks, and biological data. Graph intelligence is rapidly becoming a crucial aspect of understanding and exploiting the intricate interconnections within graph data. Recently, large language models (LLMs) and prompt learning techniques have pushed graph intelligence forward, outperforming traditional Graph Neural Network (GNN) pre-training methods and setting new benchmarks for performance. In this tutorial, we begin by offering a comprehensive review and analysis of existing methods that integrate LLMs with graphs. We introduce existing works based on a novel taxonomy that classifies them into three distinct categories according to the roles of LLMs in graph tasks: as enhancers, predictors, or alignment components. Secondly, we introduce a new learning method that utilizes prompting on graphs, offering substantial potential to enhance graph transfer capabilities across diverse tasks and domains. We discuss existing works on graph prompting within a unified framework and introduce our developed tool for executing a variety of graph prompting tasks. Additionally, we discuss the applications of combining graphs, LLMs, and prompt learning across various tasks, such as urban computing, recommendation systems, and anomaly detection. This lecture-style tutorial is an extension of our original work published in IJCAI 2024 and on arXiv, presented at the invitation of KDD 2024.
The tutorial will be held on August 25, 10:00–13:00 (UTC+1).
@article{li2023survey,
  title         = {A Survey of Graph Meets Large Language Model: Progress and Future Directions},
  author        = {Li, Yuhan and Li, Zhixun and Wang, Peisong and Li, Jia and Sun, Xiangguo and Cheng, Hong and Yu, Jeffrey Xu},
  journal       = {arXiv preprint arXiv:2311.12399},
  eprint        = {2311.12399},
  archiveprefix = {arXiv},
  year          = {2023},
}
@article{sun2023graph,
  title         = {Graph Prompt Learning: A Comprehensive Survey and Beyond},
  author        = {Sun, Xiangguo and Zhang, Jiawen and Wu, Xixi and Cheng, Hong and Xiong, Yun and Li, Jia},
  journal       = {arXiv preprint arXiv:2311.16534},
  eprint        = {2311.16534},
  archiveprefix = {arXiv},
  year          = {2023},
}
@article{chen2024graphwiz,
  title         = {{GraphWiz}: An Instruction-Following Language Model for Graph Problems},
  author        = {Chen, Nuo and Li, Yuhan and Tang, Jianheng and Li, Jia},
  journal       = {arXiv preprint arXiv:2402.16029},
  eprint        = {2402.16029},
  archiveprefix = {arXiv},
  year          = {2024},
}
@article{li2024zerog,
  title         = {{ZeroG}: Investigating Cross-Dataset Zero-Shot Transferability in Graphs},
  author        = {Li, Yuhan and Wang, Peisong and Li, Zhixun and Yu, Jeffrey Xu and Li, Jia},
  journal       = {arXiv preprint arXiv:2402.11235},
  eprint        = {2402.11235},
  archiveprefix = {arXiv},
  year          = {2024},
}
@article{zhao2024all,
  title         = {All in One and One for All: A Simple yet Effective Method towards Cross-Domain Graph Pretraining},
  author        = {Zhao, Haihong and Chen, Aochuan and Sun, Xiangguo and Cheng, Hong and Li, Jia},
  journal       = {arXiv preprint arXiv:2402.09834},
  eprint        = {2402.09834},
  archiveprefix = {arXiv},
  year          = {2024},
}