@article{info:doi/10.2196/64318,
  author   = {Seo, Sujeong and Kim, Kyuli and Yang, Heyoung},
  title    = {Performance Assessment of Large Language Models in Medical Consultation: Comparative Study},
  journal  = {JMIR Medical Informatics},
  year     = {2025},
  month    = feb,
  day      = {12},
  volume   = {13},
  pages    = {e64318},
  keywords = {artificial intelligence; biomedical; large language model; depression; similarity measurement; text validity},
  abstract = {Background: The recent introduction of generative artificial intelligence (AI) as an interactive consultant has sparked interest in evaluating its applicability in medical discussions and consultations, particularly within the domain of depression. Objective: This study evaluates the capability of large language models (LLMs) in AI to generate responses to depression-related queries. Methods: Using the PubMedQA and QuoraQA data sets, we compared various LLMs, including BioGPT, PMC-LLaMA, GPT-3.5, and Llama2, and measured the similarity between the generated and original answers. Results: The latest general LLMs, GPT-3.5 and Llama2, exhibited superior performance, particularly in generating responses to medical inquiries from the PubMedQA data set. Conclusions: Considering the rapid advancements in LLM development in recent years, it is hypothesized that version upgrades of general LLMs offer greater potential for enhancing their ability to generate ``knowledge text'' in the biomedical domain compared with fine-tuning for the biomedical field. These findings are expected to contribute significantly to the evolution of AI-based medical counseling systems.},
  issn     = {2291-9694},
  doi      = {10.2196/64318},
  url      = {https://medinform.jmir.org/2025/1/e64318},
}