<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Med Inform</journal-id><journal-id journal-id-type="publisher-id">medinform</journal-id><journal-id journal-id-type="index">7</journal-id><journal-title>JMIR Medical Informatics</journal-title><abbrev-journal-title>JMIR Med Inform</abbrev-journal-title><issn pub-type="epub">2291-9694</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v13i1e64979</article-id><article-id pub-id-type="doi">10.2196/64979</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Interpretable Machine Learning Model for Predicting and Assessing the Risk of Diabetic Nephropathy: Prediction Model Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Wen</surname><given-names>Yili</given-names></name><degrees>MM</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wan</surname><given-names>Zhiqiang</given-names></name><degrees>MEng</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Ren</surname><given-names>Huiling</given-names></name><degrees>MM</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wang</surname><given-names>Xu</given-names></name><degrees>MM</degrees><xref ref-type="aff" 
rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wang</surname><given-names>Weijie</given-names></name><degrees>MM</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Institute of Medical Information/Medical Library, Chinese Academy of Medical Sciences &#x0026; Peking Union Medical College</institution><addr-line>3 Yabao Road, Chaoyang District</addr-line><addr-line>Beijing</addr-line><country>China</country></aff><aff id="aff2"><institution>Peking Union Medical College Hospital, Chinese Academy of Medical Science &#x0026; Peking Union Medical College</institution><addr-line>Beijing</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Coristine</surname><given-names>Andrew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Chadaga</surname><given-names>Krishnaraj</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Yella</surname><given-names>Venkata Rajesh</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Huiling Ren, MM, Institute of Medical Information/Medical Library, Chinese Academy of Medical Sciences &#x0026; Peking Union Medical College, 3 Yabao Road, Chaoyang District, Beijing, 100010, China, 86 01052328911; <email>ren.huiling@imicams.ac.cn</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>22</day><month>10</month><year>2025</year></pub-date><volume>13</volume><elocation-id>e64979</elocation-id><history><date date-type="received"><day>01</day><month>08</month><year>2024</year></date><date date-type="rev-recd"><day>09</day><month>09</month><year>2025</year></date><date 
date-type="accepted"><day>09</day><month>09</month><year>2025</year></date></history><copyright-statement>&#x00A9; Yili Wen, Zhiqiang Wan, Huiling Ren, Xu Wang, Weijie Wang. Originally published in JMIR Medical Informatics (<ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org">https://medinform.jmir.org</ext-link>), 22.10.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Medical Informatics, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://medinform.jmir.org/">https://medinform.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://medinform.jmir.org/2025/1/e64979"/><abstract><sec><title>Background</title><p>Diabetic nephropathy (DN), a severe complication of diabetes, is characterized by proteinuria, hypertension, and progressive renal function decline, potentially leading to end-stage renal disease. The International Diabetes Federation projects that by 2045, 783 million people will have diabetes, with 30%&#x2010;40% of them developing DN. 
Current diagnostic approaches lack sufficient sensitivity and specificity for early detection and diagnosis, underscoring the need for an accurate, interpretable predictive model to enable timely intervention, reduce cardiovascular risks, and optimize health care costs.</p></sec><sec><title>Objective</title><p>This study aimed to develop and validate a machine learning&#x2013;based predictive model for DN in patients with type 2 diabetes, with a focus on achieving high predictive accuracy while ensuring transparency and interpretability through explainable artificial intelligence techniques, thereby supporting early diagnosis, risk assessment, and personalized clinical decision-making.</p></sec><sec sec-type="methods"><title>Methods</title><p>Our retrospective cohort study investigated 1000 patients with type 2 diabetes using data from electronic medical records collected between 2015 and 2020. The study design incorporated a sample of 444 patients with DN and 556 without, focusing on demographics, clinical metrics such as blood pressure and glucose levels, and renal function markers. Data collection relied on electronic records, with missing values handled via multiple imputation and dataset balance achieved using Synthetic Minority Oversampling Technique (SMOTE). In this study, advanced machine learning algorithms, namely Extreme Gradient Boosting (XGBoost), CatBoost, and Light Gradient-Boosting Machine (LightGBM), were used due to their robustness in handling complex datasets. Key metrics, including accuracy, precision, recall, <italic>F</italic><sub>1</sub>-score, specificity, and area under the curve, were used to provide a comprehensive assessment of model performance. 
In addition, explainable machine learning techniques, such as Local Interpretable Model-Agnostic Explanations (LIME) and Shapley Additive Explanations (SHAP), were applied to enhance the transparency and interpretability of the models, offering valuable insights into their decision-making processes.</p></sec><sec sec-type="results"><title>Results</title><p>XGBoost and LightGBM demonstrated superior performance, with XGBoost achieving the highest accuracy of 86.87%, a precision of 88.90%, a recall of 84.40%, an <italic>F</italic><sub>1</sub>-score of 86.44%, and a specificity of 89.12%. LIME and SHAP analyses provided insights into the contribution of individual features to elucidate the decision-making processes of these models, identifying serum creatinine, albumin, and lipoproteins as significant predictors.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The developed machine learning model not only provides a robust predictive tool for early diagnosis and risk assessment of DN but also ensures transparency and interpretability, crucial for clinical integration. 
By enabling early intervention and personalized treatment strategies, this model has the potential to improve patient outcomes and optimize health care resource usage.</p></sec></abstract><kwd-group><kwd>type 2 diabetes</kwd><kwd>machine learning</kwd><kwd>interpretability analysis</kwd><kwd>ML</kwd><kwd>diabetes</kwd><kwd>risk assessment</kwd><kwd>diabetic nephropathy</kwd><kwd>hypertension</kwd><kwd>renal disease</kwd><kwd>renal function</kwd><kwd>glucose</kwd><kwd>oxidative stress</kwd><kwd>inflammation</kwd><kwd>fibrosis</kwd><kwd>kidney</kwd><kwd>quality of life</kwd><kwd>ML model</kwd><kwd>predictive tool</kwd><kwd>early diagnosis</kwd><kwd>patient outcomes</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>Diabetic nephropathy (DN), a severe microvascular complication of diabetes, primarily manifests as proteinuria, hypertension, and a progressive decline in renal function, potentially leading to end-stage renal disease. The pathogenesis of DN is attributed to a high-glucose milieu, oxidative stress, inflammation, and fibrosis, collectively contributing to substantial morphological changes in the kidneys, including thickening of the glomerular basement membrane, glomerulosclerosis, tubular atrophy, interstitial inflammation, and renal fibrosis [<xref ref-type="bibr" rid="ref1">1</xref>]. The prevalence of diabetes and diabetic kidney disease has been increasing, and the International Diabetes Federation projected that the number of patients with diabetes would rise to 783 million by 2045. Notably, approximately 30%&#x2010;40% of these patients are expected to develop DN [<xref ref-type="bibr" rid="ref2">2</xref>], with a mortality rate 30 times higher than that of diabetic patients without kidney disease [<xref ref-type="bibr" rid="ref3">3</xref>]. Hence, the importance of early detection cannot be overstated in managing DN. 
Early diagnosis not only significantly reduces the reliance on costly medical resources such as dialysis and transplantation but also alleviates the economic burden on patients [<xref ref-type="bibr" rid="ref4">4</xref>]. By intervening in the early stages of DN, clinicians can effectively preserve renal function and slow disease progression, thereby enhancing the quality of life and reducing the risk of cardiovascular complications, a major cause of death among patients with diabetes. Moreover, from a perspective of health economics, early detection is crucial as it reduces the need for intensive later-stage treatments, allowing for the reallocation of medical resources to other pressing needs [<xref ref-type="bibr" rid="ref5">5</xref>]. Consequently, investing in the early detection of DN not only benefits patients but also enhances the efficiency of health care resource usage across society.</p><p>Machine learning (ML), as a significant branch of artificial intelligence (AI), has revolutionized the field of medical research by analyzing complex datasets to discover models and make predictions. Particularly in disease prediction and classification, ML algorithms can handle vast amounts of clinical and biological data, identify risk factors, predict disease onset, and accurately classify disease subtypes [<xref ref-type="bibr" rid="ref6">6</xref>]. In contrast, traditional statistical methods often perform poorly when dealing with the high dimensionality and nonlinearity of biomedical data, whereas ML algorithms, such as decision trees, random forests, support vector machines, and neural networks, exhibit advantages that traditional statistical methods cannot match [<xref ref-type="bibr" rid="ref7">7</xref>]. ML algorithms have been widely applied in clinical research and shown outstanding performance in various fields. 
For example, they have achieved significant results in the early prediction of acute kidney injury [<xref ref-type="bibr" rid="ref8">8</xref>], malaria prediction [<xref ref-type="bibr" rid="ref9">9</xref>], and cervical cancer survival prediction [<xref ref-type="bibr" rid="ref10">10</xref>]. Despite the tremendous potential of ML models in predictive analysis, their application in clinical environments is often hindered by the &#x201C;black box&#x201D; nature of many algorithms. This opacity limits clinicians&#x2019; understanding, trust, and effective use of ML predictions. To address this issue, explainable machine learning (XML) techniques have emerged, aiming to enhance the transparency and interpretability of models [<xref ref-type="bibr" rid="ref11">11</xref>]. By using techniques such as Shapley Additive Explanations (SHAP) and Local Interpretable Model-Agnostic Explanations (LIME), XML can elucidate the contribution of individual features to prediction outcomes, thereby increasing the model&#x2019;s transparency and interpretability [<xref ref-type="bibr" rid="ref12">12</xref>]. This interpretability is crucial for integrating ML into clinical workflows, as it allows health care providers to validate model outputs based on clinical knowledge, explain decisions to patients, and comply with regulatory standards [<xref ref-type="bibr" rid="ref13">13</xref>]. Ultimately, XML holds the promise of bridging the gap between advanced analytics and clinical applications, fostering more informed and more confident decision-making in patient care.</p></sec><sec id="s1-2"><title>Objective</title><p>In the clinical management of DN, early diagnosis and precise treatment are crucial for improving patient outcomes. However, traditional diagnostic methods often fall short in predicting the complex progression of the disease, necessitating new tools to enhance predictive accuracy and reliability. 
This study aims to develop and validate an ML-based model for predicting DN, emphasizing both high predictive accuracy and model interpretability to meet clinicians&#x2019; needs for transparency. By addressing the gap in both predictive performance and interpretability, this model provides a more holistic approach to managing DN. By creating a new predictive tool, we aim to provide clinicians with a deep understanding of the model&#x2019;s predictive logic, thereby enhancing trust and application of the predictions. We meticulously designed and integrated various ML algorithms, including decision trees, random forests, extra trees, Adaptive Boosting (AdaBoost), Extreme Gradient Boosting (XGBoost), and Light Gradient-Boosting Machine (LightGBM), to build a model with significant predictive accuracy. Concurrently, we used LIME and SHAP methods for in-depth analysis of the model&#x2019;s interpretability, ensuring transparency and fairness in the prediction process. The core contribution of this study lies in enhancing model interpretability, thereby increasing its credibility and practicality in real medical applications. This model not only provides a scientifically transparent decision support system for early diagnosis, risk assessment, and personalized treatment of DN but also aids doctors in devising more precise intervention strategies to improve patient outcomes. We believe that the generalizability and effectiveness of these methods will lay a solid foundation for the broader application of ML technologies in various medical scenarios and open new avenues for medical research. We outline the overall approach of our study as shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Overview of the research framework. 
This diagram illustrates the complete workflow from data preprocessing to model interpretability, where the orange module represents the data processing stage, the green module denotes the model construction phase, and the blue module signifies the result analysis stage. The directional arrows indicate the sequential order of processes. AdaBoost: Adaptive Boosting; AUC: area under the curve; LightGBM: Light Gradient-Boosting Machine; LIME: Local Interpretable Model-Agnostic Explanations; SHAP: Shapley Additive Explanations; XGBoost: Extreme Gradient Boosting.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e64979_fig01.png"/></fig></sec><sec id="s1-3"><title>Related Work</title><p>XML has been introduced to enhance model transparency and reliability, helping clinicians better understand the predictions made by these models. Several studies have successfully demonstrated how interpretability techniques can improve the transparency of clinical decision-making. Chadaga et al [<xref ref-type="bibr" rid="ref14">14</xref>] used SHAP and LIME to predict COVID-19 prognosis using clinical markers, aiming to identify high-risk patients early and provide appropriate treatments to prevent severe outcomes, while also making AI predictions interpretable and trustworthy for medical professionals. Khanna et al [<xref ref-type="bibr" rid="ref15">15</xref>] built a decision support system using SHAP and LIME to predict osteoporosis risk, aiming to enhance early diagnosis and treatment while making ML models interpretable and reliable for medical professionals. Guan et al [<xref ref-type="bibr" rid="ref16">16</xref>] used SHAP to interpret venous thromboembolism risks in patients who are critically ill, with the goal of improving early risk identification, enabling timely interventions, and fostering transparency and trust in the decision-making process. 
Zhong et al [<xref ref-type="bibr" rid="ref17">17</xref>] introduced SHAP to significantly improve the accuracy of blood oxygen saturation estimation based on neck photoplethysmography, thereby enhancing the reliability of noninvasive oxygen monitoring. Suh et al [<xref ref-type="bibr" rid="ref18">18</xref>] used LIME to analyze a deep learning model for osteoporosis risk screening, identifying and ranking critical features that contribute to risk prediction. This approach enhances the interpretability of ML models, facilitating personalized health care and aiding clinicians in understanding the decision-making process. In the domain of XML, SHAP and LIME are among the most commonly used methods; however, other approaches, such as partial dependence plot (PDP), are also available. Bernard et al [<xref ref-type="bibr" rid="ref19">19</xref>] used PDP to visualize the influence of individual features on the model&#x2019;s output by isolating their effects while averaging over all other features. Zhang et al [<xref ref-type="bibr" rid="ref20">20</xref>] used PDP to analyze the marginal effect of individual features on the model&#x2019;s predictions by averaging their impacts while keeping other features constant. This method helps reveal how changes in a particular feature influence the predicted outcomes, providing clinicians with interpretable insights into feature significance and supporting informed decision-making in personalized health care.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Data Source</title><p>The dataset for this study originated from the National Population Health Data Center&#x2019;s &#x201C;Diabetes Complications Dataset&#x201D; and comprised detailed records of 1000 patients with type 2 diabetes. 
The complete dataset of 1000 patients was used for our analysis without applying any additional inclusion or exclusion criteria, as the dataset had already undergone an initial selection and screening process before public release by the data provider. The dataset covers 87 features, including patients&#x2019; basic demographic information such as age, gender, ethnicity, and marital status, biochemical test results such as blood glucose levels, lipid analysis, renal function indicators, and other relevant hematological parameters, and comorbidities such as kidney disease, cardiovascular diseases, fatty liver, and many other chronic conditions. The dataset contained information on 444 patients with DN and 556 patients without nephropathy, providing a rich empirical basis for the study of DN risk prediction.</p></sec><sec id="s2-2"><title>Data Preprocessing</title><p>In this study, features with a data missing proportion exceeding 75% (750/1000) were excluded due to their severely limited informational content and minimal impact on the research outcomes. For features with a data missing proportion less than 75% (750/1000), multiple imputation was used. This method, based on multivariate regression models and iterative algorithms, is well-suited for handling clinical data with diverse feature types due to its ability to preserve interfeature correlations and prevent the disruption of internal data relationships, which is typical for singular imputation methods. The fundamental approach involves initially filling each feature containing missing values with simple methods such as the mean to provide initial values, followed by the use of current filled values from other features to predict and update missing values through regression models. This process iterates until the imputation converges or the predefined maximum number of iterations is reached. In addition, we noted that the ratio of patients with DN to those without is 444:556, close to 4:5. 
Although this imbalance is not particularly severe, we used the Synthetic Minority Oversampling Technique (SMOTE) to enhance the accuracy and reliability of our model predictions. SMOTE generates new synthetic samples by randomly interpolating between minority class samples and their nearest neighbors. This method not only increases the number of minority class samples but also maintains intraclass diversity, thereby avoiding the potential overfitting issues associated with simple sample replication. Through this approach, SMOTE has effectively addressed the issue of data imbalance, laying a solid foundation for the development of a more robust and more accurate predictive model.</p></sec><sec id="s2-3"><title>Statistical Analysis</title><p>This study used Python version 3.9.5, originally developed by Guido van Rossum and currently maintained by the Python Software Foundation, for data analysis. In the statistical data analysis, the Shapiro-Wilk test was first conducted on continuous variables to determine whether they follow a normal distribution. If these variables did not follow a normal distribution, the Mann-Whitney <italic>U</italic> test was used to explore their associations with DN. The Mann-Whitney <italic>U</italic> test helped examine whether there are significant statistical differences between the patient and nonpatient groups, making it an effective nonparametric testing method. If these variables followed a normal distribution, the Student <italic>t</italic> test was used to analyze their associations with DN. This parametric testing method assessed the significance of differences in the means of 2 samples, aiding in the identification of key indicators related to the risk of DN. For categorical variables, chi-square tests were used to analyze their associations with DN, detecting distribution differences of categorical variables across different disease states. 
Through these tests, we gained a more comprehensive understanding of how different types of data characteristics influence the risk of DN. Statistical significance was defined as <italic>P</italic>&#x003C;.001.</p></sec><sec id="s2-4"><title>Feature Selection</title><p>Given the limited sample size and large number of features in the data involved in this study, we adopted the least absolute shrinkage and selection operator (lasso) method for feature selection to ensure the efficiency of the analysis and the accuracy of the results. Lasso introduces the L1 norm as a penalty term in regression analysis, making it widely used in high-dimensional data processing [<xref ref-type="bibr" rid="ref21">21</xref>]. Its objective function is:</p><disp-formula id="equWL1"><mml:math id="eqn1"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mi>m</mml:mi><mml:mi>i</mml:mi><mml:msub><mml:mi>n</mml:mi><mml:mrow><mml:mi>&#x03B2;</mml:mi></mml:mrow></mml:msub><mml:mrow><mml:mo>{</mml:mo><mml:mrow><mml:mfrac><mml:mn>1</mml:mn><mml:mrow><mml:mn>2</mml:mn><mml:mi>n</mml:mi></mml:mrow></mml:mfrac><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>i</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>n</mml:mi></mml:mrow></mml:munderover><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:munderover><mml:msub><mml:mi>x</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:msup><mml:mo 
stretchy="false">)</mml:mo><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msup><mml:mo>+</mml:mo><mml:mi>&#x03BB;</mml:mi><mml:munderover><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>j</mml:mi><mml:mo>=</mml:mo><mml:mn>1</mml:mn></mml:mrow><mml:mrow><mml:mi>p</mml:mi></mml:mrow></mml:munderover><mml:mo stretchy="false">|</mml:mo><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">|</mml:mo></mml:mrow><mml:mo>}</mml:mo></mml:mrow></mml:mstyle></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p>where, <inline-formula><mml:math id="ieqn1"><mml:msub><mml:mrow><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the response variable of the ith observation, <inline-formula><mml:math id="ieqn2"><mml:msub><mml:mrow><mml:mi>x</mml:mi></mml:mrow><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the jth feature of the ith observation, <inline-formula><mml:math id="ieqn3"><mml:msub><mml:mrow><mml:mi>&#x03B2;</mml:mi></mml:mrow><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> is the regression coefficient of the jth feature, &#x03BB; is the regularization parameter used to control the complexity of the model, <inline-formula><mml:math id="ieqn4"><mml:mi>n</mml:mi></mml:math></inline-formula> is the number of samples, and <inline-formula><mml:math id="ieqn5"><mml:mi>p</mml:mi></mml:math></inline-formula> is the number of features.</p><p>By adjusting the regularization parameter &#x03BB;, lasso achieves a balance between model complexity and fitting accuracy. The core mechanism is to drive some of the regression coefficients to zero through the L1 norm penalty, enabling variable selection and feature downscaling. 
This capability gives lasso a significant advantage when dealing with clinical data, as it is able to automatically screen out important variables and eliminate redundant or irrelevant features, making it particularly suitable for high-dimensional data with a large number of features. Through this regularization, lasso enhances the stability and predictive power of the model, preventing overfitting and ensuring more robust model performance on test sets. In addition, by retaining only the most predictive features, lasso significantly improves the accuracy of the model&#x2019;s predictions.</p></sec><sec id="s2-5"><title>Model Construction</title><p>In this study, we used a series of powerful ensemble learning algorithms, including random forests, extra trees, AdaBoost, XGBoost, and LightGBM. These algorithms are widely used in a variety of ML applications due to their excellent predictive performance and efficient processing speed.</p><p>Decision tree is a fundamental ML algorithm used for classification and regression [<xref ref-type="bibr" rid="ref22">22</xref>]. The data are split into different subsets based on a series of conditions, ultimately forming a tree structure. Each node represents a feature, each branch represents one possible value of the feature, and each leaf node represents a category or regression value.</p><p>Random forest is an ensemble learning method that enhances prediction accuracy and stability by constructing multiple decision trees [<xref ref-type="bibr" rid="ref23">23</xref>]. The core idea is to train multiple decision trees using different subsets of data and features, and obtain the final result by voting or averaging the predictions of all trees. 
This approach effectively reduces overfitting and improves the model&#x2019;s generalization ability.</p><p>Extra trees, or extremely randomized trees, is an improved random forest method that constructs decision trees by randomly selecting features and split points [<xref ref-type="bibr" rid="ref24">24</xref>]. Unlike random forests, extra trees randomly selects multiple split points at each node, from which the best split point is chosen. This completely random strategy reduces the variance of the model and improves generalization while retaining the interpretability and training speed of the decision tree model.</p><p>AdaBoost is a boosting method based on an additive model that improves overall classification performance by progressively weighting the training of multiple weak classifiers [<xref ref-type="bibr" rid="ref25">25</xref>]. In each round of training, misclassified samples are given higher weights, prompting subsequent classifiers to focus more on these difficult-to-classify samples. The final model is a weighted sum of multiple weak classifiers, which gradually reduces error and significantly enhances performance.</p><p>XGBoost is an efficient gradient-boosting decision tree algorithm that combines parallel processing and regularization techniques to improve the speed and performance of the model [<xref ref-type="bibr" rid="ref26">26</xref>]. Key features include the use of second-order derivative information, the ability to handle missing values, and pruning operations on the decision tree. These features enable XGBoost to perform well when dealing with large-scale and high-dimensional data.</p><p>LightGBM is an efficient gradient-boosting framework that improves training speed and memory usage through a histogram-based decision tree learning algorithm [<xref ref-type="bibr" rid="ref27">27</xref>]. Its features include support for categorical features, a depth-first strategy using leaf-count limitation, and efficient parallel processing. 
These characteristics allow LightGBM to have a significant advantage in handling large-scale data.</p></sec><sec id="s2-6"><title>Model Evaluation</title><p>To comprehensively evaluate the model&#x2019;s performance, this study used multiple evaluation metrics, including accuracy, precision, recall, <italic>F</italic><sub>1</sub>-score, specificity, and area under the curve (AUC). These metrics reflect various aspects of the model&#x2019;s performance, ensuring an objective and thorough assessment.</p><p>Accuracy refers to the ratio of correctly predicted samples to the total number of samples and reflects the overall prediction accuracy of the model. The formula for accuracy is:</p><disp-formula id="equWL2"><mml:math id="eqn2"><mml:mi>A</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:math></disp-formula><p>where, TP is true positives, TN is true negatives, FP is false positives, and FN is false negatives.</p><p>A higher accuracy indicates that the model makes correct predictions in most cases.</p><p>Precision represents the proportion of true positives among all samples predicted as positive. It primarily measures the accuracy of the model&#x2019;s positive predictions. 
The formula for precision is:</p><disp-formula id="equWL3"><mml:math id="eqn3"><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:mfrac></mml:math></disp-formula><p>Recall, also known as sensitivity, is the proportion of true positives among all actual positive samples. A higher recall indicates fewer false negatives and a better ability of the model to identify positive samples. The formula for recall is:</p><disp-formula id="equWL4"><mml:math id="eqn4"><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:math></disp-formula><p><italic>F</italic><sub>1</sub>-score means the harmonic mean of precision and recall, and is used to evaluate the balance between these 2 metrics. It is suitable for datasets with class imbalance. 
The formula for <italic>F</italic><sub>1</sub>-score is:</p><disp-formula id="equWL5"><mml:math id="eqn5"><mml:mi>F</mml:mi><mml:mn>1</mml:mn><mml:mi>S</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo>=</mml:mo><mml:mn>2</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mfrac><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mi>R</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>l</mml:mi></mml:mrow></mml:mfrac></mml:math></disp-formula><p>A higher <italic>F</italic><sub>1</sub>-score indicates better performance in both precision and recall.</p><p>Specificity represents the proportion of true negatives among all actual negative samples. It measures the model&#x2019;s ability to identify negative samples. The formula for specificity is:</p><disp-formula id="equWL6"><mml:math id="eqn6"><mml:mi>S</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>N</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:mfrac></mml:math></disp-formula><p>The AUC of the receiver operating characteristic (ROC) curve is used to evaluate the overall performance of the model. 
The ROC curve reflects the trade-off between the true-positive rate and false-positive rate at different thresholds. A higher AUC indicates better discrimination ability of the model, balancing the true-positive rate and false-positive rate more effectively. AUC is especially useful for comparing the performance of different models, providing a global perspective on evaluation.</p><p>By comprehensively assessing these metrics, we can understand the models&#x2019; performance in various aspects, identify their strengths and weaknesses, and provide a solid basis for further optimization and improvement. This multiangle, multidimensional evaluation approach ensures a more accurate and comprehensive evaluation of the model&#x2019;s performance, enhancing its reliability and effectiveness in practical applications.</p></sec><sec id="s2-7"><title>Model Interpretability</title><p>For the interpretability of ML models, we primarily used 2 widely recognized methods, LIME [<xref ref-type="bibr" rid="ref28">28</xref>] and SHAP [<xref ref-type="bibr" rid="ref29">29</xref>]. LIME approximates complex ML models by fitting an interpretable linear model within a local neighborhood of the target prediction point. This approach provides individualized feature importance interpretations for each instance, thus elucidating the logic behind specific model predictions. Unlike LIME, which focuses on local interpretation, SHAP quantifies the global contribution of each feature to the model prediction based on the Shapley value from game theory. The SHAP value ensures that the contribution of each feature is consistent and fair across all possible feature combinations. By calculating the SHAP value for each feature, SHAP reveals the interactions and dependencies between features and model predictions. Both methods have their respective strengths. SHAP offers a more comprehensive and in-depth interpretation, while LIME is more intuitive and flexible for instance-specific explanations. 
Combining the local interpretability of LIME with the global analysis of SHAP provides a more thorough and detailed interpretative support for ML models in clinical data. This combination is crucial for enhancing the transparency of the models, increasing the trust of health care professionals, and optimizing the clinical decision-making process.</p></sec><sec id="s2-8"><title>Ethical Considerations</title><p>This study conducted a secondary analysis using the publicly available Diabetes Complications Dataset [<xref ref-type="bibr" rid="ref30">30</xref>] from the National Population Health Data Center. As this study exclusively used anonymized secondary data without direct human participant involvement, it is exempt from additional institutional ethics review. The original data collection by the National Population Health Data Center complies with the &#x201C;Regulations on the Management of Human Genetic Resources of the People&#x2019;s Republic of China&#x201D; and relevant ethical guidelines. All data are completely deidentified with no personally identifiable information included, and formal data access approval was obtained from the data provider following their established protocols. The original informed consent permits secondary analysis of the deidentified data for research purposes.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Feature Selection</title><p>As illustrated in <xref ref-type="fig" rid="figure2">Figures 2</xref> and <xref ref-type="fig" rid="figure3">3</xref>, lasso regression effectively filters and optimizes the variables in the model by adjusting the regularization parameter &#x03BB; (Log Lambda). Each curve represents the coefficients of a feature. It can be observed that as &#x03BB; increases, most coefficients gradually approach zero (<xref ref-type="fig" rid="figure3">Figure 3A</xref>). 
This phenomenon demonstrates the unique feature selection capability of lasso regression, to reduce the coefficients of unimportant features by increasing &#x03BB;, and thereby simplify the model. Simultaneously, it can be seen that the mean squared error remains low when &#x03BB; is small (<xref ref-type="fig" rid="figure3">Figure 3B</xref>), indicating that the model has a good fitting performance. However, as &#x03BB; continues to increase, the mean squared error value starts to rise significantly after a relatively stable period. This trend indicates that excessive regularization will result in an overly simplified model, impairing its predictive ability. Through lasso regression, we eventually selected the 24 features, including 14 numerical features and 10 categorical ones.</p><p><xref ref-type="table" rid="table1">Table 1</xref> summarizes the differences in features between the DN group and the non-DN group. The statistical data reveal significant disparities between the two groups across various biochemical indicators and health conditions. Specifically, the albumin level is significantly higher in the non-DN group (mean 41.9, range 39.7-44.3) than that in the DN group (mean 38.2, range 32.4-41.4), with a <italic>P</italic> value of less than .001. Similar trends are observed for albumin creatinine ratio, blood pressure high, and blood pressure low. For instance, the albumin creatinine ratio is significantly lower in the non-DN group (mean 12.0, range 4.0-60.2) than that in the DN group (mean 272.7, range 79.0-472.1), with a <italic>P</italic> value of less than .001. Blood pressure high and blood pressure low are also lower in the non-DN group with means of 130.0 (range 120.0-142.0) and 80.0 (range 70.0-86.0) compared to the DN group with means of 142.0 (range 130.0-160.0) and 80.0 (range 75.0-90.0), respectively, both with <italic>P</italic> values less than .001. 
The levels of CA199, lactate dehydrogenase L, and lipoproteins indicate more severe renal impairment in the DN group compared to the non-DN group. For example, the CA199 level is significantly higher in the DN group (mean 16.5, range 9.5-26.3) than that in the non-DN group (mean 13.0, range 7.8-21.9), with a <italic>P</italic> value of less than .001. Similarly, the lactate dehydrogenase L level is higher in the DN group (mean 169.8, range 142.5-203.2) than that in the non-DN group (mean 153.2, range 135.6-176.6), with a <italic>P</italic> value of less than .001. Furthermore, patients with DN exhibit a higher incidence of certain clinical conditions compared to those without DN. For instance, the incidence of digestive carcinoma in the DN group is 18.33%, whereas it is 46.06% in the non-DN group, with a <italic>P</italic> value of less than .001. Similar significant differences are observed for other conditions such as rheumatic immunity disease, other tumors, cerebral apoplexy, and hypertension. <xref ref-type="table" rid="table1">Table 1</xref> provides a detailed comparison of these features between the DN and non-DN groups, highlighting significant health disparities and emphasizing the need for targeted clinical interventions. <xref ref-type="table" rid="table2">Table 2</xref> lists the variables and their corresponding abbreviations used throughout the analysis, providing a clear reference for understanding the various health indicators assessed in this study.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Absolute values of feature coefficients in lasso regression. The color gradient reflects the magnitude of coefficients, with darker shades indicating higher absolute values. Key features (ALB_CR, CP, SCR, ENDOCRINE_DISEASE, ALB) are highlighted as dominant contributors to model performance. 
ALB: albumin; ALB_CR: albumin creatinine ratio; BP_HIGH: blood pressure high; BP_LOW: blood pressure low; CLD: chronic liver disease; CP: C-reactive protein; LDH_L: lactate dehydrogenase L; LPS: lipoproteins; MEN: endocrine gland tumors; SCR: serum creatinine; SUA: serum uric acid; TP: total protein.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e64979_fig02.png"/></fig><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Variable selection via lasso regression. (A) Coefficient paths illustrate the trajectory of feature coefficients as a function of log(&#x03BB;), demonstrating variable shrinkage and exclusion. (B) The mean-squared error curve identifies the optimal &#x03BB; value at its minimum, balancing model complexity and predictive accuracy.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e64979_fig03.png"/></fig><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Feature differences between diabetic nephropathy (DN) and non-DN groups.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variable name</td><td align="left" valign="bottom">DN (n=444), median (IQR)</td><td align="left" valign="bottom">Non-DN (n=556), median (IQR)</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">Albumin</td><td align="left" valign="top">38.2 (32.4-41.4)</td><td align="left" valign="top">41.9 (39.7-44.3)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Albumin_Creatinine_Ratio</td><td align="left" valign="top">272.7 (79.0-472.1)</td><td align="left" valign="top">12.0 (4.0-60.2)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Blood_Pressure_High</td><td align="left" valign="top">142.0 
(130.0-160.0)</td><td align="left" valign="top">130.0 (120.0-142.0)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Blood_Pressure_Low</td><td align="left" valign="top">80.0 (75.0-90.0)</td><td align="left" valign="top">80.0 (70.0-86.0)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">CA199</td><td align="left" valign="top">16.5 (9.5-26.3)</td><td align="left" valign="top">13.0 (7.8-21.9)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">C-Peptide</td><td align="left" valign="top">2.4 (1.5-3.5)</td><td align="left" valign="top">2.4 (1.5-3.1)</td><td align="left" valign="top">.25</td></tr><tr><td align="left" valign="top">Lactate_Dehydrogenase_L</td><td align="left" valign="top">169.8 (142.5-203.2)</td><td align="left" valign="top">153.2 (135.6-176.6)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Lipoproteins</td><td align="left" valign="top">134.8 (78.6-212.7)</td><td align="left" valign="top">100.2 (48.8-163.1)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Serum_Creatinine</td><td align="left" valign="top">95.8 (68.7-161.0)</td><td align="left" valign="top">67.5 (55.2-78.7)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Serum_Uric_Acid</td><td align="left" valign="top">352.6 (292.1-416.1)</td><td align="left" valign="top">302.8 (242.6-363.2)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Total_Protein</td><td align="left" valign="top">63.3 (58.2-68.1)</td><td align="left" valign="top">67.2 (63.9-70.9)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Weight</td><td align="left" valign="top">74.0 (66.0-83.0)</td><td align="left" valign="top">72.9 (66.0-79.0)</td><td align="left" valign="top">.10</td></tr><tr><td align="left" 
valign="top" colspan="3">Other_Tumor, n (%)</td><td align="left" valign="top">.36</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top">46 (50.55)</td><td align="left" valign="top">510 (56.11)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top">45 (49.45)</td><td align="left" valign="top">399 (43.89)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Endocrine_Disease, n (%)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top">169 (47.08)</td><td align="left" valign="top">387 (60.37)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top">190 (52.92)</td><td align="left" valign="top">254 (39.63)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Gynecological_Tumor, n (%)</td><td align="left" valign="top">&#x003E;.99</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top">16 (55.17)</td><td align="left" valign="top">540 (55.61)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top">13 (44.83)</td><td align="left" valign="top">431 (44.39)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Cerebral_Apoplexy, n (%)</td><td align="left" valign="top">.09</td></tr><tr><td 
align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top">29 (44.62)</td><td align="left" valign="top">527 (56.36)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top">36 (55.38)</td><td align="left" valign="top">408 (43.64)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Endocrine_Gland_Tumors, n (%)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top">9 (21.95)</td><td align="left" valign="top">547 (57.04)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top">32 (78.05)</td><td align="left" valign="top">412 (42.96)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Renal_Failure, n (%)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top">0 (0.00)</td><td align="left" valign="top">556 (59.15)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top">60 (100.00)</td><td align="left" valign="top">384 (40.85)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Lung_Tumor, n (%)</td><td align="left" valign="top">.08</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top">18 (75.00)</td><td align="left" valign="top">538 (55.12)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top">6 (25.00)</td><td align="left" valign="top">438 (44.88)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Urologic_Neoplasms, n (%)</td><td align="left" valign="top">.04</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top">1 (12.50)</td><td align="left" valign="top">555 (59.15)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top">7 (87.50)</td><td align="left" valign="top">437 (44.05)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Gender, n (%)</td><td align="left" valign="top">.06</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" valign="top">347 (53.38)</td><td align="left" valign="top">209 (59.71)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="left" valign="top">303 (46.62)</td><td align="left" valign="top">141 (40.29)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Chronic_Liver_Disease, n (%)</td><td align="left" valign="top">.002</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top">67 (43.79)</td><td 
align="left" valign="top">489 (57.73)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top">86 (56.21)</td><td align="left" valign="top">358 (42.27)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Digestive_Carcinoma, n (%)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>No</td><td align="left" valign="top">49 (81.67)</td><td align="left" valign="top">507 (53.94)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yes</td><td align="left" valign="top">11 (18.33)</td><td align="left" valign="top">433 (46.06)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="3">Diabetes_Disease_Type, n (%)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Type 2 diabetes</td><td align="left" valign="top">182 (36.4)</td><td align="left" valign="top">374 (74.8)</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top">Diabetic retinopathy</td><td align="left" valign="top">318 (63.6)</td><td align="left" valign="top">126 (25.2)</td><td align="left" valign="top"/></tr></tbody></table></table-wrap><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Variable names and their abbreviations.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variable name</td><td align="left" valign="bottom">Abbreviation</td></tr></thead><tbody><tr><td align="left" valign="top">Albumin</td><td align="left" valign="top">ALB</td></tr><tr><td align="left" valign="top">Albumin creatinine ratio</td><td align="left" 
valign="top">ALB_CR</td></tr><tr><td align="left" valign="top">Blood pressure high</td><td align="left" valign="top">BP_HIGH</td></tr><tr><td align="left" valign="top">Blood pressure low</td><td align="left" valign="top">BP_LOW</td></tr><tr><td align="left" valign="top">CA199</td><td align="left" valign="top">CA199</td></tr><tr><td align="left" valign="top">C-reactive protein</td><td align="left" valign="top">CP</td></tr><tr><td align="left" valign="top">Lactate dehydrogenase L</td><td align="left" valign="top">LDH_L</td></tr><tr><td align="left" valign="top">Lipoproteins</td><td align="left" valign="top">LPS</td></tr><tr><td align="left" valign="top">Serum creatinine</td><td align="left" valign="top">SCR</td></tr><tr><td align="left" valign="top">Serum uric acid</td><td align="left" valign="top">SUA</td></tr><tr><td align="left" valign="top">Total protein</td><td align="left" valign="top">TP</td></tr><tr><td align="left" valign="top">Weight</td><td align="left" valign="top">WEIGHT</td></tr><tr><td align="left" valign="top">Other tumor</td><td align="left" valign="top">OTHER_TUMOR</td></tr><tr><td align="left" valign="top">Other endocrine diseases</td><td align="left" valign="top">ENDOCRINE_DISEASE</td></tr><tr><td align="left" valign="top">Gynecological tumor</td><td align="left" valign="top">GYNECOLOGICAL_TUMOR</td></tr><tr><td align="left" valign="top">Cerebral apoplexy</td><td align="left" valign="top">CEREBRAL_APOPLEXY</td></tr><tr><td align="left" valign="top">Endocrine gland tumors</td><td align="left" valign="top">MEN</td></tr><tr><td align="left" valign="top">Renal failure</td><td align="left" valign="top">RENAL_FAILURE</td></tr><tr><td align="left" valign="top">Lung tumor</td><td align="left" valign="top">LUNG_TUMOR</td></tr><tr><td align="left" valign="top">Urologic neoplasms</td><td align="left" valign="top">UROLOGIC_NEOPLASMS</td></tr><tr><td align="left" valign="top">Gender</td><td align="left" valign="top">SEX</td></tr><tr><td align="left" 
valign="top">Chronic liver disease</td><td align="left" valign="top">CHRONIC_LIVER_DISEASE</td></tr><tr><td align="left" valign="top">Digestive carcinoma</td><td align="left" valign="top">DIGESTIVE_CARCINOMA</td></tr><tr><td align="left" valign="top">Diabetes disease type</td><td align="left" valign="top">DIABETES_DISEASE_TYPE</td></tr></tbody></table></table-wrap></sec><sec id="s3-2"><title>Model Performance</title><p>In this study, we used various ML algorithms to construct predictive models, including decision tree, random forest, extra trees, AdaBoost, XGBoost, and LightGBM. In order to determine the optimal model parameters, we used ten-fold cross-validation and grid search strategies, ensuring thorough and accurate model tuning. The final results, presented in <xref ref-type="table" rid="table3">Table 3</xref>, show that XGBoost and LightGBM exhibited superior performance across multiple evaluation metrics, including accuracy, precision, recall, <italic>F</italic><sub>1</sub>-score, and specificity. In particular, XGBoost achieved the highest accuracy of 86.87%, with a precision of 88.90%, a recall of 84.40%, an <italic>F</italic><sub>1</sub>-score of 86.44%, and a specificity of 89.12%. LightGBM followed closely with an accuracy of 86.78%, a precision of 88.72%, a recall of 84.37%, an <italic>F</italic><sub>1</sub>-score of 86.35%, and a specificity of 88.88%. These results highlight the exceptional capability of both XGBoost and LightGBM in handling complex datasets, making them ideal for predictive modeling in this context. 
The other effective models, however, did not match the overall performance of XGBoost and LightGBM.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Performance comparison of different machine learning models.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Model</td><td align="left" valign="bottom">Accuracy (%)</td><td align="left" valign="bottom">Precision (%)</td><td align="left" valign="bottom">Recall (%)</td><td align="left" valign="bottom"><italic>F</italic><sub>1</sub>-score (%)</td><td align="left" valign="bottom">Specificity (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Decision tree</td><td align="left" valign="top">78.24</td><td align="left" valign="top">85.02</td><td align="left" valign="top">69.88</td><td align="left" valign="top">76.07</td><td align="left" valign="top">86.26</td></tr><tr><td align="left" valign="top">Random forest</td><td align="left" valign="top">85.07</td><td align="left" valign="top">88.25</td><td align="left" valign="top">80.72</td><td align="left" valign="top">84.22</td><td align="left" valign="top">89.16</td></tr><tr><td align="left" valign="top">Extra trees</td><td align="left" valign="top">84.26</td><td align="left" valign="top">89.06</td><td align="left" valign="top">78.32</td><td align="left" valign="top">83.16</td><td align="left" valign="top">89.98</td></tr><tr><td align="left" valign="top">AdaBoost<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></td><td align="left" valign="top">83.10</td><td align="left" valign="top">85.44</td><td align="left" valign="top">79.77</td><td align="left" valign="top">82.29</td><td align="left" valign="top">86.31</td></tr><tr><td align="left" valign="top">XGBoost<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td><td align="left" valign="top">86.87</td><td align="left" valign="top">88.90</td><td align="left" valign="top">84.40</td><td align="left" 
valign="top">86.44</td><td align="left" valign="top">89.12</td></tr><tr><td align="left" valign="top">LightGBM<sup><xref ref-type="table-fn" rid="table3fn3">c</xref></sup></td><td align="left" valign="top">86.78</td><td align="left" valign="top">88.72</td><td align="left" valign="top">84.37</td><td align="left" valign="top">86.35</td><td align="left" valign="top">88.88</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>AdaBoost: Adaptive Boosting.</p></fn><fn id="table3fn2"><p><sup>b</sup>XGBoost: Extreme Gradient Boosting.</p></fn><fn id="table3fn3"><p><sup>c</sup>LightGBM: Light Gradient-Boosting Machine.</p></fn></table-wrap-foot></table-wrap><p>As shown in <xref ref-type="fig" rid="figure4">Figure 4</xref>, through a comprehensive performance evaluation that included ROC curves, precision-recall curves, decision curve analysis, and calibration curves, we found that LightGBM and XGBoost performed exceptionally well. Both models demonstrated strong discriminative abilities with an AUC of 0.93, indicating their effectiveness at distinguishing between positive and negative classes. The precision-recall curves, particularly valuable for imbalanced datasets, also showed an AUC of 0.93 for both models, reflecting their capability to maintain high precision and recall, which minimizes false positives and negatives. In the decision curve analysis, XGBoost provided the highest net benefit, highlighting its robustness and wide applicability across various clinical decision thresholds. This indicates that XGBoost can be particularly useful in diverse clinical scenarios, offering reliable support for decision-making processes. The calibration curves further confirmed the reliability of the predicted probabilities from both models, showing that their predictions closely matched the actual outcomes, especially in the high probability prediction range. 
This means that when the models predict a high probability for an event, the prediction is likely to be accurate, which is essential for building trust in model predictions. In summary, LightGBM and XGBoost excelled across all evaluated metrics, demonstrating superior performance and robustness. Their high AUC values, significant net benefit, and accurate probability predictions collectively underscore their effectiveness in handling complex clinical datasets, making them highly suitable for predictive modeling in health care where accurate and reliable predictions are critical for effective patient management and treatment planning.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Comprehensive evaluation of model performance. (A) Receiver operating characteristic (ROC) curves demonstrate discriminative ability, with LightGBM achieving the highest AUC (0.93) [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref10">10</xref>]. (B) Precision-recall curves highlight performance in imbalanced datasets, where XGBoost and LightGBM maintain high precision across recall ranges. (C) Decision curve analysis reveals net clinical benefit, indicating LightGBM&#x2019;s superiority over alternative models at most threshold probabilities. (D) Calibration curves assess predictive accuracy, showing LightGBM aligns closest to the ideal calibration line. AdaBoost: Adaptive Boosting; AUC: area under the curve; LightGBM: Light Gradient-Boosting Machine; XGBoost: Extreme Gradient Boosting.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e64979_fig04.png"/></fig></sec><sec id="s3-3"><title>Model Explainability</title><p>In the analysis of the LIME diagram, we identified a series of key biomarkers and clinical indicators closely associated with the occurrence and progression of DN. 
As depicted in <xref ref-type="fig" rid="figure5">Figure 5A</xref>, the values next to each feature represent the contribution of that feature to the model&#x2019;s prediction outcomes, denoted as the importance score. Blue indicates a positive correlation between the feature value and the model&#x2019;s prediction outcomes, meaning that as the feature value increases, the likelihood of the model predicting the positive class also increases. Conversely, red indicates a negative correlation, suggesting that as the feature value increases, the likelihood of the model predicting the negative class increases. LIME technology provides an intuitive explanation of the factors influencing model predictions by constructing local surrogate models around the model&#x2019;s predictions in the form of linear models, approximating the original complex model. Specifically, the level of serum creatinine was identified as a significant positive predictive factor, with a threshold exceeding 99.78 &#x00B5;mol/L associated with an increased risk of DN. This finding aligns with the clinical understanding that elevated creatinine levels typically indicate decreased kidney filtration capacity. Albumin levels within the range of 9.00 to 64.95 g/L were also associated with the risk of DN. As the main protein in plasma, abnormal albumin levels may indicate malnutrition or an inflammatory state, both of which can impact kidney health. The lipopolysaccharide level exceeding 191.68 mg/dL was identified as another positive predictive factor. As an inflammatory mediator, elevated lipopolysaccharide levels may be linked to the inflammatory processes in DN. The level of serum uric acid exceeding 392.32 &#x00B5;mol/L was also identified as a positive predictive factor. Hyperuricemia is associated with various kidney diseases and may increase the risk of DN through mechanisms such as promoting inflammation and oxidative stress. 
Blood pressure indicators, including diastolic blood pressure (BP_LOW) at 72.24 mmHg and systolic blood pressure (BP_HIGH) not exceeding 125.00 mmHg, were also considered to be significant in the LIME analysis. Effective blood pressure control is crucial for slowing the progression of DN. In addition, the levels of total protein within the range of 61.30 to 65.70 g/L and lactate dehydrogenase within the range of 160.55 to 188.66 U/L played a role in the model&#x2019;s predictions. Abnormalities in these indicators may reflect systemic metabolic disorders or tissue damage, both of which are related to the risk of DN. The presence of other medical conditions such as gynecological tumors, endocrine diseases, and lung tumors also showed a negative correlation with the risk of DN, suggesting that these factors may have a protective or neutralizing effect in this specific dataset. Through this detailed analysis, LIME technology provided a comprehensive and intuitive explanation of the key factors influencing model predictions, thereby enhancing our understanding of the biomarkers and clinical indicators associated with DN.</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Feature importance analysis of the LightGBM model. (A) LIME explanation highlights individual feature contributions, where blue/red bars indicate positive/negative impacts on predictions. (B) SHAP values reveal global feature influence, with ALB_CR and SCR exhibiting the strongest associations (purple/blue gradient reflects feature magnitude). 
ALB: albumin; ALB_CR: albumin creatinine ratio; BP_HIGH: blood pressure high; BP_LOW: blood pressure low; CLD: chronic liver disease; CP: C-reactive protein; LDL_L: lactate dehydrogenase L; LightGBM: Light Gradient-Boosting Machine; LIME: Local Interpretable Model-Agnostic Explanations; LPS: lipoproteins; MEN: endocrine gland tumors; SCR: serum creatinine; SHAP: Shapley Additive Explanations; SUA: serum uric acid; TP: total protein.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e64979_fig05.png"/></fig><p><xref ref-type="fig" rid="figure5">Figure 5B</xref> illustrates the SHAP importance of features within the LightGBM model, identifying primary factors such as lipoprotein A, serum creatinine, C-reactive protein, albumin, and blood pressure high as crucial ones in evaluating DN. These features are pivotal across models, underscoring their integral role in disease assessment. Lipoprotein_A and other lipoproteins serve as critical lipid markers, essential for gauging the cardiovascular risks and progression of kidney disease in patients with diabetes. Abnormal lipid metabolism is intimately linked with the onset of DN, highlighting a potential need for enhanced lipid management strategies to thwart disease advancement. Furthermore, Serum_Creatinine and Blood_Urea nitrogen are paramount biochemical indicators for renal function assessment. Elevated concentrations of these markers generally denote diminished kidney filtration ability, serving as significant indicators of DN progression. Regular monitoring of these parameters is vital for the early detection of renal impairment and timely medical intervention. Albumin serves as a sensitive biomarker for early DN. The abnormal rate of urinary albumin excretion reflects the extent of renal damage. 
Early detection and continual monitoring of urinary albumin levels enable physicians to assess disease progression and the efficacy of their treatment protocols, thereby facilitating the development of customized therapeutic strategies aimed at decelerating disease progression. Hypertension significantly accelerates DN development; thus, effective blood pressure management is imperative for delaying or preventing disease progression. Optimal control of hypertension not only preserves renal function but also mitigates the risk of cardiovascular incidents. The collective significance of these features underscores their pivotal role in DN assessment. Through a comprehensive analysis of these factors, physicians can obtain a deeper understanding of the disease&#x2019;s complexity, and develop more effective monitoring and treatment modalities that enhance patients&#x2019; management and prognosis.</p><p><xref ref-type="fig" rid="figure6">Figure 6</xref> delineates the correlation between various biochemical indicators and their SHAP values in interpretable ML models designed for predicting and evaluating the risk of DN. An in-depth analysis of these biochemical markers furnishes critical scientific insights into their influence on DN risk predictions, thereby informing clinical intervention strategies. The analysis reveals a pronounced negative correlation between the levels of blood urea nitrogen (blood urea) and the model&#x2019;s predictive accuracy. As blood urea nitrogen levels increase, the associated SHAP values decline markedly, signifying an augmented risk of DN. This trend underscores the prognostic importance of blood urea nitrogen in the early detection of renal dysfunction, establishing it as an essential indicator for kidney disease risk assessments. Similarly, fluctuations in albumin levels are critical for forecasting DN. 
The presented data indicate that rising albumin levels correlate with decreasing SHAP values, underscoring albumin&#x2019;s predictive utility, particularly in detecting microalbuminuria&#x2014;an early manifestation of kidney damage. Elevated levels of C-reactive protein, a marker of systemic inflammation, also correspond with reduced SHAP values, suggesting that high concentrations of this protein are linked to an increased risk of DN. This relationship may stem from chronic inflammatory states that foster the progression of DN. The findings concerning serum creatinine demonstrate that increased creatinine levels are indicative of reduced kidney filtration capabilities, mirrored by a decline in SHAP values. This reinforces the value of serum creatinine as a crucial indicator of renal insufficiency and a metric for assessing the risk of DN. The analysis also highlights the significant negative impact of elevated lipoprotein markers on the model&#x2019;s predictions, emphasizing the imperative role of lipid management in the prevention and treatment of DN. The perturbations in these indicators not only relate to cardiovascular diseases but also pose a substantial risk factor for DN. Furthermore, the notable SHAP values for endocrine disease and diabetes type underscore the critical role of these conditions in assessing DN risk, indicating that they are significant contributors to the disease&#x2019;s development. By synthesizing these data, we can enhance our comprehension of the risk factors associated with DN. 
This integration aids medical professionals in more accurately identifying patients at high risk, thereby facilitating the development of more effective treatment strategies and management practices to improve the renal health of patients with diabetes.</p><p>For the purpose of evaluating the predictive models for DN, the SHAP value plots for three distinct samples (<xref ref-type="fig" rid="figure7">Figure 7</xref>) offer a nuanced understanding of the contribution of various biochemical and physiological indicators to disease risks prediction. These comprehensive data enable insights into how different indicators, either individually or synergistically, influence disease risk assessment, thereby furnishing a scientific foundation for clinical decision-making. The analysis of the first sample elucidates that elevated levels of serum creatinine and C-reactive protein markedly enhance disease risks. Serum creatinine, a key marker of renal function impairment, signals a significant reduction in kidney function, which is particularly crucial in DN where renal lesions often correlate with prolonged suboptimal diabetes control. C-reactive protein, indicative of the body&#x2019;s inflammatory response, suggests the presence of an inflammatory state that is instrumental in accelerating renal damage. Furthermore, hypertension is a significant risk factor for DN, and the notably increased low blood pressure might suggest inadequate blood pressure management in this sample, exacerbating the renal burden. Conversely, higher levels of serum uric acid and albumin are generally viewed as protective in the first sample. Although elevated serum uric acid levels are linked to other conditions like gout, they may indicate preserved renal excretion function to a degree. Albumin, a crucial component of plasma proteins, with maintained levels suggests a favorable nutritional state and some preservation of kidney filtration function. 
The analysis of the second sample, with its positive prediction value indicating disease absence, highlights the critical role of lipoprotein A in increasing disease risks. Lipoprotein A, an independent risk factor for cardiovascular diseases, underscores a possible link between cardiovascular and renal health in the context of DN. Moreover, an elevated level of serum lipase (lipopolysaccharides) contributes to lowering disease risks in this sample, possibly due to its role in modulating immune responses and inflammation. The third sample&#x2019;s negative prediction value signals a heightened disease risk. In this context, increased serum creatinine and high blood pressure significantly elevate disease risks, emphasizing the importance of vigilant monitoring and control of these indicators. Although blood urea contributes less significantly to the prediction value, its elevation is typically associated with renal insufficiency and warrants clinical attention. Through this meticulous SHAP value&#x2013;based analysis, we unveil not only the specific contributions of each biochemical indicator to disease risks but also the intricate interplay and connections among these indicators. This profound understanding provides invaluable information for medical professionals in the early diagnosis, risk assessment, and therapeutic decision-making of DN, facilitating personalized medicine and enhancing treatment outcomes of patients.</p><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>SHAP value distributions for key biochemical indicators in nephropathy risk prediction. Each subplot illustrates the relationship between feature values (x-axis) and SHAP values (y-axis), with color gradients indicating feature magnitude. Red dashed lines denote baseline thresholds, where features above/below these thresholds significantly influence model predictions. 
Notably, ALB_CR and SCR exhibit the strongest associations with nephropathy risk due to their wide SHAP value ranges and distinct clustering patterns. Detailed subfigures are available in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendices 1</xref><xref ref-type="supplementary-material" rid="app2"/><xref ref-type="supplementary-material" rid="app3"/><xref ref-type="supplementary-material" rid="app4"/><xref ref-type="supplementary-material" rid="app5"/><xref ref-type="supplementary-material" rid="app6"/><xref ref-type="supplementary-material" rid="app7"/><xref ref-type="supplementary-material" rid="app8"/>-<xref ref-type="supplementary-material" rid="app9">9</xref>. ALB_CR: albumin creatinine ratio; BP_HIGH: blood pressure high; CP: C-reactive protein; LDL_L: lactate dehydrogenase L; SCR: serum creatinine; SHAP: Shapley Additive Explanations; SUA: serum uric acid; TP: total protein.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e64979_fig06.png"/></fig><fig position="float" id="figure7"><label>Figure 7.</label><caption><p>Comparative analysis of Shapley Additive Explanations (SHAP) values across three patient samples for diabetic nephropathy prediction. Each waterfall plot illustrates individual feature contributions to the final prediction score, where red/blue segments indicate positive/negative impacts on diabetic nephropathy risk. The base value represents the average model output without feature effects, while the final f(x) reflects personalized risk assessment. Notably, SUA and ALB exhibit consistent positive associations in high-risk samples (first/second), whereas MEN and HTN dominate negative contributions in low-risk cases (third). 
ALB: albumin; BP_HIGH: blood pressure high; BP_LOW: blood pressure low; BU: blood urea; CP: C-peptide; CRP: C-reactive protein; LP_A: lipoprotein A; LPS: lipoproteins; DC: digestive carcinoma; HTN: hypertension; MEN: endocrine gland tumors; OT: other tumor; PL: phospholipid; SCR: serum creatinine; SUA: serum uric acid.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="medinform_v13i1e64979_fig07.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>In this study, we focused on developing a predictive model for DN risks and conducted an extensive feature analysis, aiming to identify patients at risk of progressing to severe kidney disease in a clinical setting. To achieve this goal, we thoroughly analyzed a range of biomarkers and clinical indicators and evaluated several popular ML algorithms. Among these various algorithms tested, we found that the gradient-boosting&#x2013;based algorithms, XGBoost and LightGBM, performed exceptionally well on our dataset. The models not only demonstrated significant advantages in training speed and handling large datasets but also exhibited superior predictive performance across key metrics such as accuracy, precision, recall, and AUC, showcasing their outstanding predictive capabilities. Our study also found that serum creatinine, the albumin-creatinine ratio (ACR), abnormal levels of lipoproteins, C-peptide, and hypertension were the most relevant factors associated with DN, aligning with findings from clinical research on the key risk factors for DN progression.</p></sec><sec id="s4-2"><title>Comparison With Prior Work</title><p>Serum creatinine is a key biomarker for evaluating DN and a core indicator of kidney function, and its elevated levels are often closely associated with kidney damage. 
In patients with diabetes, even a slight increase in serum creatinine levels is sufficient to indicate early kidney function impairment [<xref ref-type="bibr" rid="ref31">31</xref>]. As serum creatinine levels rise, the risk of kidney damage also increases; thus, it is recommended to closely monitor serum creatinine levels in diabetes management [<xref ref-type="bibr" rid="ref32">32</xref>]. Serum creatinine levels have independent predictive value in the progression of DN. Regular monitoring of serum creatinine levels helps the early identification of potential kidney disease risks and the taking of appropriate preventive measures [<xref ref-type="bibr" rid="ref33">33</xref>]. By optimizing medical management for patients with diabetes and regularly assessing serum creatinine levels, clinicians can detect and address kidney function damage earlier, thereby reducing the progression risk of DN.</p><p>The ACR is an important method for detecting microalbuminuria, used to evaluate early changes in kidney disease in patients with diabetes. An increase in ACR is closely related to the progression of DN, and even a slight elevation in ACR should be considered as a warning signal for the development of DN [<xref ref-type="bibr" rid="ref34">34</xref>]. Elevated ACR is an independent predictor for the progression of DN, and regular monitoring of ACR levels helps the early identification of potential kidney disease risks, aiding clinicians in assessing disease progression and treatment response [<xref ref-type="bibr" rid="ref35">35</xref>]. Moreover, a reduction in ACR is associated with a slowdown in the progression of DN and a decrease in cardiovascular event rates. 
By optimizing medical management for patients with diabetes and regularly monitoring ACR levels, clinicians can detect and address kidney function damage earlier, thus reducing the risk of progression of DN [<xref ref-type="bibr" rid="ref36">36</xref>].</p><p>Abnormal levels of lipoproteins, particularly elevated low-density lipoprotein (LDL) and very low&#x2013;density lipoprotein (VLDL), are significant risk factors in the development of diabetic kidney disease. These lipid particles damage vascular endothelium, thereby accelerating the progression of diabetic kidney disease. Research indicates that changes in the size and number of LDL particles in patients with diabetes are significantly correlated with renal function impairment [<xref ref-type="bibr" rid="ref37">37</xref>]. Elevated LDL levels not only lead to atherosclerosis but also directly harm renal function by promoting inflammation and oxidative stress. This damage mechanism is primarily manifested by the thickening of the glomerular basement membrane and glomerulosclerosis, which may ultimately result in renal failure [<xref ref-type="bibr" rid="ref38">38</xref>]. Similarly, elevated VLDL levels are closely associated with the progression of kidney disease. VLDL carries a large amount of triglycerides and can be absorbed by renal tubular cells, causing cytotoxic reactions that further exacerbate tubular damage and accelerate the progression of kidney disease [<xref ref-type="bibr" rid="ref39">39</xref>]. Even after other comorbid factors are controlled, high LDL levels remain significantly associated with the progression of kidney disease. Multiple studies have also shown that high LDL levels are closely related to increased urinary protein excretion and decreased renal function [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. Therefore, managing lipoprotein levels is crucial to slowing the progression of diabetic kidney disease. 
Dietary adjustments, medication treatments such as statins, and lifestyle changes can effectively lower LDL and VLDL levels, thus protecting renal function.</p><p>C-peptide is an important marker for evaluating &#x03B2;-cell function in the pancreas, with its levels closely associated with metabolic control in diabetes. Low C-peptide levels are linked to an increased risk of DN, possibly due to metabolic disorders caused by insufficient insulin secretion. In contrast, high C-peptide levels are associated with a reduced risk of renal function deterioration in patients with type 2 diabetes. Research indicates that patients with higher C-peptide levels experience slower declines in renal function and less increase in albuminuria [<xref ref-type="bibr" rid="ref42">42</xref>]. Moreover, C-peptide shows potential in the treatment of DN. C-peptide replacement therapy can reduce glomerular hyperfiltration, decrease albumin excretion, and prevent glomerular and renal hypertrophy [<xref ref-type="bibr" rid="ref43">43</xref>]. Supporting this view, another study demonstrates that C-peptide at physiological concentrations can improve glomerular filtration rate and increase renal plasma flow, an effect that warrants further validation in clinical trials [<xref ref-type="bibr" rid="ref44">44</xref>]. C-peptide levels play a crucial role in the progression and management of DN. Monitoring and potentially modulating these levels through therapeutic interventions can provide significant benefits for the prevention and treatment of DN.</p><p>Hypertension is a major risk factor for the development and progression of DN. Persistent hypertension can lead to structural and functional changes in the renal vasculature, accelerating the decline in renal function. Patients with hypertension experience faster progression of kidney damage and thus require earlier intervention to prevent further deterioration of renal function [<xref ref-type="bibr" rid="ref45">45</xref>]. 
In the context of hypertension, a sustained high pressure load on the renal blood vessels results in the thickening and hardening of the vessel walls. These structural changes weaken the kidney&#x2019;s filtration function and accelerate glomerulosclerosis and renal fibrosis [<xref ref-type="bibr" rid="ref34">34</xref>]. Effective hypertension management typically includes the use of angiotensin-converting enzyme inhibitors and angiotensin receptor blockers, which not only lower blood pressure but also provide renal protection [<xref ref-type="bibr" rid="ref46">46</xref>]. On the other hand, although hypotension is less common in patients with diabetes, it can have negative effects on renal function in certain situations. In severe circulatory failure or shock, hypotension can lead to inadequate renal perfusion, reducing blood flow to the kidneys and exacerbating renal damage. In clinical practice, the management strategies for hypertension and hypotension need to be individualized. For patients with hypertension, the goal is to reduce blood pressure to recommended levels while avoiding excessive hypotension. For patients at risk of hypotension, particularly older adults, close monitoring of blood pressure fluctuations is essential to prevent inadequate renal perfusion. Overall, blood pressure management plays a crucial role in the prevention and treatment of DN. Appropriate pharmacological treatment and lifestyle interventions can control blood pressure effectively, protect renal function, and delay the progression of DN.</p></sec><sec id="s4-3"><title>Limitations</title><p>This study has made notable progress in predicting DN; however, certain limitations remain. First, the data used in the study were sourced from a single medical institution. Although the model performed well on this dataset, validation with data from other institutions is needed to assess its effectiveness and generalizability. 
Additionally, the model has not yet undergone comprehensive clinical validation, and some features selected by the algorithm may lack diagnostic significance, while commonly used clinical indicators might not have been included.</p></sec><sec id="s4-4"><title>Future Directions</title><p>Future research should focus on the following improvements: collecting data from multiple institutions to validate the model&#x2019;s robustness, strengthening collaboration with clinical experts to optimize feature selection based on clinical knowledge, and investigating potential nonlinear relationships among features. Developing a user-friendly online platform and a simplified scoring system would make the model more intuitive and accessible for clinical use. Furthermore, incorporating multimodal data, such as imaging and genomic information, could enhance the model&#x2019;s predictive performance and clinical value, providing robust support for the early diagnosis and personalized treatment of DN.</p></sec><sec id="s4-5"><title>Conclusion</title><p>In this study, we developed a robust predictive model for DN using various ML techniques. Among the models tested, XGBoost and LightGBM demonstrated superior performance, achieving notable metrics across other performance indicators. The integration of XML techniques, such as LIME and SHAP, provided valuable insights into the contribution of individual features, enhancing the model&#x2019;s transparency and interpretability, which is crucial for clinical application. Our analysis identified several significant risk factors for DN, including serum creatinine, C-peptide, albumin, and lipoproteins. These findings are well-supported by extensive literature, reinforcing the reliability and relevance of our predictive model. 
The ability to accurately predict DN and understand the underlying risk factors allows for early intervention and personalized treatment strategies, ultimately improving patients&#x2019; outcomes and optimizing health care resource usage.</p></sec></sec></body><back><ack><p>The authors would like to thank the Population Health Data Archive for providing the Diabetes Complications Dataset, which was instrumental in this research. This work is supported by the Innovation Engineering project "Construction of a Digital Resource System for Biomedical Literature" (grant 24C0001-1).</p></ack><notes><sec><title>Data Availability</title><p>The data used in this study are publicly available from the National Population Health Data Center under the title &#x201C;Diabetes Complications Dataset&#x201D; (DOI: 10.12213/11.A0005.201905.000282). The dataset can be accessed at [<xref ref-type="bibr" rid="ref46">46</xref>]. This study was conducted with formal approval from the data provider following submission of a data access request through the National Population Health Data Center platform. All data have been deidentified and privacy-protected by the data provider, with assurance that no personally identifiable information or other sensitive data are included in the deposited dataset. Researchers interested in accessing this dataset should submit a formal request through the National Population Health Data Center platform according to their data-sharing policies. 
Further details regarding the dataset and its structure are provided in <xref ref-type="supplementary-material" rid="app10">Multimedia Appendix 10</xref>.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">ACR</term><def><p>albumin-creatinine ratio</p></def></def-item><def-item><term id="abb2">AdaBoost</term><def><p>Adaptive Boosting</p></def></def-item><def-item><term id="abb3">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb4">AUC</term><def><p>area under the curve</p></def></def-item><def-item><term id="abb5">DN</term><def><p>diabetic nephropathy</p></def></def-item><def-item><term id="abb6">lasso</term><def><p>least absolute shrinkage and selection operator</p></def></def-item><def-item><term id="abb7">LDL</term><def><p>low-density lipoprotein</p></def></def-item><def-item><term id="abb8">LightGBM</term><def><p>Light Gradient-Boosting Machine</p></def></def-item><def-item><term id="abb9">LIME</term><def><p>Local Interpretable Model-Agnostic Explanations</p></def></def-item><def-item><term id="abb10">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb11">PDP</term><def><p>partial dependence plot</p></def></def-item><def-item><term id="abb12">ROC</term><def><p>receiver operating characteristic</p></def></def-item><def-item><term id="abb13">SHAP</term><def><p>Shapley Additive Explanations</p></def></def-item><def-item><term id="abb14">SMOTE</term><def><p>Synthetic Minority Oversampling Technique</p></def></def-item><def-item><term id="abb15">VLDL</term><def><p>very low&#x2013;density lipoprotein</p></def></def-item><def-item><term id="abb16">XGBoost</term><def><p>Extreme Gradient Boosting</p></def></def-item><def-item><term id="abb17">XML</term><def><p>explainable machine learning</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref 
id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Agarwal</surname><given-names>R</given-names> </name></person-group><article-title>Pathogenesis of diabetic nephropathy</article-title><source>Compendia</source><year>2021</year><month>06</month><volume>2021</volume><issue>1</issue><fpage>2</fpage><lpage>7</lpage><pub-id pub-id-type="doi">10.2337/db20211-2</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Umanath</surname><given-names>K</given-names> </name><name name-style="western"><surname>Lewis</surname><given-names>JB</given-names> </name></person-group><article-title>Update on diabetic nephropathy: core curriculum 2018</article-title><source>Am J Kidney Dis</source><year>2018</year><month>06</month><volume>71</volume><issue>6</issue><fpage>884</fpage><lpage>895</lpage><pub-id pub-id-type="doi">10.1053/j.ajkd.2017.10.026</pub-id><pub-id pub-id-type="medline">29398179</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Alterations of the gut microbiota in patients with diabetic nephropathy</article-title><source>Microbiol Spectr</source><year>2022</year><month>08</month><day>31</day><volume>10</volume><issue>4</issue><fpage>e0032422</fpage><pub-id pub-id-type="doi">10.1128/spectrum.00324-22</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Pereira</surname><given-names>PR</given-names> </name><name name-style="western"><surname>Carrageta</surname><given-names>DF</given-names> </name><name name-style="western"><surname>Oliveira</surname><given-names>PF</given-names> </name><name name-style="western"><surname>Rodrigues</surname><given-names>A</given-names> </name><name name-style="western"><surname>Alves</surname><given-names>MG</given-names> </name><name name-style="western"><surname>Monteiro</surname><given-names>MP</given-names> </name></person-group><article-title>Metabolomics as a tool for the early diagnosis and prognosis of diabetic kidney disease</article-title><source>Med Res Rev</source><year>2022</year><month>07</month><volume>42</volume><issue>4</issue><fpage>1518</fpage><lpage>1544</lpage><pub-id pub-id-type="doi">10.1002/med.21883</pub-id><pub-id pub-id-type="medline">35274315</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Naaman</surname><given-names>SC</given-names> </name><name name-style="western"><surname>Bakris</surname><given-names>GL</given-names> </name></person-group><article-title>Diabetic nephropathy: update on pillars of therapy slowing progression</article-title><source>Diabetes Care</source><year>2023</year><month>09</month><day>1</day><volume>46</volume><issue>9</issue><fpage>1574</fpage><lpage>1586</lpage><pub-id pub-id-type="doi">10.2337/dci23-0030</pub-id><pub-id pub-id-type="medline">37625003</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Richens</surname><given-names>JG</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Johri</surname><given-names>S</given-names> 
</name></person-group><article-title>Improving the accuracy of medical diagnosis with causal machine learning</article-title><source>Nat Commun</source><year>2020</year><month>08</month><day>11</day><volume>11</volume><issue>1</issue><fpage>3923</fpage><pub-id pub-id-type="doi">10.1038/s41467-020-17419-7</pub-id><pub-id pub-id-type="medline">32782264</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rajula</surname><given-names>HSR</given-names> </name><name name-style="western"><surname>Verlato</surname><given-names>G</given-names> </name><name name-style="western"><surname>Manchia</surname><given-names>M</given-names> </name><name name-style="western"><surname>Antonucci</surname><given-names>N</given-names> </name><name name-style="western"><surname>Fanos</surname><given-names>V</given-names> </name></person-group><article-title>Comparison of conventional statistical methods with machine learning in medicine: diagnosis, drug development, and treatment</article-title><source>Medicina (Kaunas)</source><year>2020</year><volume>56</volume><issue>9</issue><fpage>455</fpage><pub-id pub-id-type="doi">10.3390/medicina56090455</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dong</surname><given-names>J</given-names> </name><name name-style="western"><surname>Feng</surname><given-names>T</given-names> </name><name name-style="western"><surname>Thapa-Chhetry</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Machine learning model for early prediction of acute kidney injury (AKI) in pediatric critical care</article-title><source>Crit Care</source><year>2021</year><month>12</month><volume>25</volume><issue>1</issue><fpage>288</fpage><pub-id 
pub-id-type="doi">10.1186/s13054-021-03724-0</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>YW</given-names> </name><name name-style="western"><surname>Choi</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Shin</surname><given-names>EH</given-names> </name></person-group><article-title>Machine learning model for predicting malaria using clinical information</article-title><source>Comput Biol Med</source><year>2021</year><month>02</month><volume>129</volume><fpage>104151</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2020.104151</pub-id><pub-id pub-id-type="medline">33290932</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rahimi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Akbari</surname><given-names>A</given-names> </name><name name-style="western"><surname>Asadi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Emami</surname><given-names>H</given-names> </name></person-group><article-title>Cervical cancer survival prediction by machine learning algorithms: a systematic review</article-title><source>BMC Cancer</source><year>2023</year><month>04</month><day>13</day><volume>23</volume><issue>1</issue><fpage>341</fpage><pub-id pub-id-type="doi">10.1186/s12885-023-10808-3</pub-id><pub-id pub-id-type="medline">37055741</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Farah</surname><given-names>L</given-names> </name><name name-style="western"><surname>Murris</surname><given-names>JM</given-names> </name><name 
name-style="western"><surname>Borget</surname><given-names>I</given-names> </name><name name-style="western"><surname>Guilloux</surname><given-names>A</given-names> </name><name name-style="western"><surname>Martelli</surname><given-names>NM</given-names> </name><name name-style="western"><surname>Katsahian</surname><given-names>SIM</given-names> </name></person-group><article-title>Assessment of performance, interpretability, and explainability in artificial intelligence-based health technologies: what healthcare stakeholders need to know</article-title><source>Mayo Clin Proc Digit Health</source><year>2023</year><month>06</month><volume>1</volume><issue>2</issue><fpage>120</fpage><lpage>138</lpage><pub-id pub-id-type="doi">10.1016/j.mcpdig.2023.02.004</pub-id><pub-id pub-id-type="medline">40206724</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Bouazizi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ohtsuki</surname><given-names>T</given-names> </name><name name-style="western"><surname>Ishii</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nakahara</surname><given-names>E</given-names> </name></person-group><article-title>Toward building trust in machine learning models: quantifying the explainability by SHAP and references to human strategy</article-title><source>IEEE Access</source><year>2024</year><volume>12</volume><fpage>11010</fpage><lpage>11023</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2023.3347796</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>El Shawi</surname><given-names>R</given-names> </name><name 
name-style="western"><surname>Sherif</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Al-Mallah</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sakr</surname><given-names>S</given-names> </name></person-group><article-title>Interpretability in healthcare a comparative study of local machine learning interpretability techniques</article-title><conf-name>2019 IEEE 32nd International Symposium on Computer-Based Medical Systems (CBMS)</conf-name><conf-date>Jun 5-7, 2019</conf-date><conf-loc>Cordoba, Spain</conf-loc><fpage>275</fpage><lpage>280</lpage><pub-id pub-id-type="doi">10.1109/CBMS.2019.00065</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chadaga</surname><given-names>K</given-names> </name><name name-style="western"><surname>Prabhu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sampathila</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Explainable artificial intelligence approaches for COVID-19 prognosis prediction using clinical markers</article-title><source>Sci Rep</source><year>2024</year><month>01</month><day>20</day><volume>14</volume><issue>1</issue><fpage>1783</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-52428-2</pub-id><pub-id pub-id-type="medline">38245638</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khanna</surname><given-names>VV</given-names> </name><name name-style="western"><surname>Chadaga</surname><given-names>K</given-names> </name><name name-style="western"><surname>Sampathila</surname><given-names>N</given-names> </name><etal/></person-group><article-title>A decision support system for osteoporosis risk prediction using machine learning and 
explainable artificial intelligence</article-title><source>Heliyon</source><year>2023</year><month>12</month><volume>9</volume><issue>12</issue><fpage>e22456</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2023.e22456</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guan</surname><given-names>C</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>F</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>S</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>J</given-names> </name></person-group><article-title>Interpretable machine learning models for predicting venous thromboembolism in the intensive care unit: an analysis based on data from 207 centers</article-title><source>Crit Care</source><year>2023</year><volume>27</volume><issue>1</issue><fpage>406</fpage><pub-id pub-id-type="doi">10.1186/s13054-023-04683-4</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhong</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Jatav</surname><given-names>A</given-names> </name><name name-style="western"><surname>Afrin</surname><given-names>K</given-names> </name><name name-style="western"><surname>Shivaram</surname><given-names>T</given-names> </name><name name-style="western"><surname>Bukkapatnam</surname><given-names>STS</given-names> </name></person-group><article-title>Enhanced SpO<sub>2</sub> estimation using explainable machine learning and neck photoplethysmography</article-title><source>Artif Intell Med</source><year>2023</year><month>11</month><volume>145</volume><fpage>102685</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2023.102685</pub-id><pub-id 
pub-id-type="medline">37925216</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Suh</surname><given-names>B</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>H</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Interpretable deep-learning approaches for osteoporosis risk screening and individualized feature analysis using large population-based data: model development and performance evaluation</article-title><source>J Med Internet Res</source><year>2023</year><month>01</month><day>13</day><volume>25</volume><fpage>e40179</fpage><pub-id pub-id-type="doi">10.2196/40179</pub-id><pub-id pub-id-type="medline">36482780</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bernard</surname><given-names>D</given-names> </name><name name-style="western"><surname>Doumard</surname><given-names>E</given-names> </name><name name-style="western"><surname>Ader</surname><given-names>I</given-names> </name><etal/></person-group><article-title>Explainable machine learning framework to predict personalized physiological aging</article-title><source>Aging Cell</source><year>2023</year><month>08</month><volume>22</volume><issue>8</issue><fpage>e13872</fpage><pub-id pub-id-type="doi">10.1111/acel.13872</pub-id><pub-id pub-id-type="medline">37300327</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Cui</surname><given-names>X</given-names> </name><name 
name-style="western"><surname>Yang</surname><given-names>C</given-names> </name><etal/></person-group><article-title>A deep learning&#x2010;based interpretable decision tool for predicting high risk of chemotherapy&#x2010;induced nausea and vomiting in cancer patients prescribed highly emetogenic chemotherapy</article-title><source>Cancer Med</source><year>2023</year><month>09</month><volume>12</volume><issue>17</issue><fpage>18306</fpage><lpage>18316</lpage><pub-id pub-id-type="doi">10.1002/cam4.6428</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tibshirani</surname><given-names>R</given-names> </name></person-group><article-title>Regression shrinkage and selection via the lasso</article-title><source>J R Stat Soc Series B Stat Methodol</source><year>1996</year><month>01</month><day>1</day><volume>58</volume><issue>1</issue><fpage>267</fpage><lpage>288</lpage><pub-id pub-id-type="doi">10.1111/j.2517-6161.1996.tb02080.x</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Quinlan</surname><given-names>JR</given-names> </name></person-group><article-title>Induction of decision trees</article-title><source>Mach Learn</source><year>1986</year><month>03</month><volume>1</volume><issue>1</issue><fpage>81</fpage><lpage>106</lpage><pub-id pub-id-type="doi">10.1007/BF00116251</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Breiman</surname><given-names>L</given-names> </name></person-group><article-title>Random forests</article-title><source>Mach Learn</source><year>2001</year><month>10</month><volume>45</volume><issue>1</issue><fpage>5</fpage><lpage>32</lpage><pub-id 
pub-id-type="doi">10.1023/A:1010933404324</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Geurts</surname><given-names>P</given-names> </name><name name-style="western"><surname>Ernst</surname><given-names>D</given-names> </name><name name-style="western"><surname>Wehenkel</surname><given-names>L</given-names> </name></person-group><article-title>Extremely randomized trees</article-title><source>Mach Learn</source><year>2006</year><month>04</month><volume>63</volume><issue>1</issue><fpage>3</fpage><lpage>42</lpage><pub-id pub-id-type="doi">10.1007/s10994-006-6226-1</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Freund</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Schapire</surname><given-names>RE</given-names> </name></person-group><article-title>A decision-theoretic generalization of on-line learning and an application to boosting</article-title><source>J Comput Syst Sci</source><year>1997</year><month>08</month><volume>55</volume><issue>1</issue><fpage>119</fpage><lpage>139</lpage><pub-id pub-id-type="doi">10.1006/jcss.1997.1504</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>T</given-names> </name><name name-style="western"><surname>Guestrin</surname><given-names>C</given-names> </name></person-group><article-title>XGBoost: a scalable tree boosting system</article-title><conf-name>Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</conf-name><conf-date>Aug 13-17, 2016</conf-date><conf-loc>San Francisco, CA</conf-loc><fpage>785</fpage><lpage>794</lpage><pub-id 
pub-id-type="doi">10.1145/2939672.2939785</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Ke</surname><given-names>G</given-names> </name><name name-style="western"><surname>Meng</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Finley</surname><given-names>T</given-names> </name><etal/></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Guyon</surname><given-names>I</given-names> </name><name name-style="western"><surname>Von Luxburg</surname><given-names>U</given-names> </name><name name-style="western"><surname>Bengio</surname><given-names>S</given-names> </name><name name-style="western"><surname>Wallach</surname><given-names>H</given-names> </name><name name-style="western"><surname>Fergus</surname><given-names>R</given-names> </name><name name-style="western"><surname>Vishwanathan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Garnett</surname><given-names>R</given-names> </name></person-group><article-title>LightGBM: a highly efficient gradient boosting decision tree</article-title><source>Advances in Neural Information Processing Systems 30 (NIPS 2017)</source><year>2017</year><fpage>3149</fpage><lpage>3157</lpage></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Ribeiro</surname><given-names>MT</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Guestrin</surname><given-names>C</given-names> </name></person-group><article-title>&#x201C;Why Should I Trust You?&#x201D;: explaining the predictions of any classifier</article-title><source>arXiv</source><comment>Preprint posted online on  Aug 9, 2016</comment><pub-id 
pub-id-type="doi">10.48550/arXiv.1602.04938</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Lundberg</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>SI</given-names> </name></person-group><article-title>A unified approach to interpreting model predictions</article-title><source>arXiv</source><comment>Preprint posted online on Nov 24, 2017</comment><pub-id pub-id-type="doi">10.48550/arXiv.1705.07874</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="web"><article-title>Diabetes Complications Data Set</article-title><source>Population Health Data Archive</source><access-date>2025-10-20</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.ncmi.cn/phda/dataDetails.do?id=CSTR:A0006.11.A0005.201905.000282-V1.0">https://www.ncmi.cn/phda/dataDetails.do?id=CSTR:A0006.11.A0005.201905.000282-V1.0</ext-link></comment></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Narva</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Bilous</surname><given-names>RW</given-names> </name></person-group><article-title>Laboratory assessment of diabetic kidney disease</article-title><source>Diabetes Spectr</source><year>2015</year><month>08</month><volume>28</volume><issue>3</issue><fpage>162</fpage><lpage>166</lpage><pub-id pub-id-type="doi">10.2337/diaspect.28.3.162</pub-id><pub-id pub-id-type="medline">26300608</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Neumiller</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Alicic</surname><given-names>RZ</given-names> 
</name><name name-style="western"><surname>Tuttle</surname><given-names>KR</given-names> </name></person-group><article-title>Optimization of guideline-directed medical therapies in patients with diabetes and chronic kidney disease</article-title><source>Clin Kidney J</source><year>2024</year><month>01</month><volume>17</volume><issue>1</issue><fpage>sfad285</fpage><pub-id pub-id-type="doi">10.1093/ckj/sfad285</pub-id><pub-id pub-id-type="medline">38213492</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><collab>American Diabetes Association Professional Practice Committee</collab></person-group><article-title>11. Chronic kidney disease and risk management: standards of care in diabetes&#x2014;2024</article-title><source>Diabetes Care</source><year>2023</year><volume>47</volume><issue>Supplement_1</issue><fpage>S219</fpage><lpage>S230</lpage><pub-id pub-id-type="doi">10.2337/dc24-S011</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Davis</surname><given-names>KN</given-names> </name><name name-style="western"><surname>Hines</surname><given-names>AE</given-names> </name><name name-style="western"><surname>Schaefer</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Naseman</surname><given-names>KW</given-names> </name></person-group><article-title>Protecting the kidneys: update on therapies to treat diabetic nephropathy</article-title><source>Clin Diabetes</source><year>2022</year><volume>40</volume><issue>3</issue><fpage>305</fpage><lpage>311</lpage><pub-id pub-id-type="doi">10.2337/cd21-0090</pub-id><pub-id pub-id-type="medline">35983418</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Li</surname><given-names>B</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ye</surname><given-names>W</given-names> </name></person-group><article-title>A meta-analysis of urinary transferrin for early diagnosis of diabetic nephropathy</article-title><source>Lab Med</source><year>2024</year><month>07</month><day>3</day><volume>55</volume><issue>4</issue><fpage>413</fpage><lpage>419</lpage><pub-id pub-id-type="doi">10.1093/labmed/lmad115</pub-id><pub-id pub-id-type="medline">38335130</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sulaiman</surname><given-names>MK</given-names> </name></person-group><article-title>Diabetic nephropathy: recent advances in pathophysiology and challenges in dietary management</article-title><source>Diabetol Metab Syndr</source><year>2019</year><volume>11</volume><issue>1</issue><fpage>7</fpage><pub-id pub-id-type="doi">10.1186/s13098-019-0403-4</pub-id><pub-id pub-id-type="medline">30679960</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Barbagallo</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Cefal&#x00F9;</surname><given-names>AB</given-names> </name><name name-style="western"><surname>Giammanco</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Lipoprotein abnormalities in chronic kidney disease and renal transplantation</article-title><source>Life (Basel)</source><year>2021</year><month>04</month><day>5</day><volume>11</volume><issue>4</issue><fpage>315</fpage><pub-id pub-id-type="doi">10.3390/life11040315</pub-id><pub-id pub-id-type="medline">33916487</pub-id></nlm-citation></ref><ref 
id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gupta</surname><given-names>A</given-names> </name><name name-style="western"><surname>Gupta</surname><given-names>R</given-names> </name></person-group><article-title>Current understanding of diabetic dyslipidemia: a review</article-title><source>J Indian Inst Sci</source><year>2023</year><month>01</month><volume>103</volume><issue>1</issue><fpage>287</fpage><lpage>307</lpage><pub-id pub-id-type="doi">10.1007/s41745-022-00346-5</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>J</given-names> </name><name name-style="western"><surname>Fang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Luo</surname><given-names>Q</given-names> </name><etal/></person-group><article-title>Unlocking the mysteries of VLDL: exploring its production, intracellular trafficking, and metabolism as therapeutic targets</article-title><source>Lipids Health Dis</source><year>2024</year><month>01</month><day>12</day><volume>23</volume><issue>1</issue><fpage>14</fpage><pub-id pub-id-type="doi">10.1186/s12944-023-01993-y</pub-id><pub-id pub-id-type="medline">38216994</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Weldegiorgis</surname><given-names>M</given-names> </name><name name-style="western"><surname>Woodward</surname><given-names>M</given-names> </name></person-group><article-title>Elevated triglycerides and reduced high-density lipoprotein cholesterol are independently associated with the onset of advanced chronic kidney disease: a cohort study of 911,360 individuals from the United Kingdom</article-title><source>BMC 
Nephrol</source><year>2022</year><month>09</month><day>15</day><volume>23</volume><issue>1</issue><fpage>312</fpage><pub-id pub-id-type="doi">10.1186/s12882-022-02932-2</pub-id><pub-id pub-id-type="medline">36109725</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bauer</surname><given-names>F</given-names> </name><name name-style="western"><surname>Seibert</surname><given-names>FS</given-names> </name><name name-style="western"><surname>Rohn</surname><given-names>B</given-names> </name><name name-style="western"><surname>Babel</surname><given-names>N</given-names> </name><name name-style="western"><surname>Westhoff</surname><given-names>TH</given-names> </name></person-group><article-title>Estimation of LDL cholesterol in chronic kidney disease</article-title><source>Eur J Prev Cardiol</source><year>2021</year><month>10</month><day>13</day><volume>28</volume><issue>12</issue><fpage>1402</fpage><lpage>1408</lpage><pub-id pub-id-type="doi">10.1093/eurjpc/zwaa003</pub-id><pub-id pub-id-type="medline">33624033</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Peng</surname><given-names>J</given-names> </name><etal/></person-group><article-title>High levels of serum C-peptide are associated with a decreased risk for incident renal progression in patients with type 2 diabetes: a retrospective cohort study</article-title><source>BMJ Open Diabetes Res Care</source><year>2023</year><month>03</month><volume>11</volume><issue>2</issue><fpage>e003201</fpage><pub-id pub-id-type="doi">10.1136/bmjdrc-2022-003201</pub-id><pub-id 
pub-id-type="medline">36958752</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wahren</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ekberg</surname><given-names>K</given-names> </name><name name-style="western"><surname>Samneg&#x00E5;rd</surname><given-names>B</given-names> </name><name name-style="western"><surname>Johansson</surname><given-names>BL</given-names> </name></person-group><article-title>C-peptide: a new potential in the treatment of diabetic nephropathy</article-title><source>Curr Diab Rep</source><year>2001</year><month>12</month><volume>1</volume><issue>3</issue><fpage>261</fpage><lpage>266</lpage><pub-id pub-id-type="doi">10.1007/s11892-001-0044-4</pub-id><pub-id pub-id-type="medline">12643208</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hills</surname><given-names>CE</given-names> </name><name name-style="western"><surname>Brunskill</surname><given-names>NJ</given-names> </name><name name-style="western"><surname>Squires</surname><given-names>PE</given-names> </name></person-group><article-title>C-peptide as a therapeutic tool in diabetic nephropathy</article-title><source>Am J Nephrol</source><year>2010</year><volume>31</volume><issue>5</issue><fpage>389</fpage><lpage>397</lpage><pub-id pub-id-type="doi">10.1159/000289864</pub-id><pub-id pub-id-type="medline">20357430</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Colbert</surname><given-names>GB</given-names> </name><name name-style="western"><surname>Elrggal</surname><given-names>ME</given-names> </name><name 
name-style="western"><surname>Gaddy</surname><given-names>A</given-names> </name><name name-style="western"><surname>Madariaga</surname><given-names>HM</given-names> </name><name name-style="western"><surname>Lerma</surname><given-names>EV</given-names> </name></person-group><article-title>Management of hypertension in diabetic kidney disease</article-title><source>J Clin Med</source><year>2023</year><month>10</month><day>31</day><volume>12</volume><issue>21</issue><fpage>6868</fpage><pub-id pub-id-type="doi">10.3390/jcm12216868</pub-id><pub-id pub-id-type="medline">37959333</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Steigerwalt</surname><given-names>S</given-names> </name></person-group><article-title>Management of hypertension in diabetic patients with chronic kidney disease</article-title><source>Diabetes Spectr</source><year>2008</year><month>01</month><day>1</day><volume>21</volume><issue>1</issue><fpage>30</fpage><lpage>36</lpage><pub-id pub-id-type="doi">10.2337/diaspect.21.1.30</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Shapley Additive Explanations (SHAP) scatter plot for CA199.</p><media xlink:href="medinform_v13i1e64979_app1.png" xlink:title="PNG File, 104 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Shapley Additive Explanations (SHAP) scatter plot for lactate dehydrogenase L.</p><media xlink:href="medinform_v13i1e64979_app2.png" xlink:title="PNG File, 131 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Shapley Additive Explanations (SHAP) scatter plot for total protein.</p><media xlink:href="medinform_v13i1e64979_app3.png" xlink:title="PNG File, 187 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia 
Appendix 4</label><p>Shapley Additive Explanations (SHAP) scatter plot for weight.</p><media xlink:href="medinform_v13i1e64979_app4.png" xlink:title="PNG File, 164 KB"/></supplementary-material><supplementary-material id="app5"><label>Multimedia Appendix 5</label><p>Shapley Additive Explanations (SHAP) scatter plot for endocrine disease.</p><media xlink:href="medinform_v13i1e64979_app5.png" xlink:title="PNG File, 80 KB"/></supplementary-material><supplementary-material id="app6"><label>Multimedia Appendix 6</label><p>Shapley Additive Explanations (SHAP) scatter plot for C-reactive protein.</p><media xlink:href="medinform_v13i1e64979_app6.png" xlink:title="PNG File, 106 KB"/></supplementary-material><supplementary-material id="app7"><label>Multimedia Appendix 7</label><p>Shapley Additive Explanations (SHAP) scatter plot for lipoproteins.</p><media xlink:href="medinform_v13i1e64979_app7.png" xlink:title="PNG File, 107 KB"/></supplementary-material><supplementary-material id="app8"><label>Multimedia Appendix 8</label><p>Shapley Additive Explanations (SHAP) scatter plot for serum creatinine.</p><media xlink:href="medinform_v13i1e64979_app8.png" xlink:title="PNG File, 128 KB"/></supplementary-material><supplementary-material id="app9"><label>Multimedia Appendix 9</label><p>Shapley Additive Explanations (SHAP) scatter plot for albumin creatinine ratio.</p><media xlink:href="medinform_v13i1e64979_app9.png" xlink:title="PNG File, 158 KB"/></supplementary-material><supplementary-material id="app10"><label>Multimedia Appendix 10</label><p>Diabetes Complications Dataset.</p><media xlink:href="medinform_v13i1e64979_app10.xlsx" xlink:title="XLSX File, 439 KB"/></supplementary-material></app-group></back></article>