
Thursday, March 6, 2025

GNN ML model code with Optuna tuning for 6 SMILES, and plot code

CODE:


import deepchem as dc
import numpy as np
from deepchem.feat import ConvMolFeaturizer
from deepchem.data import NumpyDataset
from deepchem.models import GraphConvModel
import tensorflow as tf
import logging
from typing import List, Optional, Union, Tuple
import optuna
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, f1_score, accuracy_score, precision_score, recall_score

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# --- Monkey-Patching for Keras Compatibility ---
from deepchem.models.optimizers import Adam

def _create_tf_optimizer(self, global_step):
    try:
        if self.learning_rate is None:
            learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
                initial_learning_rate=self.initial_learning_rate,
                decay_steps=self.decay_steps,
                decay_rate=self.decay_rate,
                staircase=self.staircase
            )
        else:
            learning_rate = self.learning_rate
        return tf.keras.optimizers.Adam(
            learning_rate=learning_rate,
            beta_1=self.beta1,
            beta_2=self.beta2,
            epsilon=self.epsilon
        )
    except Exception as e:
        logger.error(f"Error creating optimizer: {e}")
        raise

Adam._create_tf_optimizer = _create_tf_optimizer

from deepchem.models.keras_model import KerasModel

def _create_inputs(self, example_inputs):
    try:
        self._ensure_built()
        keras_model = getattr(self.model, 'model', self.model)
        if hasattr(keras_model, 'inputs') and keras_model.inputs is not None:
            self._input_shapes = [t.shape for t in keras_model.inputs]
            self._input_dtypes = [t.dtype.name for t in keras_model.inputs]
        else:
            if isinstance(example_inputs, (list, tuple)):
                self._input_shapes = [np.shape(x) for x in example_inputs]
                self._input_dtypes = [x.dtype.name for x in example_inputs]
            else:
                self._input_shapes = [np.shape(example_inputs)]
                self._input_dtypes = [example_inputs.dtype.name]
        self._inputs_built = True
    except Exception as e:
        logger.error(f"Error in _create_inputs: {e}")
        raise

KerasModel._create_inputs = _create_inputs

# --- Enhanced Helper Function ---
def smiles_to_dataset(smiles_list: List[str], labels: Optional[Union[List, np.ndarray]] = None,
                     featurizer=ConvMolFeaturizer()) -> Tuple[NumpyDataset, Optional[np.ndarray]]:
    try:
        if not smiles_list or not all(isinstance(s, str) for s in smiles_list):
            raise ValueError("SMILES list must contain valid strings.")
        if labels is not None:
            if len(smiles_list) != len(labels):
                raise ValueError("SMILES and labels lists must have the same length.")
            labels = np.array(labels)

        mols = featurizer.featurize(smiles_list)
        valid_mols = []
        valid_labels = []

        for i, mol in enumerate(mols):
            if mol is not None and hasattr(mol, 'atom_features'):
                valid_mols.append(mol)
                if labels is not None:
                    valid_labels.append(labels[i])
            else:
                logger.warning(f"SMILES at index {i} failed to featurize: {smiles_list[i]}")

        if not valid_mols:
            raise ValueError("No valid SMILES strings were featurized.")

        if labels is not None:
            dataset = NumpyDataset(X=np.array(valid_mols, dtype=object), y=np.array(valid_labels))
            logger.info(f"Created dataset with {len(valid_mols)} valid molecules out of {len(smiles_list)}.")
            return dataset, np.array(valid_labels)
        else:
            dataset = NumpyDataset(X=np.array(valid_mols, dtype=object))
            logger.info(f"Created dataset with {len(valid_mols)} valid molecules out of {len(smiles_list)}.")
            return dataset, None
    except Exception as e:
        logger.error(f"Error in smiles_to_dataset: {e}")
        raise
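
# Usage sketch (hypothetical inputs, not part of the run below): invalid SMILES are
# logged and dropped, and the filtered labels stay aligned with the surviving molecules:
#   ds, kept = smiles_to_dataset(["CCO", "not_a_smiles", "c1ccccc1"], [0.5, 0.1, 0.9])
#   print(len(ds), kept)  # expected: 2 molecules, labels [0.5 0.9]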

# --- New SMILES List with 6 Molecules ---
smiles_list_6 = [
    "COc1cc(/C=C/C(=O)CC(=O)/C=C/c2ccc(c(c2)OC)O)ccc1O",
    "COC1=CC(\C=C\C(=O)CC(=O)\C=C\C2=CC=C(O)C(OC)=C2)=CC=C1O",
    "COC1=CC=C(\C=C\C(=O)CC(=O)\C=C\C2=CC=C(OC)C(OC)=C2)C=C1OC",
    "COC1=CC(CNC(=O)CCCC\C=C/C(C)C)=CC=C1O",
    "CCCCCCCCC(=O)NCC1=CC=C(O)C(OC)=C1",
    "CN(C)C1=CC2=C(C=C1)N=C3C=CC(=[N+](C)C)C=C3S2.[Cl-]"
]

train_smiles = smiles_list_6  # Use the 6 SMILES for training
train_class_labels = [1, 0, 1, 0, 1, 1]  # Example labels for 6 SMILES
train_reg_labels = [7.2, 6.9, 6.4, 6.3, 6.2, 6.1] # Example Regression labels

valid_smiles = smiles_list_6 # Use the same 6 SMILES for validation (for this example)
valid_class_labels = [1, 0, 1, 0, 1, 1] # Example validation labels
valid_reg_labels = [7.2, 6.9, 6.4, 6.3, 6.2, 6.1] # Example validation regression labels

test_smiles = smiles_list_6 # Use the same 6 SMILES for testing (for this example)
featurizer = ConvMolFeaturizer()
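
# Optional sanity check (a sketch; uses RDKit, which ConvMolFeaturizer already relies on):
# confirm every SMILES parses before featurization. Uncomment to run:
# from rdkit import Chem
# assert all(Chem.MolFromSmiles(s) is not None for s in smiles_list_6), "Invalid SMILES present"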

# Create Datasets
try:
    train_dataset_class, train_class_labels_filtered = smiles_to_dataset(train_smiles, train_class_labels, featurizer)
    train_dataset_reg, train_reg_labels_filtered = smiles_to_dataset(train_smiles, train_reg_labels, featurizer)
    valid_dataset_class, valid_class_labels_filtered = smiles_to_dataset(valid_smiles, valid_class_labels, featurizer)
    valid_dataset_reg, valid_reg_labels_filtered = smiles_to_dataset(valid_smiles, valid_reg_labels, featurizer)
    test_dataset, _ = smiles_to_dataset(test_smiles, None, featurizer)
except Exception as e:
    logger.error(f"Failed to create datasets: {e}")
    raise

# --- Classification Model (Unchanged) ---
def train_and_predict_class(train_dataset, valid_dataset, test_dataset):
    try:
        model = GraphConvModel(
            n_tasks=1,
            mode='classification',
            dropout=0.2,
            batch_normalize=False,
            model_dir='graphconv_model_classification_expanded',
            graph_conv_layers=[64, 64],
            dense_layer_size=128,
            batch_size=50
        )
        if hasattr(model.model, 'name'):
            model.model.name = 'graph_conv_classification_model_expanded'

        logger.info("Training classification model...")
        model.fit(train_dataset, nb_epoch=50)

        train_pred = model.predict(train_dataset)
        valid_pred = model.predict(valid_dataset)
        test_pred = model.predict(test_dataset)

        return model, train_pred, valid_pred, test_pred
    except Exception as e:
        logger.error(f"Error in classification training/prediction: {e}")
        raise

# --- Regression Model with Optuna Hyperparameter Tuning ---
def objective(trial):
    """Optuna objective function to maximize R^2 for regression."""
    try:
        # Define hyperparameter search space
        n_layers = trial.suggest_int('n_layers', 1, 3)  # Number of graph conv layers
        graph_conv_sizes = [trial.suggest_categorical('graph_conv_size_' + str(i), [32, 64, 128]) for i in range(n_layers)]
        dense_layer_size = trial.suggest_categorical('dense_layer_size', [64, 128, 256])
        dropout = trial.suggest_float('dropout', 0.0, 0.5)
        batch_size = trial.suggest_categorical('batch_size', [32, 50, 64])
        learning_rate = trial.suggest_float('learning_rate', 1e-5, 1e-3, log=True) # Added learning rate tuning

        # Create and train model
        model = GraphConvModel(
            n_tasks=1,
            mode='regression',
            dropout=dropout,
            batch_normalize=False,
            model_dir=f'graphconv_model_regression_trial_{trial.number}',
            graph_conv_layers=graph_conv_sizes,
            dense_layer_size=dense_layer_size,
            batch_size=batch_size,
            learning_rate=learning_rate # Set learning rate from trial
        )
        if hasattr(model.model, 'name'):
            model.model.name = f'graph_conv_regression_model_trial_{trial.number}'

        logger.info(f"Training regression model with trial {trial.number}...")
        model.fit(train_dataset_reg, nb_epoch=100, deterministic=False)  # Increased epochs

        # Evaluate on validation set
        valid_pred = model.predict(valid_dataset_reg)
        r2 = r2_score(valid_reg_labels_filtered, valid_pred.flatten())

        return r2  # Maximize R^2
    except Exception as e:
        logger.error(f"Error in Optuna trial {trial.number}: {e}")
        return float('-inf')  # Return negative infinity for failed trials
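
# Optional variant (a sketch, not used in main() below): per-epoch pruning with Optuna's
# MedianPruner, reporting validation R^2 after every epoch so weak trials stop early.
def objective_with_pruning(trial):
    dropout = trial.suggest_float('dropout', 0.0, 0.5)
    model = GraphConvModel(n_tasks=1, mode='regression', dropout=dropout,
                           batch_normalize=False, graph_conv_layers=[64],
                           dense_layer_size=128, batch_size=50)
    r2 = float('-inf')
    for epoch in range(100):
        model.fit(train_dataset_reg, nb_epoch=1)
        r2 = r2_score(valid_reg_labels_filtered, model.predict(valid_dataset_reg).flatten())
        trial.report(r2, epoch)
        if trial.should_prune():
            raise optuna.TrialPruned()
    return r2
# Example use:
#   study = optuna.create_study(direction='maximize', pruner=optuna.pruners.MedianPruner())
#   study.optimize(objective_with_pruning, n_trials=20)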

def train_and_predict_reg_with_best_params(train_dataset, valid_dataset, test_dataset, best_params):
    """Train final regression model with best hyperparameters."""
    try:
        model = GraphConvModel(
            n_tasks=1,
            mode='regression',
            dropout=best_params['dropout'],
            batch_normalize=False,
            model_dir='graphconv_model_regression_expanded',
            graph_conv_layers=[best_params[f'graph_conv_size_{i}'] for i in range(best_params['n_layers'])],
            dense_layer_size=best_params['dense_layer_size'],
            batch_size=best_params['batch_size'],
            learning_rate=best_params['learning_rate'] # Use best learning rate
        )
        if hasattr(model.model, 'name'):
            model.model.name = 'graph_conv_regression_model_expanded'

        logger.info("Training final regression model with best parameters...")
        model.fit(train_dataset, nb_epoch=100)  # Increased epochs; fit the dataset passed in

        train_pred = model.predict(train_dataset)
        valid_pred = model.predict(valid_dataset)
        test_pred = model.predict(test_dataset)

        return model, train_pred, valid_pred, test_pred
    except Exception as e:
        logger.error(f"Error in regression training/prediction with best params: {e}")
        raise

# --- Evaluation Functions ---
def evaluate_classification(true_labels, pred_probs):
    try:
        pred_labels = np.argmax(pred_probs, axis=2).flatten()
        accuracy = accuracy_score(true_labels, pred_labels)
        precision = precision_score(true_labels, pred_labels, zero_division=0)
        recall = recall_score(true_labels, pred_labels, zero_division=0)
        f1 = f1_score(true_labels, pred_labels, zero_division=0)
        return accuracy, precision, recall, f1
    except Exception as e:
        logger.error(f"Error in classification evaluation: {e}")
        raise

def evaluate_regression(true_labels, pred_values):
    try:
        mae = mean_absolute_error(true_labels, pred_values.flatten())
        mse = mean_squared_error(true_labels, pred_values.flatten())
        r2 = r2_score(true_labels, pred_values.flatten())
        return mae, mse, r2
    except Exception as e:
        logger.error(f"Error in regression evaluation: {e}")
        raise
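
# Shape note (a sketch): GraphConvModel classification predictions come back with shape
# (n_samples, n_tasks, n_classes), which is why evaluate_classification takes argmax
# over axis=2. For example:
#   probs = np.array([[[0.3, 0.7]], [[0.8, 0.2]]])  # 2 molecules, 1 task, 2 classes
#   evaluate_classification([1, 0], probs)          # -> (1.0, 1.0, 1.0, 1.0)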

# --- Main Execution ---
def main():
    # Classification (unchanged)
    class_model, train_class_pred, valid_class_pred, test_class_pred = train_and_predict_class(
        train_dataset_class, valid_dataset_class, test_dataset
    )

    # Regression with Optuna tuning
    study = optuna.create_study(direction='maximize')
    logger.info("Starting Optuna hyperparameter optimization for regression...")
    study.optimize(objective, n_trials=50)  # Increased trials to 50

    logger.info(f"Best trial: {study.best_trial.number}")
    logger.info(f"Best R^2: {study.best_value}")
    logger.info(f"Best parameters: {study.best_params}")

    # Train final regression model with best parameters
    reg_model, train_reg_pred, valid_reg_pred, test_reg_pred = train_and_predict_reg_with_best_params(
        train_dataset_reg, valid_dataset_reg, test_dataset, study.best_params
    )

    # Print Predictions
    print("Training Classification Predictions (Probabilities):", train_class_pred)
    print("Validation Classification Predictions (Probabilities):", valid_class_pred)
    print("Test Classification Predictions (Probabilities):", test_class_pred)
    print("Training Regression Predictions:", train_reg_pred)
    print("Validation Regression Predictions:", valid_reg_pred)
    print("Test Regression Predictions:", test_reg_pred)

    # Evaluate Performance
    train_class_acc, train_class_prec, train_class_rec, train_class_f1 = evaluate_classification(train_class_labels_filtered, train_class_pred)
    valid_class_acc, valid_class_prec, valid_class_rec, valid_class_f1 = evaluate_classification(valid_class_labels_filtered, valid_class_pred)
    train_reg_mae, train_reg_mse, train_reg_r2 = evaluate_regression(train_reg_labels_filtered, train_reg_pred)
    valid_reg_mae, valid_reg_mse, valid_reg_r2 = evaluate_regression(valid_reg_labels_filtered, valid_reg_pred)

    print(f"--- Classification Metrics ---")
    print(f"Training Accuracy: {train_class_acc:.4f}, Precision: {train_class_prec:.4f}, Recall: {train_class_rec:.4f}, F1 Score: {train_class_f1:.4f}")
    print(f"Validation Accuracy: {valid_class_acc:.4f}, Precision: {valid_class_prec:.4f}, Recall: {valid_class_rec:.4f}, F1 Score: {valid_class_f1:.4f}")
    print(f"--- Regression Metrics ---")
    print(f"Training MAE: {train_reg_mae:.4f}, MSE: {train_reg_mse:.4f}, R^2: {train_reg_r2:.4f}")
    print(f"Validation MAE: {valid_reg_mae:.4f}, MSE: {valid_reg_mse:.4f}, R^2: {valid_reg_r2:.4f}")

if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        logger.error(f"Main execution failed: {e}")
        raise


OUTPUT:

Training Classification Predictions (Probabilities):
[[[0.28529257 0.71470743]]
 [[0.28529257 0.71470743]]
 [[0.26084998 0.73915005]]
 [[0.8086615  0.19133848]]
 [[0.12937962 0.8706204 ]]
 [[0.08971384 0.9102861 ]]]
Validation Classification Predictions (Probabilities):
[[[0.28529257 0.71470743]]
 [[0.28529257 0.71470743]]
 [[0.26084998 0.73915005]]
 [[0.8086615  0.19133848]]
 [[0.12937962 0.8706204 ]]
 [[0.08971384 0.9102861 ]]]
Test Classification Predictions (Probabilities):
[[[0.28529257 0.71470743]]
 [[0.28529257 0.71470743]]
 [[0.26084998 0.73915005]]
 [[0.8086615  0.19133848]]
 [[0.12937962 0.8706204 ]]
 [[0.08971384 0.9102861 ]]]
Training Regression Predictions:
[[6.8237925]
 [6.823792 ]
 [6.7207255]
 [6.3307014]
 [6.3301277]
 [6.082355 ]]
Validation Regression Predictions:
[[6.8237925]
 [6.823792 ]
 [6.7207255]
 [6.3307014]
 [6.3301277]
 [6.082355 ]]
Test Regression Predictions:
[[6.8237925]
 [6.823792 ]
 [6.7207255]
 [6.3307014]
 [6.3301277]
 [6.082355 ]]
--- Classification Metrics ---
Training Accuracy: 0.8333, Precision: 0.8000, Recall: 1.0000, F1 Score: 0.8889
Validation Accuracy: 0.8333, Precision: 0.8000, Recall: 1.0000, F1 Score: 0.8889
--- Regression Metrics ---
Training MAE: 0.1586, MSE: 0.0447, R^2: 0.7170
Validation MAE: 0.1586, MSE: 0.0447, R^2: 0.7170

PLOT CODE:

import matplotlib.pyplot as plt

# --- Modified Training Functions to Track Metrics per Epoch ---
def train_and_predict_class_with_tracking(train_dataset, valid_dataset, test_dataset):
    try:
        model = GraphConvModel(
            n_tasks=1,
            mode='classification',
            dropout=0.2,
            batch_normalize=False,
            model_dir='graphconv_model_classification_expanded',
            graph_conv_layers=[64, 64],
            dense_layer_size=128,
            batch_size=50
        )
        if hasattr(model.model, 'name'):
            model.model.name = 'graph_conv_classification_model_expanded'

        train_accs, valid_accs = [], []
        train_precs, valid_precs = [], []
        train_recs, valid_recs = [], []
        train_f1s, valid_f1s = [], []

        # Train one epoch at a time so metrics can be recorded per epoch
        for epoch in range(50):
            model.fit(train_dataset, nb_epoch=1)
            train_pred = model.predict(train_dataset)
            valid_pred = model.predict(valid_dataset)
            train_acc, train_prec, train_rec, train_f1 = evaluate_classification(
                train_class_labels_filtered, train_pred)
            valid_acc, valid_prec, valid_rec, valid_f1 = evaluate_classification(
                valid_class_labels_filtered, valid_pred)
            train_accs.append(train_acc)
            valid_accs.append(valid_acc)
            train_precs.append(train_prec)
            valid_precs.append(valid_prec)
            train_recs.append(train_rec)
            valid_recs.append(valid_rec)
            train_f1s.append(train_f1)
            valid_f1s.append(valid_f1)
            logger.info(f"Epoch {epoch+1}, Training Accuracy: {train_acc:.4f}, Validation Accuracy: {valid_acc:.4f}")

        test_pred = model.predict(test_dataset)
        return (model, train_pred, valid_pred, test_pred,
                train_accs, valid_accs, train_precs, valid_precs,
                train_recs, valid_recs, train_f1s, valid_f1s)
    except Exception as e:
        logger.error(f"Error in classification training/prediction with tracking: {e}")
        raise

def train_and_predict_reg_with_tracking(train_dataset, valid_dataset, test_dataset, best_params):
    try:
        model = GraphConvModel(
            n_tasks=1,
            mode='regression',
            dropout=best_params['dropout'],
            batch_normalize=False,
            model_dir='graphconv_model_regression_expanded',
            graph_conv_layers=[best_params[f'graph_conv_size_{i}'] for i in range(best_params['n_layers'])],
            dense_layer_size=best_params['dense_layer_size'],
            batch_size=best_params['batch_size'],
            learning_rate=best_params['learning_rate']
        )
        if hasattr(model.model, 'name'):
            model.model.name = 'graph_conv_regression_model_expanded'

        train_maes, valid_maes = [], []
        train_mses, valid_mses = [], []
        train_r2s, valid_r2s = [], []

        # Train one epoch at a time so metrics can be recorded per epoch
        for epoch in range(100):
            model.fit(train_dataset, nb_epoch=1)
            train_pred = model.predict(train_dataset)
            valid_pred = model.predict(valid_dataset)
            train_mae, train_mse, train_r2 = evaluate_regression(train_reg_labels_filtered, train_pred)
            valid_mae, valid_mse, valid_r2 = evaluate_regression(valid_reg_labels_filtered, valid_pred)
            train_maes.append(train_mae)
            valid_maes.append(valid_mae)
            train_mses.append(train_mse)
            valid_mses.append(valid_mse)
            train_r2s.append(train_r2)
            valid_r2s.append(valid_r2)
            logger.info(f"Epoch {epoch+1}, Training MAE: {train_mae:.4f}, Validation MAE: {valid_mae:.4f}")

        test_pred = model.predict(test_dataset)
        return (model, train_pred, valid_pred, test_pred,
                train_maes, valid_maes, train_mses, valid_mses, train_r2s, valid_r2s)
    except Exception as e:
        logger.error(f"Error in regression training/prediction with tracking: {e}")
        raise

# --- Plotting Functions ---
def plot_classification_metrics(train_accs, valid_accs, train_precs, valid_precs,
                                train_recs, valid_recs, train_f1s, valid_f1s):
    epochs = range(len(train_accs))
    plt.figure(figsize=(10, 6))
    # One subplot per metric, training vs validation
    for idx, (title, train_vals, valid_vals) in enumerate([
            ('Accuracy', train_accs, valid_accs),
            ('Precision', train_precs, valid_precs),
            ('Recall', train_recs, valid_recs),
            ('F1 Score', train_f1s, valid_f1s)], start=1):
        plt.subplot(2, 2, idx)
        plt.plot(epochs, train_vals, label='Training')
        plt.plot(epochs, valid_vals, label='Validation')
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel(title)
        plt.legend()
    plt.tight_layout()
    plt.show()

def plot_regression_metrics(train_maes, valid_maes, train_mses, valid_mses, train_r2s, valid_r2s):
    epochs = range(len(train_maes))
    plt.figure(figsize=(10, 6))
    # One subplot per metric, training vs validation
    for idx, (title, train_vals, valid_vals) in enumerate([
            ('MAE', train_maes, valid_maes),
            ('MSE', train_mses, valid_mses),
            ('R^2', train_r2s, valid_r2s)], start=1):
        plt.subplot(1, 3, idx)
        plt.plot(epochs, train_vals, label='Training')
        plt.plot(epochs, valid_vals, label='Validation')
        plt.title(title)
        plt.xlabel('Epoch')
        plt.ylabel(title)
        plt.legend()
    plt.tight_layout()
    plt.show()

# --- Main Execution with Plotting ---
def main():
    # Classification with per-epoch metric tracking
    (class_model, train_class_pred, valid_class_pred, test_class_pred,
     train_accs, valid_accs, train_precs, valid_precs,
     train_recs, valid_recs, train_f1s, valid_f1s) = train_and_predict_class_with_tracking(
        train_dataset_class, valid_dataset_class, test_dataset
    )
    plot_classification_metrics(train_accs, valid_accs, train_precs, valid_precs,
                                train_recs, valid_recs, train_f1s, valid_f1s)

    # Regression with Optuna tuning
    study = optuna.create_study(direction='maximize')
    logger.info("Starting Optuna hyperparameter optimization for regression...")
    study.optimize(objective, n_trials=50)

    logger.info(f"Best trial: {study.best_trial.number}")
    logger.info(f"Best R^2: {study.best_value}")
    logger.info(f"Best parameters: {study.best_params}")

    # Train the final regression model with the best parameters, tracking metrics per epoch
    (reg_model, train_reg_pred, valid_reg_pred, test_reg_pred,
     train_maes, valid_maes, train_mses, valid_mses,
     train_r2s, valid_r2s) = train_and_predict_reg_with_tracking(
        train_dataset_reg, valid_dataset_reg, test_dataset, study.best_params
    )
    plot_regression_metrics(train_maes, valid_maes, train_mses, valid_mses, train_r2s, valid_r2s)

if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        logger.error(f"Main execution failed: {e}")
        raise


[I 2025-03-07 06:47:04,654] A new study created in memory with name: no-name-73cbdaae-39b4-45f2-b753-e042409d3b2d
[I 2025-03-07 06:47:19,086] Trial 0 finished with value: -153.81992792458863 and parameters: {'n_layers': 1, 'graph_conv_size_0': 64, 'dense_layer_size': 256, 'dropout': 0.11513263961316111, 'batch_size': 50, 'learning_rate': 1.8455220002784957e-05}. Best is trial 0 with value: -153.81992792458863.
[I 2025-03-07 06:47:45,907] Trial 1 finished with value: -182.77093596848542 and parameters: {'n_layers': 2, 'graph_conv_size_0': 128, 'graph_conv_size_1': 32, 'dense_layer_size': 64, 'dropout': 0.08154584358859285, 'batch_size': 50, 'learning_rate': 2.4385469647123442e-05}. Best is trial 0 with value: -153.81992792458863.
[I 2025-03-07 06:48:23,072] Trial 2 finished with value: -2.469782597768003 and parameters: {'n_layers': 3, 'graph_conv_size_0': 128, 'graph_conv_size_1': 32, 'graph_conv_size_2': 64, 'dense_layer_size': 256, 'dropout': 0.1926180783149567, 'batch_size': 32, 'learning_rate': 0.00017760931747335488}. Best is trial 2 with value: -2.469782597768003.
[I 2025-03-07 06:48:53,067] Trial 3 finished with value: -194.5507483952223 and parameters: {'n_layers': 2, 'graph_conv_size_0': 32, 'graph_conv_size_1': 32, 'dense_layer_size': 64, 'dropout': 0.3924108370327444, 'batch_size': 64, 'learning_rate': 9.81321901415855e-05}. Best is trial 2 with value: -2.469782597768003.
[I 2025-03-07 06:49:05,999] Trial 4 finished with value: -8.912298833058061 and parameters: {'n_layers': 1, 'graph_conv_size_0': 64, 'dense_layer_size': 64, 'dropout': 0.43354894424973206, 'batch_size': 50, 'learning_rate': 0.0003411539510545556}. Best is trial 2 with value: -2.469782597768003.
[I 2025-03-07 06:49:31,402] Trial 5 finished with value: -233.68661251813694 and parameters: {'n_layers': 2, 'graph_conv_size_0': 128, 'graph_conv_size_1': 128, 'dense_layer_size': 64, 'dropout': 0.4042039121904243, 'batch_size': 32, 'learning_rate': 1.0169653442456986e-05}. Best is trial 2 with value: -2.469782597768003.
[I 2025-03-07 06:50:50,623] Trial 6 finished with value: -70.68896339829045 and parameters: {'n_layers': 3, 'graph_conv_size_0': 128, 'graph_conv_size_1': 32, 'graph_conv_size_2': 64, 'dense_layer_size': 128, 'dropout': 0.12638973600493092, 'batch_size': 32, 'learning_rate': 4.026687778956459e-05}. Best is trial 2 with value: -2.469782597768003.
[I 2025-03-07 06:51:34,138] Trial 7 finished with value: 0.5274826783102737 and parameters: {'n_layers': 3, 'graph_conv_size_0': 64, 'graph_conv_size_1': 128, 'graph_conv_size_2': 128, 'dense_layer_size': 64, 'dropout': 0.03744853181909019, 'batch_size': 32, 'learning_rate': 0.0006664021079484685}. Best is trial 7 with value: 0.5274826783102737.
[I 2025-03-07 06:52:07,778] Trial 8 finished with value: 0.5306667168085206 and parameters: {'n_layers': 2, 'graph_conv_size_0': 32, 'graph_conv_size_1': 64, 'dense_layer_size': 256, 'dropout': 0.2315928108944607, 'batch_size': 50, 'learning_rate': 0.0002344115161862476}. Best is trial 8 with value: 0.5306667168085206.
[I 2025-03-07 06:52:21,515] Trial 9 finished with value: -3.4516334688646193 and parameters: {'n_layers': 1, 'graph_conv_size_0': 64, 'dense_layer_size': 128, 'dropout': 0.36012057062341474, 'batch_size': 64, 'learning_rate': 0.0005291324813507126}. Best is trial 8 with value: 0.5306667168085206.
[I 2025-03-07 06:52:52,140] Trial 10 finished with value: -10.34967438003083 and parameters: {'n_layers': 2, 'graph_conv_size_0': 32, 'graph_conv_size_1': 64, 'dense_layer_size': 256, 'dropout': 0.2775789183282005, 'batch_size': 50, 'learning_rate': 8.924539843417526e-05}. Best is trial 8 with value: 0.5306667168085206.
[I 2025-03-07 06:54:29,434] Trial 11 finished with value: 0.7997784873626563 and parameters: {'n_layers': 3, 'graph_conv_size_0': 32, 'graph_conv_size_1': 128, 'graph_conv_size_2': 128, 'dense_layer_size': 256, 'dropout': 0.011976190232631589, 'batch_size': 32, 'learning_rate': 0.0008513922579837786}. Best is trial 11 with value: 0.7997784873626563.
[I 2025-03-07 06:55:05,255] Trial 12 finished with value: -5.4915967449115906 and parameters: {'n_layers': 3, 'graph_conv_size_0': 32, 'graph_conv_size_1': 64, 'graph_conv_size_2': 32, 'dense_layer_size': 256, 'dropout': 0.2645556350228477, 'batch_size': 32, 'learning_rate': 0.0002420573272309627}. Best is trial 11 with value: 0.7997784873626563.
[I 2025-03-07 06:55:31,157] Trial 13 finished with value: -0.6783974166354652 and parameters: {'n_layers': 2, 'graph_conv_size_0': 32, 'graph_conv_size_1': 64, 'dense_layer_size': 256, 'dropout': 0.19429802403708668, 'batch_size': 50, 'learning_rate': 0.000970734497980461}. Best is trial 11 with value: 0.7997784873626563.
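
To dig into a study like the one logged above, Optuna can export every trial to a DataFrame. A minimal sketch, assuming the study object from this run:

# One row per trial: objective value, sampled parameters, timing
study_df = study.trials_dataframe()
print(study_df.sort_values('value', ascending=False).head())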




Friday, February 28, 2025

Predicted Toxicity: Curcumin vs Methylene Blue (MB)

CODE:


# CURCUMIN TOXICITY DATA
import numpy as np
import matplotlib.pyplot as plt

toxicity_predictions = {
    'Hepatotoxicity': 0.69,
    'Neurotoxicity': 0.87,
    'Nephrotoxicity': 0.90,
    'Respiratory toxicity': 0.98,
    'Cardiotoxicity': 0.77,
    'Immunotoxicity': 0.96,
    'Ecotoxicity': 0.73
}

toxicity_types = list(toxicity_predictions.keys())
toxicity_probabilities = list(toxicity_predictions.values())

# METHYLENE BLUE TOXICITY DATA
mb_toxicity_predictions = {
    'Hepatotoxicity': 0.875,        # Human hepatotoxicity from ADMET
    'Neurotoxicity': 0.911,         # Drug-induced neurotoxicity
    'Nephrotoxicity': 0.878,        # Drug-induced nephrotoxicity from ADMET
    'Respiratory toxicity': 0.436,  # Respiratory toxicity from ADMET
    'Cardiotoxicity': 0.77,         # Assumed same as Curcumin
    'Immunotoxicity': 0.098,
    'Ecotoxicity': 0.0              # Assumed 0 for Methylene Blue (no data available)
}

# Ensure the keys are identical to the Curcumin toxicity data
toxicity_types_mb = list(mb_toxicity_predictions.keys())
toxicity_probabilities_mb = list(mb_toxicity_predictions.values())

# Set the width of the bars
bar_width = 0.35

# Set the positions of the bars on the x-axis
r1 = np.arange(len(toxicity_types))
r2 = [x + bar_width for x in r1]

# Plot grouped bars for both molecules
plt.figure(figsize=(12, 8))
plt.bar(r1, toxicity_probabilities, color='blue', width=bar_width, edgecolor='grey', label='Curcumin')
plt.bar(r2, toxicity_probabilities_mb, color='green', width=bar_width, edgecolor='grey', label='Methylene Blue (MB)')

# General layout
plt.xlabel('Toxicity Type', fontsize=12)
plt.ylabel('Probability', fontsize=12)
plt.title('Predicted Toxicity: Curcumin vs Methylene Blue (MB)', fontsize=14)
plt.xticks([r + bar_width / 2 for r in range(len(toxicity_types))], toxicity_types, rotation=45, ha='right', fontsize=10)
plt.ylim(0, 1.1)  # Leave a little headroom above probability 1.0
plt.legend()

plt.tight_layout()
plt.savefig('toxicity_comparison_plot.png')  # Save the plot to a file
plt.show()
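
For a quick numeric comparison to go with the bar chart, the two dictionaries can be lined up in a small table. A minimal sketch, assuming the dictionaries defined above:

import pandas as pd

# Side-by-side table of predicted probabilities, plus their difference
comparison = pd.DataFrame({'Curcumin': toxicity_predictions,
                           'Methylene Blue': mb_toxicity_predictions})
comparison['Difference'] = comparison['Curcumin'] - comparison['Methylene Blue']
print(comparison.round(3))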






Actual vs Predicted Binding Affinity for Curcumin and Methylene Blue with TP53

CODE:


import pandas as pd
import json
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
import optuna
import numpy as np

# Debugging: Start script execution
print("Starting script execution...")

# 1. Data Loading and Preparation
print("Loading data...")
curcumin_data = {
    'Model': list(range(1, 20)),  # Model numbers 1 to 19
    'Calculated_affinity': [-7.204, -6.922, -6.488, -6.332, -6.236, -6.235, -6.234, -6.216, -6.159, -6.151,
                            -6.111, -6.052, -6.004, -5.777, -5.773, -5.663, -5.662, -5.622, -5.526]
}
curcumin_df = pd.DataFrame(curcumin_data)
curcumin_df['Molecule'] = 'Curcumin'

# Methylene Blue binding affinity data (revised)
binding_affinity_json = """
[
    {"Model": 1, "Calculated affinity (kcal/mol)": -5.074},
    {"Model": 2, "Calculated affinity (kcal/mol)": -5.073},
    {"Model": 3, "Calculated affinity (kcal/mol)": -5.031},
    {"Model": 4, "Calculated affinity (kcal/mol)": -4.927},
    {"Model": 5, "Calculated affinity (kcal/mol)": -4.807},
    {"Model": 6, "Calculated affinity (kcal/mol)": -4.766},
    {"Model": 7, "Calculated affinity (kcal/mol)": -4.753},
    {"Model": 8, "Calculated affinity (kcal/mol)": -4.751},
    {"Model": 9, "Calculated affinity (kcal/mol)": -4.735},
    {"Model": 10, "Calculated affinity (kcal/mol)": -4.666},
    {"Model": 11, "Calculated affinity (kcal/mol)": -4.656},
    {"Model": 12, "Calculated affinity (kcal/mol)": -4.621},
    {"Model": 13, "Calculated affinity (kcal/mol)": -4.472},
    {"Model": 14, "Calculated affinity (kcal/mol)": -4.436},
    {"Model": 15, "Calculated affinity (kcal/mol)": -4.399},
    {"Model": 16, "Calculated affinity (kcal/mol)": -4.390},
    {"Model": 17, "Calculated affinity (kcal/mol)": -4.351},
    {"Model": 18, "Calculated affinity (kcal/mol)": -4.266},
    {"Model": 19, "Calculated affinity (kcal/mol)": -4.245},
    {"Model": 20, "Calculated affinity (kcal/mol)": -4.127}
]
"""
binding_affinity_data = json.loads(binding_affinity_json)
mb_df = pd.DataFrame(binding_affinity_data)
mb_df['Molecule'] = 'Methylene Blue'
mb_df.rename(columns={'Calculated affinity (kcal/mol)': 'Calculated_affinity'}, inplace=True)

combined_df = pd.concat([curcumin_df, mb_df], ignore_index=True)

# Debugging: Check combined dataset
print("Combined dataset:")
print(combined_df.head())

X = combined_df[['Model']]
y = combined_df['Calculated_affinity']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Debugging: Data split check
print(f"Training set size: {len(X_train)}, Test set size: {len(X_test)}")

# Define the objective function for Optuna
def objective(trial):
    n_estimators = trial.suggest_int('n_estimators', 50, 200)
    max_depth = trial.suggest_int('max_depth', 3, 15)
    min_samples_split = trial.suggest_int('min_samples_split', 2, 10)

    model = RandomForestRegressor(n_estimators=n_estimators,
                                  max_depth=max_depth,
                                  min_samples_split=min_samples_split,
                                  random_state=42)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    mse = mean_squared_error(y_test, y_pred)
    return mse

# Run Optuna optimization
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=50)

best_params = study.best_params
print(f"Best hyperparameters: {best_params}")

final_rf_model = RandomForestRegressor(**best_params)
final_rf_model.fit(X_train, y_train)

# Calculate performance metrics on the test set
y_pred_test = final_rf_model.predict(X_test)
mse_test = mean_squared_error(y_test, y_pred_test)
r2_test = r2_score(y_test, y_pred_test)

# Print performance metrics for the test set
print(f"Test Set Mean Squared Error (MSE): {mse_test:.4f}")
print(f"Test Set R² Score: {r2_test:.4f}")

# Make predictions on the entire dataset for plotting
combined_df['Predicted_affinity'] = final_rf_model.predict(combined_df[['Model']])

plt.figure(figsize=(12, 8))
plt.scatter(combined_df[combined_df['Molecule'] == 'Curcumin']['Calculated_affinity'],
            combined_df[combined_df['Molecule'] == 'Curcumin']['Predicted_affinity'],
            color='blue', label='Curcumin')
plt.scatter(combined_df[combined_df['Molecule'] == 'Methylene Blue']['Calculated_affinity'],
            combined_df[combined_df['Molecule'] == 'Methylene Blue']['Predicted_affinity'],
            color='green', label='Methylene Blue')
plt.plot([combined_df['Calculated_affinity'].min(), combined_df['Calculated_affinity'].max()],
         [combined_df['Calculated_affinity'].min(), combined_df['Calculated_affinity'].max()],
         linestyle='--', color='red', label='Ideal Prediction')
plt.xlabel('Actual Calculated Affinity (kcal/mol)')
plt.ylabel('Predicted Calculated Affinity (kcal/mol)')
plt.title('Actual vs Predicted Binding Affinities')
plt.legend()
plt.grid(True)
plt.tight_layout()

# Save and display plot
output_file = 'actual_vs_predicted_affinity_combined.png'
plt.savefig(output_file)
print(f"Plot saved as: {output_file}")

# Show the plot if running interactively
plt.show()


OUTPUT:

Best hyperparameters: {'n_estimators': 104, 'max_depth': 3, 'min_samples_split': 9}
Test Set Mean Squared Error (MSE): 0.6499
Test Set R² Score: -0.1061
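
A negative R² means the tuned forest did worse on the held-out split than simply predicting the mean affinity, which is unsurprising with the model rank as the only feature. A minimal baseline check (a sketch, assuming X_train, y_train, X_test, y_test from the script above):

from sklearn.dummy import DummyRegressor

# Baseline: always predict the mean of the training targets
baseline = DummyRegressor(strategy='mean').fit(X_train, y_train)
y_base = baseline.predict(X_test)
print(f"Baseline MSE: {mean_squared_error(y_test, y_base):.4f}")
print(f"Baseline R^2: {r2_score(y_test, y_base):.4f}")  # close to 0 by construction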

Introductory video

[Embedded video]


Thursday, February 27, 2025

DRUG EFFICACY, DOSE RESPONSE

CODE:


# Example extension to predict drug efficacy
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV

# Define df_original: Replace this with your actual data
data_original = {
    'Name': ['Drug_A', 'Drug_B', 'Drug_C', 'Drug_D', 'Drug_E'],  # Example drug names
    'Efficacy': [0.8, 0.6, 0.9, 0.7, 0.5],  # Example efficacy values
    'Volume (Å³)': [1964.45, 1780.27, 488.96, 375.4, 285.7],  # Example molecular volume values
    'Surface (Å²)': [2366.68, 2400.5, 833.79, 736.43, 409.39],  # Example surface area values
    'Binding Affinity': [5.2, 4.8, 6.1, 5.5, 7.2],  # Example binding affinity values
    'ADMET_Absorption': [0.8, 0.7, 0.9, 0.6, 0.5],  # Example absorption values
    'ADMET_Distribution': [0.9, 0.8, 0.7, 0.9, 0.6],  # Example distribution values
    'Toxicity': [0.1, 0.2, 0.1, 0.3, 0.2],  # Example toxicity values
}

df_original = pd.DataFrame(data_original)

# Calculate volume to surface ratio
df_original['Volume_to_Surface_Ratio'] = df_original['Volume (Å³)'] / df_original['Surface (Å²)']

# Define features (X) and target (y) for efficacy prediction
X_efficacy = df_original[['Volume (Å³)', 'Surface (Å²)', 'Volume_to_Surface_Ratio', 'Binding Affinity', 'ADMET_Absorption', 'ADMET_Distribution', 'Toxicity']]
y_efficacy = df_original['Efficacy']

# With only 5 samples, a train/test split is not meaningful, so all data is used for training.
# Keep a reference to the fitted scaler so new inputs can be transformed consistently.
scaler_efficacy = StandardScaler()
X_train_efficacy_scaled = scaler_efficacy.fit_transform(X_efficacy)
y_train_efficacy = y_efficacy

# Perform hyperparameter tuning for efficacy model
# Reduced cv to 3, which is less than the number of samples (5) in X_train_efficacy
param_grid_efficacy = {
    'n_estimators': [10, 50, 100],
    'max_depth': [None, 5, 10],
    'min_samples_split': [2, 5],
    'min_samples_leaf': [1, 5]
}

grid_search_efficacy = GridSearchCV(RandomForestRegressor(), param_grid_efficacy, cv=3, scoring='neg_mean_squared_error')
grid_search_efficacy.fit(X_train_efficacy_scaled, y_train_efficacy)

# Evaluate the best efficacy model
best_model_efficacy = grid_search_efficacy.best_estimator_
print(f"Best Parameters for Efficacy Model: {grid_search_efficacy.best_params_}")
print(f"Best Score for Efficacy Model: {-grid_search_efficacy.best_score_}")

# Example prediction for drug efficacy
example_input_efficacy = pd.DataFrame({
    'Volume (Å³)': [1000],
    'Surface (Å²)': [1500],
    'Volume_to_Surface_Ratio': [1000/1500],
    'Binding Affinity': [5.5],
    'ADMET_Absorption': [0.8],
    'ADMET_Distribution': [0.9],
    'Toxicity': [0.2]
})

# Transform the new input with the scaler fitted on the training data;
# re-fitting a StandardScaler on a single row would zero out every feature.
example_input_efficacy_scaled = scaler_efficacy.transform(example_input_efficacy)
example_prediction_efficacy = best_model_efficacy.predict(example_input_efficacy_scaled)
print(f"Example Prediction for Drug Efficacy: {example_prediction_efficacy[0]}")

OUTPUT:

Best Parameters for Efficacy Model: {'max_depth': 5, 'min_samples_leaf': 1, 'min_samples_split': 2, 'n_estimators': 50}
Best Score for Efficacy Model: 0.025899333333333333
Example Prediction for Drug Efficacy: 0.6400000000000002
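
Even with a forest this small, it is informative to see which inputs drive the prediction. A minimal sketch, assuming best_model_efficacy and X_efficacy from the script above:

import pandas as pd

# Rank features by the random forest's impurity-based importances
importances = pd.Series(best_model_efficacy.feature_importances_, index=X_efficacy.columns)
print(importances.sort_values(ascending=False))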

DOSE RESPONSE CURVE:

CODE:

import numpy as np
import matplotlib.pyplot as plt

# Example data for Methylene Blue and another drug
methylene_blue_concentrations = np.logspace(-6, -3, 10)  # Log-spaced concentrations
methylene_blue_responses = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 95])  # Example responses

other_drug_concentrations = np.logspace(-6, -3, 10)
other_drug_responses = np.array([5, 15, 25, 35, 45, 55, 65, 75, 85, 90])

# Plotting
plt.figure(figsize=(8, 6))
plt.plot(methylene_blue_concentrations, methylene_blue_responses, label='Methylene Blue')
plt.plot(other_drug_concentrations, other_drug_responses, label='Other Drug')
plt.xscale('log')  # Logarithmic scale for x-axis
plt.xlabel('Drug Concentration (M)')
plt.ylabel('Response (%)')
plt.title('Dose-Response Curve')
plt.legend()
plt.show()
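
Dose-response data like this is usually summarized by fitting a four-parameter logistic (Hill) curve and reading off the EC50. A minimal sketch with scipy.optimize.curve_fit, using the example arrays above (the fitted numbers are illustrative, not measured):

from scipy.optimize import curve_fit

def hill(c, bottom, top, ec50, n):
    # Four-parameter logistic (Hill) dose-response model
    return bottom + (top - bottom) / (1.0 + (ec50 / c) ** n)

# Initial guesses: bottom, top, EC50, Hill slope
p0 = [0.0, 100.0, 1e-4, 1.0]
params, _ = curve_fit(hill, methylene_blue_concentrations, methylene_blue_responses,
                      p0=p0, maxfev=10000)
print(f"Estimated EC50: {params[2]:.2e} M, Hill slope: {params[3]:.2f}")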



