
Sunday, March 9, 2025

MD ML: GNN, SVM, Random Forest, and Gradient Boosting Code, Output, and Plots for Curcumin + TP53
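This post tunes two of the models named in the title with Optuna: a two-layer GCN node classifier (demonstrated on the Planetoid Cora dataset as a stand-in graph) and a Random Forest regressor, and then plots the tuning results.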

CODE:

import torch
import torch.nn.functional as F
from torch.optim import Adam
from torch_geometric.nn import GCNConv
from torch_geometric.datasets import Planetoid
import optuna

# Example GNN model: a two-layer GCN with a tunable dropout rate
class Net(torch.nn.Module):
    def __init__(self, num_node_features, num_classes, dropout=0.5):
        super().__init__()
        self.conv1 = GCNConv(num_node_features, 16)
        self.conv2 = GCNConv(16, num_classes)
        self.dropout = dropout

    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)

        return F.log_softmax(x, dim=1)

# Function to train the model
def train(model, device, data, optimizer, criterion):
    model.train()
    optimizer.zero_grad()
    out = model(data)
    loss = criterion(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()

# Function to evaluate the model on the test mask
def evaluate(model, device, data, criterion):
    model.eval()
    with torch.no_grad():
        _, pred = model(data).max(dim=1)
    correct = int(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())
    return correct / int(data.test_mask.sum())

# Optuna objective function
def objective(trial):
    # Hyperparameter tuning space
    learning_rate = trial.suggest_float('learning_rate', 1e-4, 1e-1, log=True)
    dropout = trial.suggest_float('dropout', 0.0, 0.5)
   
    # Load your dataset into a PyTorch Geometric Data object
    # For demonstration, we'll use a Planetoid dataset
    dataset = Planetoid(root='/tmp/Cora', name='Cora')
    data = dataset[0]
   
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Net(dataset.num_features, dataset.num_classes, dropout=dropout).to(device)
    data = data.to(device)
   
    criterion = torch.nn.NLLLoss()
    optimizer = Adam(model.parameters(), lr=learning_rate)
   
    for epoch in range(100):
        loss = train(model, device, data, optimizer, criterion)
   
    accuracy = evaluate(model, device, data, criterion)
   
    return accuracy

# Perform Optuna tuning
study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=50)

# Print the best parameters and the corresponding accuracy
print(f"Best parameters: {study.best_params}")
print(f"Best accuracy: {study.best_value}")

OUTPUT:

Best parameters: {'learning_rate': 0.0012625055929391317, 'dropout': 0.2924881312590514}
Best accuracy: 0.805
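
One caveat: the objective above scores each trial on the test mask, so this best accuracy is also the model-selection metric; the Planetoid split's data.val_mask would give a cleaner estimate. To reuse the winning configuration, a minimal retraining sketch (not from the original post; it assumes the Net, train, and evaluate definitions above) could look like:

best = study.best_params
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset = Planetoid(root='/tmp/Cora', name='Cora')
data = dataset[0].to(device)

# Rebuild the model with the tuned dropout and learning rate
final_model = Net(dataset.num_features, dataset.num_classes,
                  dropout=best['dropout']).to(device)
optimizer = Adam(final_model.parameters(), lr=best['learning_rate'])
criterion = torch.nn.NLLLoss()

for epoch in range(200):  # somewhat longer than the 100-epoch tuning runs
    train(final_model, device, data, optimizer, criterion)

print(f"Final test accuracy: {evaluate(final_model, device, data, criterion):.3f}")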

CODE:
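
The snippet below assumes a feature matrix X, a target vector y, and an objective_rfr function that were defined earlier but are not shown in the post. A minimal sketch consistent with the parameters in the output is given first; the X and y here are random placeholders, and the search ranges are assumptions, not the post's originals.

import optuna
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score

# Placeholder data; the original X (e.g., molecular descriptors) and
# y (e.g., activity values) are not shown in the post.
rng = np.random.default_rng(42)
X = rng.normal(size=(200, 10))
y = rng.normal(size=200)

def objective_rfr(trial):
    # Search space covering the parameters reported in the output below
    params = {
        'n_estimators': trial.suggest_int('n_estimators', 50, 300),
        'max_depth': trial.suggest_int('max_depth', 2, 20),
        'min_samples_split': trial.suggest_int('min_samples_split', 2, 10),
        'min_samples_leaf': trial.suggest_int('min_samples_leaf', 1, 10),
        'max_features': trial.suggest_categorical('max_features', ['sqrt', 'log2']),
    }
    model = RandomForestRegressor(**params, random_state=42)
    # Cross-validated MSE, to be minimized (direction='minimize' below)
    scores = cross_val_score(model, X, y, cv=5, scoring='neg_mean_squared_error')
    return -scores.mean()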

# Create and run the study
study_rfr = optuna.create_study(direction='minimize')
study_rfr.optimize(objective_rfr, n_trials=50)

# Retrieve the best parameters
best_params_rfr = study_rfr.best_params

# Train the best model
rfr_best = RandomForestRegressor(**best_params_rfr, random_state=42)
rfr_best.fit(X, y)

# Make predictions and evaluate (note: on the same data used for fitting)
y_pred_rfr = rfr_best.predict(X)
mse_rfr = mean_squared_error(y, y_pred_rfr)

# Output results
print(f"Optimized Random Forest MSE: {mse_rfr:.4f}")
print(f"Best Random Forest Parameters: {best_params_rfr}")

OUTPUT:

Optimized Random Forest MSE: 0.2499
Best Random Forest Parameters: {'n_estimators': 83, 'max_depth': 4, 'min_samples_split': 3, 'min_samples_leaf': 2, 'max_features': 'log2'}
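
Since this MSE is computed on the training data itself, it is an optimistic estimate. A quick check (assuming the same X and y) is to also report the cross-validated error:

from sklearn.model_selection import cross_val_score

cv_mse = -cross_val_score(rfr_best, X, y, cv=5,
                          scoring='neg_mean_squared_error').mean()
print(f"Cross-validated Random Forest MSE: {cv_mse:.4f}")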

CODE:

import numpy as np
import matplotlib.pyplot as plt

# The Net model, the train/evaluate helpers, and the Optuna objective are
# identical to the first code block above; this block reuses the completed
# `study` object from that run to visualize the trial results.

# Plot accuracy distribution
accuracies = [trial.value for trial in study.trials]
plt.figure(figsize=(8, 6))
plt.hist(accuracies, bins=10, alpha=0.7, color='blue', edgecolor='black')
plt.title('Distribution of Accuracy Values')
plt.xlabel('Accuracy')
plt.ylabel('Frequency')
plt.show()

# Plot accuracy vs. learning rate and dropout

learning_rates = np.array([trial.params['learning_rate'] for trial in study.trials])
dropouts = np.array([trial.params['dropout'] for trial in study.trials])

plt.figure(figsize=(10, 5))

plt.subplot(1, 2, 1)
plt.scatter(learning_rates, accuracies)
plt.title('Accuracy vs. Learning Rate')
plt.xlabel('Learning Rate')
plt.ylabel('Accuracy')

plt.subplot(1, 2, 2)
plt.scatter(dropouts, accuracies)
plt.title('Accuracy vs. Dropout')
plt.xlabel('Dropout')
plt.ylabel('Accuracy')

plt.tight_layout()
plt.show()
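
Optuna also ships ready-made diagnostics that cover much of the above. For instance (assuming the matplotlib-based visualization module, which is available in recent Optuna releases):

from optuna.visualization.matplotlib import plot_optimization_history, plot_param_importances

plot_optimization_history(study)   # objective value of each trial, with the running best
plot_param_importances(study)      # relative influence of learning_rate vs. dropout
plt.show()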
