Commit
Fix optimization process and update test cases for quantum protein development
devin-ai-integration[bot] committed Sep 27, 2024
1 parent 403e869 commit 4babc30
Showing 2 changed files with 156 additions and 0 deletions.
105 changes: 105 additions & 0 deletions quantum_protein_development.py
@@ -0,0 +1,105 @@
import pennylane as qml
import numpy as np

@qml.qnode(device=qml.device("default.qubit", wires=1))
def qubit_layer(params, input_val):
    # Single-qubit variational block: encode the input as an RX rotation,
    # apply trainable RY/RZ rotations, and return the Pauli-Z expectation value.
    qml.RX(input_val, wires=0)
    qml.RY(params[0], wires=0)
    qml.RZ(params[1], wires=0)
    return qml.expval(qml.PauliZ(0))

class QuantumProteinFolding:
    def __init__(self, num_qubits, num_layers):
        self.num_qubits = num_qubits
        self.num_layers = num_layers
        self.dev = qml.device("default.qubit", wires=num_qubits)
        # Trainable parameters: one (RY, RZ) angle pair per qubit and per layer.
        self.params = qml.numpy.array(
            np.random.uniform(low=-np.pi, high=np.pi, size=(num_layers, num_qubits, 2)),
            requires_grad=True,
        )

    def quantum_protein_layer(self, inputs, params):
        # Evaluate the single-qubit circuit once per input value,
        # cycling through the per-qubit parameter sets.
        outputs = []
        for i in range(len(inputs)):
            outputs.append(qubit_layer(params=params[i % self.num_qubits], input_val=inputs[i]))
        return np.array(outputs)

    def forward(self, amino_acid_sequence):
        # Apply the variational layer once per configured layer.
        x = np.array(amino_acid_sequence)
        for layer in range(self.num_layers):
            x = self.quantum_protein_layer(x, self.params[layer])
        return x

    def protein_folding_simulation(self, amino_acid_sequence):
        """
        Simulate protein folding using quantum circuits.

        Args:
            amino_acid_sequence (list): A list of numbers representing amino acids.

        Returns:
            np.array: Simulated protein structure.

        Raises:
            ValueError: If the amino_acid_sequence is empty.
        """
        if len(amino_acid_sequence) == 0:
            raise ValueError("The amino acid sequence cannot be empty.")
        return self.forward(amino_acid_sequence)

    def optimize_folding(self, amino_acid_sequence, num_iterations=200):
        """
        Optimize the protein folding simulation.

        Args:
            amino_acid_sequence (list): A list of numbers representing amino acids.
            num_iterations (int): Number of optimization iterations.

        Returns:
            np.array: Optimized protein structure.
        """
        opt = qml.AdamOptimizer(stepsize=0.05)

        def cost(params):
            # Cost function: minimize the sum of squares of the folded protein outputs.
            self.params = params.reshape(self.num_layers, self.num_qubits, 2)
            folded_protein = self.protein_folding_simulation(amino_acid_sequence)
            return qml.math.sum(folded_protein**2)

        initial_params = self.params.copy()
        params = initial_params.flatten()

        for i in range(num_iterations):
            params, cost_val = opt.step_and_cost(cost, params)

        # Evaluate the folding with the optimized parameters.
        optimized_params = params.reshape(self.num_layers, self.num_qubits, 2)
        self.params = optimized_params
        optimized_result = self.protein_folding_simulation(amino_acid_sequence)

        # Re-evaluate with the initial parameters for a fair comparison.
        self.params = initial_params
        initial_result = self.protein_folding_simulation(amino_acid_sequence)

        if qml.math.sum(optimized_result**2) < qml.math.sum(initial_result**2):
            # Keep the optimized parameters only if they actually lowered the cost.
            self.params = optimized_params
            return optimized_result
        else:
            return initial_result

# Example usage
if __name__ == "__main__":
    num_qubits = 4
    num_layers = 2
    qpf = QuantumProteinFolding(num_qubits, num_layers)

    # Example amino acid sequence (simplified as numbers)
    amino_acid_sequence = [0.1, 0.2, 0.3, 0.4]

    # Simulate protein folding
    folded_protein = qpf.protein_folding_simulation(amino_acid_sequence)
    print("Simulated folded protein structure:", folded_protein)

    # Optimize folding
    optimized_protein = qpf.optimize_folding(amino_acid_sequence)
    print("Optimized folded protein structure:", optimized_protein)
51 changes: 51 additions & 0 deletions tests/test_quantum_protein_development.py
@@ -0,0 +1,51 @@
import unittest
import numpy as np
from quantum_protein_development import QuantumProteinFolding

class TestQuantumProteinFolding(unittest.TestCase):
    def setUp(self):
        self.num_qubits = 4
        self.num_layers = 2
        self.qpf = QuantumProteinFolding(self.num_qubits, self.num_layers)

    def test_initialization(self):
        self.assertEqual(self.qpf.num_qubits, self.num_qubits)
        self.assertEqual(self.qpf.num_layers, self.num_layers)
        self.assertEqual(self.qpf.params.shape, (self.num_layers, self.num_qubits, 2))

    def test_protein_folding_simulation(self):
        amino_acid_sequence = [0.1, 0.2, 0.3, 0.4]
        folded_protein = self.qpf.protein_folding_simulation(amino_acid_sequence)
        self.assertIsInstance(folded_protein, np.ndarray)
        self.assertEqual(len(folded_protein), 4)  # Expected output size matches input size

    def test_optimize_folding(self):
        amino_acid_sequence = [0.1, 0.2, 0.3, 0.4]
        optimized_protein = self.qpf.optimize_folding(amino_acid_sequence, num_iterations=10)
        self.assertIsInstance(optimized_protein, np.ndarray)
        self.assertEqual(len(optimized_protein), 4)  # Expected output size matches input size

    def test_empty_sequence(self):
        with self.assertRaises(ValueError):
            self.qpf.protein_folding_simulation([])

    def test_large_sequence(self):
        large_sequence = np.random.rand(100)
        folded_protein = self.qpf.protein_folding_simulation(large_sequence)
        self.assertEqual(len(folded_protein), 100)  # Expected output size matches input size

    def test_optimization_improvement(self):
        amino_acid_sequence = [0.1, 0.2, 0.3, 0.4]
        initial_folding = self.qpf.protein_folding_simulation(amino_acid_sequence)
        optimized_folding = self.qpf.optimize_folding(amino_acid_sequence, num_iterations=50)

        # Calculate the cost (sum of squares) for both initial and optimized folding
        initial_cost = np.sum(initial_folding**2)
        optimized_cost = np.sum(optimized_folding**2)

        # Check if the optimized cost is lower (better) than the initial cost
        self.assertLess(optimized_cost, initial_cost,
                        f"Optimization did not improve the folding. Initial cost: {initial_cost}, Optimized cost: {optimized_cost}")

if __name__ == '__main__':
    unittest.main()
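
Assuming the layout shown in this commit (quantum_protein_development.py at the repository root and the test module under tests/), the new tests can be run from the repository root with python -m unittest tests.test_quantum_protein_development, or programmatically as in the minimal sketch below.

# Minimal sketch: run the new test case programmatically.
# Assumes it is executed from the repository root so both modules are importable.
import unittest

from tests.test_quantum_protein_development import TestQuantumProteinFolding

suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestQuantumProteinFolding)
unittest.TextTestRunner(verbosity=2).run(suite)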
