
Commit [DEPENDENCIES]
Kye committed Jan 4, 2024
1 parent c530966 commit f801ade
Showing 15 changed files with 697 additions and 52 deletions.
4 changes: 2 additions & 2 deletions mm_mamba/model.py
@@ -1,6 +1,6 @@
-from torch import nn, Tensor
-from torch import nn, Tensor
+from torch import Tensor, nn
from zeta import RMSNorm

from mm_mamba import MultiModalMambaBlock


9 changes: 3 additions & 6 deletions pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "mmm-zeta"
version = "0.0.2"
version = "0.0.4"
description = "MMM - Pytorch"
license = "MIT"
authors = ["Kye Gomez <[email protected]>"]
@@ -28,11 +28,8 @@ packages = [
[tool.poetry.dependencies]
python = "^3.6"
swarms = "*"
torch = "*"
zetascale = "*"

[tool.poetry.dev-dependencies]
# Add development dependencies here
torch = "2.1.2"
zetascale = "1.4.0"


[tool.poetry.group.lint.dependencies]
6 changes: 3 additions & 3 deletions requirements.txt
@@ -1,3 +1,3 @@
-torch
-zetascale
-swarms
+torch==2.1.2
+zetascale==1.4.0
+swarms==3.1.0
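The runtime dependencies are now pinned to exact releases. As a quick sanity check (not part of the commit, and assuming Python 3.8+ for importlib.metadata), an environment can be verified against these pins:

```python
# Illustration only: confirm the installed packages match the versions pinned above.
from importlib.metadata import PackageNotFoundError, version

PINS = {"torch": "2.1.2", "zetascale": "1.4.0", "swarms": "3.1.0"}

for package, pinned in PINS.items():
    try:
        installed = version(package)
    except PackageNotFoundError:
        print(f"{package}: not installed (expected {pinned})")
        continue
    status = "OK" if installed == pinned else f"mismatch, expected {pinned}"
    print(f"{package}: {installed} ({status})")
```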
81 changes: 81 additions & 0 deletions scripts/auto_tests_docs/auto_docs.py
@@ -0,0 +1,81 @@
###### VERSION 2
import inspect
import os
import threading

from dotenv import load_dotenv

from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from swarms import OpenAIChat

##########
from zeta.nn.modules.quantized_layernorm import QuantizedLN
from zeta.nn.modules.slerp_model_merger import SLERPModelMerger
from zeta.nn.modules.avg_model_merger import AverageModelMerger

####################
load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")

model = OpenAIChat(
model_name="gpt-4",
openai_api_key=api_key,
max_tokens=3000,
)


def process_documentation(cls):
"""
Process the documentation for a given class using OpenAI model and save it in a Markdown file.
"""
doc = inspect.getdoc(cls)
source = inspect.getsource(cls)
input_content = (
"Class Name:"
f" {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)

# Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content)
processed_content = model(
DOCUMENTATION_WRITER_SOP(input_content, "zeta.nn.modules")
)

# doc_content = f"# {cls.__name__}\n\n{processed_content}\n"
doc_content = f"{processed_content}\n"

# Create the directory if it doesn't exist
dir_path = "docs/zeta/nn/modules"
os.makedirs(dir_path, exist_ok=True)

# Write the processed documentation to a Markdown file
file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.md")
with open(file_path, "w") as file:
file.write(doc_content)

print(f"Documentation generated for {cls.__name__}.")


def main():
classes = [
QuantizedLN,
SLERPModelMerger,
AverageModelMerger,
]

threads = []
for cls in classes:
thread = threading.Thread(target=process_documentation, args=(cls,))
threads.append(thread)
thread.start()

# Wait for all threads to complete
for thread in threads:
thread.join()

print("Documentation generated in 'docs/zeta/nn/modules' directory.")


if __name__ == "__main__":
main()
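For a single class, the generator can also be exercised without the thread fan-out in main(); a minimal sketch (not part of the commit, assuming OPENAI_API_KEY is set and the repository root is on the import path):

```python
# Illustration only: generate documentation for one class serially.
from scripts.auto_tests_docs.auto_docs import process_documentation
from zeta.nn.modules.quantized_layernorm import QuantizedLN

process_documentation(QuantizedLN)  # writes docs/zeta/nn/modules/quantizedln.md
```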
78 changes: 78 additions & 0 deletions scripts/auto_tests_docs/auto_docs_functions.py
@@ -0,0 +1,78 @@
import inspect
import os
import sys
import threading

from dotenv import load_dotenv

from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from swarms import OpenAIChat
from zeta.ops import *

load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")

model = OpenAIChat(
model_name="gpt-4-1106-preview",
openai_api_key=api_key,
max_tokens=2000,
)


def process_documentation(item):
"""
Process the documentation for a given function using OpenAI model and save it in a Markdown file.
"""
try:
doc = inspect.getdoc(item)
source = inspect.getsource(item)
input_content = (
f"Name: {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)

# Process with OpenAI model
processed_content = model(
DOCUMENTATION_WRITER_SOP(input_content, "zeta.ops")
)

doc_content = f"# {item.__name__}\n\n{processed_content}\n"

# Create the directory if it doesn't exist
dir_path = "docs/zeta/ops"
os.makedirs(dir_path, exist_ok=True)

# Write the processed documentation to a Markdown file
file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md")
with open(file_path, "w") as file:
file.write(doc_content)

print(f"Succesfully processed {item.__name__}.")
except Exception as e:
print(f"Error processing {item.__name__}: {e}")


def main():
# Gathering all functions from the zeta.ops module
functions = [
obj
for name, obj in inspect.getmembers(sys.modules["zeta.ops"])
if inspect.isfunction(obj)
]

threads = []
for func in functions:
thread = threading.Thread(target=process_documentation, args=(func,))
threads.append(thread)
thread.start()

# Wait for all threads to complete
for thread in threads:
thread.join()

print("Documentation generated in 'docs/zeta/ops' directory.")


if __name__ == "__main__":
main()
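The function discovery in main() relies on inspect.getmembers plus inspect.isfunction, which only picks up pure-Python functions exposed by the module (built-ins and classes are skipped). A standalone sketch of that filter (illustration only, shown here on the standard-library json module rather than zeta.ops):

```python
# Illustration only: collect the plain Python functions a module exposes.
import inspect
import json

functions = [
    obj for _name, obj in inspect.getmembers(json) if inspect.isfunction(obj)
]
print(sorted(fn.__name__ for fn in functions))  # e.g. ['detect_encoding', 'dump', 'dumps', 'load', 'loads']
```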
107 changes: 107 additions & 0 deletions scripts/auto_tests_docs/auto_tests.py
@@ -0,0 +1,107 @@
import inspect
import os
import re
import threading
from swarms import OpenAIChat
from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT


# Import the classes to test from zeta.nn.modules.
# Tests are generated automatically in the tests folder using parallelized GPT-4 calls,
# with each file's logic handled autonomously, which makes the testing process much faster:
# import your classes or functions and the tests are generated for you.
# Automating tests and documentation frees up at least 75% of your time to focus on the actual logic of your code.
from zeta.nn.modules.triple_skip import TripleSkipBlock
from zeta.nn.modules.dynamic_routing_block import DynamicRoutingBlock
from zeta.nn.modules.gated_residual_block import GatedResidualBlock
from zeta.nn.modules.stochastic_depth import StochasticSkipBlocK


####################


from dotenv import load_dotenv

load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")

model = OpenAIChat(
model_name="gpt-4",
openai_api_key=api_key,
max_tokens=500,
)


def extract_code_from_markdown(markdown_content: str):
"""
Extracts code blocks from a Markdown string and returns them as a single string.
Args:
- markdown_content (str): The Markdown content as a string.
Returns:
- str: A single string containing all the code blocks separated by newlines.
"""
# Regular expression for fenced code blocks
pattern = r"```(?:\w+\n)?(.*?)```"
matches = re.findall(pattern, markdown_content, re.DOTALL)

# Concatenate all code blocks separated by newlines
return "\n".join(code.strip() for code in matches)


def create_test(cls):
"""
Process the documentation for a given class using OpenAI model and save it in a Python file.
"""
doc = inspect.getdoc(cls)
source = inspect.getsource(cls)
input_content = (
"Class Name:"
f" {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)

# Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content)
processed_content = model(
TEST_WRITER_SOP_PROMPT(input_content, "zeta", "zeta.nn.modules")
)
processed_content = extract_code_from_markdown(processed_content)

doc_content = f"{processed_content}"

# Create the directory if it doesn't exist
dir_path = "tests/nn/modules"
os.makedirs(dir_path, exist_ok=True)

# Write the processed documentation to a Python file
file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.py")
with open(file_path, "w") as file:
file.write(doc_content)

print(f"Test generated for {cls.__name__}.")


def main():
classes = [
TripleSkipBlock,
DynamicRoutingBlock,
GatedResidualBlock,
StochasticSkipBlocK,
]

threads = []
for cls in classes:
thread = threading.Thread(target=create_test, args=(cls,))
threads.append(thread)
thread.start()

# Wait for all threads to complete
for thread in threads:
thread.join()

print("Tests generated in 'tests/nn/modules' directory.")


if __name__ == "__main__":
main()
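For reference, the fenced-code extraction in extract_code_from_markdown above can be checked in isolation; a minimal sketch (not part of the commit) using the same regular expression:

```python
# Illustration only: the pattern strips ``` fences (with or without a language tag)
# and keeps only the code inside them.
import re

markdown = (
    "Some explanation.\n"
    "```python\nassert 1 + 1 == 2\n```\n"
    "More prose.\n"
    "```\nprint('hello')\n```\n"
)
pattern = r"```(?:\w+\n)?(.*?)```"
blocks = re.findall(pattern, markdown, re.DOTALL)
print("\n".join(code.strip() for code in blocks))
# assert 1 + 1 == 2
# print('hello')
```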
80 changes: 80 additions & 0 deletions scripts/auto_tests_docs/auto_tests_functions.py
@@ -0,0 +1,80 @@
import inspect
import os
import sys
import threading

from dotenv import load_dotenv

from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT
from swarms import OpenAIChat
from swarms.utils.parse_code import (
extract_code_from_markdown,
)
from zeta.utils import *

load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")

model = OpenAIChat(
model_name="gpt-4",
openai_api_key=api_key,
max_tokens=4000,
)


def process_documentation(item):
"""
Process the documentation for a given function using OpenAI model and save it in a Markdown file.
"""
doc = inspect.getdoc(item)
source = inspect.getsource(item)
input_content = (
f"Name: {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)
# print(input_content)

# Process with OpenAI model
processed_content = model(
TEST_WRITER_SOP_PROMPT(input_content, "zeta.utils", "zeta.utils")
)
processed_content = extract_code_from_markdown(processed_content)

doc_content = f"{processed_content}"

# Create the directory if it doesn't exist
dir_path = "tests/utils"
os.makedirs(dir_path, exist_ok=True)

# Write the processed documentation to a Markdown file
file_path = os.path.join(dir_path, f"{item.__name__.lower()}.py")
with open(file_path, "w") as file:
file.write(doc_content)

print(f"Test generated for {item.__name__}.")


def main():
# Gathering all functions from the zeta.utils module
functions = [
obj
for name, obj in inspect.getmembers(sys.modules["zeta.utils"])
if inspect.isfunction(obj)
]

threads = []
for func in functions:
thread = threading.Thread(target=process_documentation, args=(func,))
threads.append(thread)
thread.start()

# Wait for all threads to complete
for thread in threads:
thread.join()

print("Tests generated in 'tests/utils' directory.")


if __name__ == "__main__":
main()
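All four scripts share the same concurrency shape: one thread per item, started eagerly and joined before the final status message. A generic sketch of that fan-out/join pattern (illustration only, not tied to any zeta class or function):

```python
# Illustration only: start one worker thread per item, then wait for all of them.
import threading

def worker(item: str) -> None:
    print(f"processed {item}")

items = ["alpha", "beta", "gamma"]
threads = [threading.Thread(target=worker, args=(item,)) for item in items]

for thread in threads:
    thread.start()

# Wait for all threads to complete before reporting.
for thread in threads:
    thread.join()

print(f"done, {len(items)} items processed")
```

Because the per-item work in these scripts is a blocking network call, plain threads are a reasonable fit here even under the GIL.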