Skip to content

Commit

Permalink
Fixed auto-ctx indexing in autonomous mode - issue #23
Browse files Browse the repository at this point in the history
  • Loading branch information
szczyglis-dev committed Mar 8, 2024
1 parent 9ec7196 commit d7050fa
Show file tree
Hide file tree
Showing 5 changed files with 68 additions and 32 deletions.
31 changes: 21 additions & 10 deletions src/pygpt_net/controller/agent/flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License #
# Created By : Marcin Szczygliński #
# Updated Date: 2024.03.07 23:00:00 #
# Updated Date: 2024.03.08 23:00:00 #
# ================================================== #

from pygpt_net.item.ctx import CtxItem
Expand Down Expand Up @@ -46,12 +46,17 @@ def on_system_prompt(
"""
stop_cmd = ""
if auto_stop:
stop_cmd = '\n\nSTATUS UPDATE: You can use "goal_update" command to update status of the task. Remember to put it in the form as given, at the end of response and including ' \
'the surrounding ~###~ marks, e.g.: ~###~{"cmd": "goal_update", "params": {"status": "finished"}}~###~'
stop_cmd = '\n\nSTATUS UPDATE: You can use "goal_update" command to update status of the task. ' \
'Remember to put it in the form as given, at the end of response and including ' \
'the surrounding ~###~ marks, ' \
'e.g.: ~###~{"cmd": "goal_update", "params": {"status": "finished"}}~###~'
stop_cmd+= '\nON GOAL FINISH: When you believe that the task has been completed 100% and all goals have ' \
'been achieved, run "goal_update" command with status = "finished".'
stop_cmd += '\nON PAUSE, FAILED OR WAIT: If more data from user is needed to achieve the goal or task run must be paused or ' \
'task was failed or when the conversation falls into a loop, THEN STOP REASONING and include "goal_update" command with one of these statuses: "pause", "failed" or "wait"'
stop_cmd += '\nON PAUSE, FAILED OR WAIT: If more data from user is needed to achieve the goal or task ' \
'run must be paused or ' \
'task was failed or when the conversation falls into a loop, ' \
'THEN STOP REASONING and include "goal_update" command with one of these statuses: ' \
'"pause", "failed" or "wait"'
if append_prompt is not None and append_prompt.strip() != "":
append_prompt = "\n" + append_prompt
prompt += str(append_prompt) + stop_cmd
Expand Down Expand Up @@ -109,9 +114,9 @@ def on_ctx_end(
force=True,
internal=True,
)
# internal call will not trigger async mode and will hide the message from previous iteration
# internal call will not trigger async mode and will hide the message from previous iteration
elif self.iteration >= int(iterations):
self.on_stop()
self.on_stop(auto=True)
if self.window.core.config.get("agent.goal.notify"):
self.window.ui.tray.show_msg(
trans("notify.agent.stop.title"),
Expand Down Expand Up @@ -194,7 +199,7 @@ def cmd(
try:
if item["cmd"] == "goal_update":
if item["params"]["status"] == "finished":
self.on_stop()
self.on_stop(auto=True)
self.window.ui.status(trans('status.finished')) # show info
self.finished = True
if self.window.core.config.get("agent.goal.notify"):
Expand All @@ -203,19 +208,25 @@ def cmd(
trans("notify.agent.goal.content"),
)
elif item["params"]["status"] in pause_status:
self.on_stop()
self.on_stop(auto=True)
self.window.ui.status(trans('status.finished')) # show info
self.finished = True
except Exception as e:
self.window.core.debug.error(e)
return

def on_stop(self):
def on_stop(self, auto: bool = False):
    """
    Event: On force stop

    Resets the agent loop state (iteration counter, previous output,
    stop/finished flags) and unlocks the chat input.

    :param auto: True when the stop was triggered automatically
        (iteration limit reached or goal reported as finished) rather
        than by the user; in that case the current context is indexed
        synchronously here, because per-turn auto-indexing is skipped
        while the agent is running.
    """
    self.window.controller.chat.common.unlock_input()
    self.iteration = 0
    self.prev_output = None
    self.stop = True
    self.finished = False  # reset finished flag

    # update ctx DB index now, in sync mode, if auto-index is enabled;
    # agent runs defer indexing until the whole run stops
    if auto:
        self.window.controller.idx.on_ctx_end(None, sync=True)
8 changes: 5 additions & 3 deletions src/pygpt_net/controller/chat/input.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License #
# Created By : Marcin Szczygliński #
# Updated Date: 2024.02.25 06:00:00 #
# Updated Date: 2024.03.08 23:00:00 #
# ================================================== #

from PySide6.QtWidgets import QApplication
Expand All @@ -33,7 +33,8 @@ def __init__(self, window=None):
self.no_ctx_idx_modes = [
"img",
"assistant",
"llama_index"
"llama_index",
"agent",
] # assistant handled in async

def send_input(self, force: bool = False):
Expand Down Expand Up @@ -230,8 +231,9 @@ def execute(
self.window.controller.ui.update_tokens() # update UI
self.generating = False # unlock

if mode not in self.no_ctx_idx_modes:
if mode not in self.no_ctx_idx_modes and not self.window.controller.agent.enabled():
self.window.controller.idx.on_ctx_end(ctx) # update ctx DB index
# disabled in agent mode to prevent indexing everything at once when the run is finished.

self.log("End.")

Expand Down
3 changes: 1 addition & 2 deletions src/pygpt_net/controller/ctx/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License #
# Created By : Marcin Szczygliński #
# Updated Date: 2024.03.06 02:00:00 #
# Updated Date: 2024.03.08 23:00:00 #
# ================================================== #

from PySide6.QtWidgets import QApplication
Expand Down Expand Up @@ -132,7 +132,6 @@ def restore_filters_labels(self):
self.window.core.ctx.filters_labels = labels
self.window.ui.nodes['filter.ctx.labels'].restore(labels)


def toggle_display_filter(self, filter: str):
"""
Toggle display filter
Expand Down
7 changes: 4 additions & 3 deletions src/pygpt_net/controller/idx/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License #
# Created By : Marcin Szczygliński #
# Updated Date: 2024.02.23 01:00:00 #
# Updated Date: 2024.03.08 23:00:00 #
# ================================================== #

import datetime
Expand Down Expand Up @@ -141,11 +141,12 @@ def update_list(self):
if items is not None:
self.window.ui.toolbox.indexes.update(items)

def on_ctx_end(self, ctx: CtxItem = None):
def on_ctx_end(self, ctx: CtxItem = None, sync: bool = False):
"""
After context item updated (request + response received)
:param ctx: Context item instance
:param sync: Synchronous call
"""
# ignore if manually stopped
if self.window.controller.chat.input.stop:
Expand All @@ -161,7 +162,7 @@ def on_ctx_end(self, ctx: CtxItem = None):
if current_ctx is not None:
meta = self.window.core.ctx.get_meta_by_id(current_ctx)
if meta is not None:
self.indexer.index_ctx_realtime(meta, idx)
self.indexer.index_ctx_realtime(meta, idx, sync=sync)

def after_index(self, idx: str = None):
"""
Expand Down
51 changes: 37 additions & 14 deletions src/pygpt_net/core/idx/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License #
# Created By : Marcin Szczygliński #
# Updated Date: 2024.03.04 20:00:00 #
# Updated Date: 2024.03.08 23:00:00 #
# ================================================== #

import json
Expand Down Expand Up @@ -182,7 +182,7 @@ def chat(self, **kwargs) -> bool:
ctx.stream = response.response_gen
ctx.input_tokens = input_tokens
ctx.set_output("", "")
# no metadata here in agent chat stream response
# no metadata here in agent chat stream response!
else:
ctx.add_doc_meta(response.metadata) # store metadata
ctx.input_tokens = input_tokens
Expand All @@ -196,48 +196,65 @@ def chat(self, **kwargs) -> bool:
return True
return False

def query_file(self, path: str, query: str, model: ModelItem = None) -> str:
    """
    Query a file using a temporary index created on the fly (in memory).

    The file is indexed into a throw-away temporary index, queried once
    with the default prompt, and the temporary index is cleaned up
    afterwards regardless of whether the query produced a response.

    :param path: path to the file to index (in memory)
    :param query: query string
    :param model: model to use; when None, the default model is used
    :return: response text, or None if the file could not be indexed
    """
    if model is None:
        model = self.window.core.models.from_defaults()
    context = self.window.core.idx.llm.get_service_context(model=model)
    tmp_id, index = self.storage.get_tmp(path, service_context=context)  # get or create tmp index

    idx = "tmp:{}".format(path)  # tmp index id
    self.log("Indexing to temporary in-memory index: {}...".format(idx))

    # index file to tmp index
    files, errors = self.window.core.idx.indexing.index_files(
        idx=idx,
        index=index,
        path=path,
        is_tmp=True,  # do not try to remove old doc id
    )

    # query tmp index only if at least one file was indexed
    output = None
    if len(files) > 0:
        self.log("Querying temporary in-memory index: {}...".format(idx))
        response = index.as_query_engine(
            streaming=False,
        ).query(query)  # query with default prompt
        if response:
            output = response.response

    self.storage.clean_tmp(tmp_id)  # clean up the temporary index from memory
    return output

def query_web(self, type: str, url: str, args: dict, query: str, model: ModelItem = None) -> str:
"""
Query web using temp index (created on the fly)
:param type: type of content
:param url: url to index
:param url: url to index (in memory)
:param args: extra args
:param query: query
:param model: model
:return: response
"""
parts = {
"type": type,
"url": url,
"args": args,
}
id = json.dumps(parts)
model = self.window.core.models.from_defaults()
if model is None:
model = self.window.core.models.from_defaults()
context = self.window.core.idx.llm.get_service_context(model=model)
index = self.storage.get_tmp(id, service_context=context) # get or create tmp index
tmp_id, index = self.storage.get_tmp(id, service_context=context) # get or create tmp index

idx = "tmp:{}".format(id) # tmp index id
self.log("Indexing to temporary in-memory index: {}...".format(idx))
Expand All @@ -249,15 +266,21 @@ def query_web(self, type: str, url: str, args: dict, query: str) -> str:
urls=[url],
type=type,
extra_args=args,
is_tmp = True,
is_tmp=True, # do not try to remove old doc id
)

# query tmp index
output = None
if num > 0:
self.log("Querying temporary in-memory index: {}...".format(idx))
response = index.as_query_engine(
streaming=False,
).query(query) # query with default prompt
if response:
return response.response
output = response.response

self.storage.clean_tmp(tmp_id) # clean memory
return output

def get_memory_buffer(self, history: list, llm = None) -> ChatMemoryBuffer:
"""
Expand Down

0 comments on commit d7050fa

Please sign in to comment.