4 changes: 3 additions & 1 deletion rdagent/components/coder/CoSTEER/evolving_strategy.py
@@ -21,9 +21,10 @@
class MultiProcessEvolvingStrategy(EvolvingStrategy):
KEY_CHANGE_SUMMARY = "__change_summary__" # Optional key for the summary of the change of evolving subjects

def __init__(self, scen: Scenario, settings: CoSTEERSettings):
def __init__(self, scen: Scenario, settings: CoSTEERSettings, improve_mode: bool = False):
super().__init__(scen)
self.settings = settings
self.improve_mode = improve_mode  # In improve mode we only implement tasks that have failed before; the main difference is that the first loop does not implement all tasks.

@abstractmethod
def implement_one_task(
@@ -93,6 +94,7 @@ def evolve(
elif (
target_task_desc not in queried_knowledge.success_task_to_knowledge_dict
and target_task_desc not in queried_knowledge.failed_task_info_set
and not (self.improve_mode and last_feedback[index] is None)
):
to_be_finished_task_index.append(index)

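The new improve_mode flag changes which tasks the evolving loop selects: when improve_mode is on and a task has no prior feedback (last_feedback[index] is None), it is skipped rather than implemented. Below is a minimal sketch of that selection logic; the helper name select_tasks_to_implement and its plain-list arguments are illustrative stand-ins, not the actual CoSTEER API.

def select_tasks_to_implement(task_descs, last_feedback, queried_knowledge, improve_mode):
    # Return the indices of tasks that should be implemented in this loop.
    to_be_finished_task_index = []
    for index, task_desc in enumerate(task_descs):
        if task_desc in queried_knowledge.success_task_to_knowledge_dict:
            continue  # already solved; reuse the stored knowledge instead
        if task_desc in queried_knowledge.failed_task_info_set:
            continue  # known to keep failing; do not retry
        if improve_mode and last_feedback[index] is None:
            continue  # improve mode: no earlier attempt or feedback, so nothing to improve yet
        to_be_finished_task_index.append(index)
    return to_be_finished_task_index

In the first loop every entry of last_feedback is None, so with improve_mode enabled nothing is selected and the existing workspace goes straight to evaluation.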
7 changes: 1 addition & 6 deletions rdagent/scenarios/data_science/dev/runner/__init__.py
@@ -48,11 +48,6 @@ def implement_one_task(
workspace: FBWorkspace | None = None,
prev_task_feedback: CoSTEERSingleFeedback | None = None,
) -> dict[str, str]:

if prev_task_feedback is None:
# if no prev_task_feedback, it is the first loop; we do not make any changes and goto evaluators directly.
return {}

# Get evolving history
task_info = target_task.get_task_information()
queried_former_failed_knowledge = (
@@ -157,7 +152,7 @@ def __init__(
single_evaluator=eval_l, scen=scen
) # Please specify whether you agree to run your evaluation in parallel or not
settings = DSRunnerCoSTEERSettings()
es = DSRunnerMultiProcessEvolvingStrategy(scen=scen, settings=settings)
es = DSRunnerMultiProcessEvolvingStrategy(scen=scen, settings=settings, improve_mode=True)

# In runner, we don't need very big loops, so we set max_loop to runner_max_loop
super().__init__(
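With the strategy constructed in improve mode, the runner no longer needs the explicit early return in implement_one_task: on the first loop no task has feedback yet, so nothing is selected for implementation and the unchanged workspace is passed directly to the evaluators; in later loops only tasks whose previous feedback failed are re-implemented. A hedged sketch of the wiring follows (other constructor arguments are abbreviated and scen is assumed to be built elsewhere; this is not the full signature).

settings = DSRunnerCoSTEERSettings()
es = DSRunnerMultiProcessEvolvingStrategy(scen=scen, settings=settings, improve_mode=True)
# First loop: no prior feedback, so no task is selected, implement_one_task is not
# called, and evaluation runs on the workspace as-is (replacing the old `return {}`).
# Later loops: only tasks with failed feedback are selected and re-implemented.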