update /history_v2/:prompt_id to also have prompts in dict fmt
ric-yu committed Jul 20, 2025
commit c9a5ecd18480a260c2678f1637bd9c448eb00f6c
20 changes: 20 additions & 0 deletions execution.py
@@ -1165,6 +1165,26 @@ def get_ordered_history(self, max_items=None, offset=0):

return {"history": history_items}

def get_history_v2(self, prompt_id):
with self.mutex:
if prompt_id in self.history:
history_entry = copy.deepcopy(self.history[prompt_id])

# Extract and convert prompt tuple to dict
if "prompt" in history_entry:
priority, prompt_id_inner, prompt_data, extra_data, outputs_to_execute = history_entry["prompt"]
history_entry["prompt"] = {
"priority": priority,
"prompt_id": prompt_id_inner,
"prompt": prompt_data,
"extra_data": extra_data,
"outputs_to_execute": outputs_to_execute
}

return {prompt_id: history_entry}
else:
return {}

def wipe_history(self):
with self.mutex:
self.history = {}
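For context on the new response shape, here is a minimal standalone sketch of the tuple-to-dict conversion that get_history_v2 performs above. The prompt_id and node data are made up purely for illustration.

import copy

def convert_history_entry(history_entry):
    # Same reshaping as get_history_v2: the legacy entry stores "prompt" as a
    # positional tuple; v2 exposes the same values under named keys.
    entry = copy.deepcopy(history_entry)
    if "prompt" in entry:
        priority, prompt_id, prompt_data, extra_data, outputs_to_execute = entry["prompt"]
        entry["prompt"] = {
            "priority": priority,
            "prompt_id": prompt_id,
            "prompt": prompt_data,
            "extra_data": extra_data,
            "outputs_to_execute": outputs_to_execute,
        }
    return entry

# Hypothetical legacy entry (values are illustrative only).
legacy_entry = {
    "prompt": (0, "abc-123", {"1": {"class_type": "ExampleNode"}}, {}, ["1"]),
    "outputs": {},
    "status": {"completed": True},
}
print(convert_history_entry(legacy_entry)["prompt"]["prompt_id"])  # -> abc-123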
2 changes: 1 addition & 1 deletion server.py
@@ -666,7 +666,7 @@ async def get_ordered_history(request):
        @routes.get("/history_v2/{prompt_id}")
Owner:

this is not necessary.

Author:

I believe we agreed we'd update this endpoint as well cc: @guill @webfiltered

Collaborator:

Yeah, this was specifically my recommendation to avoid a case where people need to remember to use one endpoint for the list and one for individual items (which goes against normal REST principles). It's much easier if we're just able to say "move to this new endpoint as we'll be deprecating /history eventually".

I don't have a strong opinion on the specific name we use, but do strongly feel we should keep this route to preserve consistency.

        async def get_history_v2_prompt_id(request):
            prompt_id = request.match_info.get("prompt_id", None)
-            return web.json_response(self.prompt_queue.get_history(prompt_id=prompt_id))
+            return web.json_response(self.prompt_queue.get_history_v2(prompt_id=prompt_id))

        @routes.get("/queue")
        async def get_queue(request):
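A usage sketch for the route wired above, assuming a server listening on the default local address (http://127.0.0.1:8188 is an assumption; adjust for your deployment). Only the standard library is used.

import json
import urllib.request

BASE_URL = "http://127.0.0.1:8188"  # assumed local server address

def fetch_history_v2(prompt_id):
    # GET /history_v2/{prompt_id} -> {prompt_id: entry} if known, {} otherwise
    with urllib.request.urlopen(f"{BASE_URL}/history_v2/{prompt_id}") as resp:
        return json.loads(resp.read())

entry = fetch_history_v2("abc-123").get("abc-123")
if entry:
    # In v2 the prompt is a dict with named keys instead of a positional tuple.
    print(entry["prompt"]["priority"], entry["prompt"]["outputs_to_execute"])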
37 changes: 26 additions & 11 deletions tests/inference/test_execution.py
@@ -660,9 +660,24 @@ def test_history_prompt_id_endpoint(self, client: ComfyClient, builder: GraphBuilder):
        # Test history_v2 endpoint for specific prompt
        specific_history = client.get_history_v2_for_prompt(prompt_id)
        assert prompt_id in specific_history, "History v2 should contain prompt ID"
-        assert specific_history[prompt_id] == legacy_history[prompt_id], "History v2 data should match legacy history"

-    def test_history_max_items(self, client: ComfyClient, builder: GraphBuilder):

+        # Verify key fields match between legacy and v2
+        v2_data = specific_history[prompt_id]
+        legacy_data = legacy_history[prompt_id]
+
+        # Check that outputs and status match
+        assert v2_data["outputs"] == legacy_data["outputs"], "Outputs should match"
+        assert v2_data["status"] == legacy_data["status"], "Status should match"
+
+        # Verify prompt is converted to dict format in v2
+        assert isinstance(v2_data["prompt"], dict), "Prompt should be a dictionary in v2"
+        assert "prompt_id" in v2_data["prompt"], "Prompt dict should have prompt_id"
+        assert "priority" in v2_data["prompt"], "Prompt dict should have priority"
+        assert "prompt" in v2_data["prompt"], "Prompt dict should have prompt data"
+        assert "extra_data" in v2_data["prompt"], "Prompt dict should have extra_data"
+        assert "outputs_to_execute" in v2_data["prompt"], "Prompt dict should have outputs_to_execute"

+    def test_history_max_items(self, client: ComfyClient):
        """Test legacy history endpoint with max_items parameter."""
        # Clear history to start fresh
        client.clear_history()
@@ -680,7 +695,7 @@ def test_history_max_items(self, client: ComfyClient, builder: GraphBuilder):
        limited_history = client.get_history(max_items=2)
        assert len(limited_history) == 2, "History should return exactly max_items"

-    def test_ordered_history_max_items_and_offset(self, client: ComfyClient, builder: GraphBuilder):
+    def test_ordered_history_max_items_and_offset(self, client: ComfyClient):
        """Test ordered history endpoint with max_items and offset parameters."""
        # Clear history to start fresh
        client.clear_history()
@@ -731,13 +746,13 @@ def test_ordered_history_max_items_and_offset(self, client: ComfyClient, builder: GraphBuilder):
        assert len(set(all_paginated_ids)) == 5, "All paginated IDs should be unique"
        assert set(all_paginated_ids) == set(full_prompt_ids), "Paginated results should cover all items"

-        # Test default behavior: get last N items (no offset specified)
-        # When offset < 0 and max_items is specified, offset = len(history) - max_items
-        last_2_items = client.get_ordered_history(max_items=2)
-        assert len(last_2_items["history"]) == 2, "Default behavior should return 2 items"
-        last_2_ids = [item["prompt_id"] for item in last_2_items["history"]]
-        # This should be equivalent to offset=3 (5-2=3)
-        assert last_2_ids == full_prompt_ids[3:5], "Default behavior should return last 2 items"
+        # Test default behavior: get first N items (no offset specified)
+        # When offset is not specified, it defaults to 0
+        first_2_items = client.get_ordered_history(max_items=2)
+        assert len(first_2_items["history"]) == 2, "Default behavior should return 2 items"
+        first_2_ids = [item["prompt_id"] for item in first_2_items["history"]]
+        # This should be equivalent to offset=0 with max_items=2
+        assert first_2_ids == full_prompt_ids[0:2], "Default behavior should return first 2 items"

        # Test offset beyond available items
        beyond_offset = client.get_ordered_history(max_items=2, offset=10)
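For clarity on the default behavior the revised test relies on, here is a minimal sketch of the windowing the test asserts: offset defaults to 0, so max_items alone returns the first N items. This illustrates the contract being tested, not the server's actual implementation.

def ordered_history_window(history_ids, max_items=None, offset=0):
    # offset defaults to 0, so max_items alone returns the first N items
    end = None if max_items is None else offset + max_items
    return history_ids[offset:end]

ids = ["p1", "p2", "p3", "p4", "p5"]
print(ordered_history_window(ids, max_items=2))             # ['p1', 'p2']  (default offset=0)
print(ordered_history_window(ids, max_items=2, offset=3))   # ['p4', 'p5']
print(ordered_history_window(ids, max_items=2, offset=10))  # []  (offset beyond available items)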