Skip to content

Commit db3d1fa

Browse files
authored
save mle_score to log folder with 'mle_score' tag (#672)
1 parent 30b1ff8 commit db3d1fa

File tree

1 file changed

+24
-26
lines changed

1 file changed

+24
-26
lines changed

rdagent/log/mle_summary.py

+24-26
Original file line number | Diff line number | Diff line change
@@ -28,17 +28,19 @@ def extract_mle_json(log_content: str) -> dict | None:
2828

2929

3030
def save_grade_info(log_trace_path: Path):
31-
for msg in FileStorage(log_trace_path).iter_msg():
31+
trace_storage = FileStorage(log_trace_path)
32+
for msg in trace_storage.iter_msg():
3233
if "competition" in msg.tag:
3334
competition = msg.content
3435

3536
if "running" in msg.tag:
3637
if isinstance(msg.content, DSExperiment):
37-
msg.content.experiment_workspace.execute(
38+
mle_score_str = msg.content.experiment_workspace.execute(
3839
env=de,
39-
entry=f"mlebench grade-sample submission.csv {competition} --data-dir /mle/data > mle_score.txt 2>&1",
40+
entry=f"mlebench grade-sample submission.csv {competition} --data-dir /mle/data | tee mle_score.txt",
4041
)
4142
msg.content.experiment_workspace.execute(env=de, entry="chmod 777 mle_score.txt")
43+
trace_storage.log(mle_score_str, name=f"{msg.tag}.mle_score")
4244

4345

4446
def is_valid_session(p: Path) -> bool:
@@ -106,30 +108,26 @@ def summarize_folder(log_folder: Path):
106108
made_submission_num += 1
107109
scores_path = msg.content.experiment_workspace.workspace_path / "scores.csv"
108110
valid_scores[loop_num - 1] = pd.read_csv(scores_path, index_col=0)
109-
grade_output_path = msg.content.experiment_workspace.workspace_path / "mle_score.txt"
110-
if not grade_output_path.exists():
111-
raise FileNotFoundError(
112-
f"mle_score.txt in {grade_output_path} not found, genarate it first!"
111+
elif "mle_score" in msg.tag:
112+
grade_output = extract_mle_json(msg.content)
113+
if grade_output:
114+
if grade_output["score"] is not None:
115+
test_scores[loop_num - 1] = grade_output["score"]
116+
_, test_ranks[loop_num - 1] = score_rank(
117+
stat[log_trace_path.name]["competition"], grade_output["score"]
113118
)
114-
grade_output = extract_mle_json(grade_output_path.read_text())
115-
if grade_output:
116-
if grade_output["score"] is not None:
117-
test_scores[loop_num - 1] = grade_output["score"]
118-
_, test_ranks[loop_num - 1] = score_rank(
119-
stat[log_trace_path.name]["competition"], grade_output["score"]
120-
)
121-
if grade_output["valid_submission"]:
122-
valid_submission_num += 1
123-
if grade_output["above_median"]:
124-
above_median_num += 1
125-
if grade_output["any_medal"]:
126-
get_medal_num += 1
127-
if grade_output["bronze_medal"]:
128-
bronze_num += 1
129-
if grade_output["silver_medal"]:
130-
silver_num += 1
131-
if grade_output["gold_medal"]:
132-
gold_num += 1
119+
if grade_output["valid_submission"]:
120+
valid_submission_num += 1
121+
if grade_output["above_median"]:
122+
above_median_num += 1
123+
if grade_output["any_medal"]:
124+
get_medal_num += 1
125+
if grade_output["bronze_medal"]:
126+
bronze_num += 1
127+
if grade_output["silver_medal"]:
128+
silver_num += 1
129+
if grade_output["gold_medal"]:
130+
gold_num += 1
133131

134132
if "feedback" in msg.tag and "evolving" not in msg.tag:
135133
if isinstance(msg.content, ExperimentFeedback) and bool(msg.content):

0 commit comments

Comments (0)