@@ -28,17 +28,19 @@ def extract_mle_json(log_content: str) -> dict | None:
28
28
29
29
30
30
def save_grade_info(log_trace_path: Path):
    """Grade every DS experiment recorded in a trace with MLE-bench.

    Iterates the messages stored under ``log_trace_path``: remembers the
    most recently seen competition name, and for each "running" message
    carrying a ``DSExperiment`` executes ``mlebench grade-sample`` inside
    that experiment's workspace. The grader output is both written to
    ``mle_score.txt`` in the workspace (via ``tee``) and logged back into
    the trace under the ``<msg.tag>.mle_score`` name so later passes can
    read it without re-running the grader.
    """
    trace_storage = FileStorage(log_trace_path)
    for msg in trace_storage.iter_msg():
        if "competition" in msg.tag:
            # NOTE(review): assumes a "competition" message always precedes
            # the first "running" message in the trace — otherwise
            # `competition` below is unbound. Confirm against the producer.
            competition = msg.content

        if "running" not in msg.tag:
            continue
        if not isinstance(msg.content, DSExperiment):
            continue

        workspace = msg.content.experiment_workspace
        # `tee` keeps the grader output visible while also persisting it
        # to mle_score.txt inside the workspace.
        mle_score_str = workspace.execute(
            env=de,
            entry=f"mlebench grade-sample submission.csv {competition} --data-dir /mle/data | tee mle_score.txt",
        )
        workspace.execute(env=de, entry="chmod 777 mle_score.txt")
        trace_storage.log(mle_score_str, name=f"{msg.tag}.mle_score")
42
44
43
45
44
46
def is_valid_session (p : Path ) -> bool :
@@ -106,30 +108,26 @@ def summarize_folder(log_folder: Path):
106
108
made_submission_num += 1
107
109
scores_path = msg .content .experiment_workspace .workspace_path / "scores.csv"
108
110
valid_scores [loop_num - 1 ] = pd .read_csv (scores_path , index_col = 0 )
109
- grade_output_path = msg .content .experiment_workspace .workspace_path / "mle_score.txt"
110
- if not grade_output_path .exists ():
111
- raise FileNotFoundError (
112
- f"mle_score.txt in { grade_output_path } not found, genarate it first!"
111
+ elif "mle_score" in msg .tag :
112
+ grade_output = extract_mle_json (msg .content )
113
+ if grade_output :
114
+ if grade_output ["score" ] is not None :
115
+ test_scores [loop_num - 1 ] = grade_output ["score" ]
116
+ _ , test_ranks [loop_num - 1 ] = score_rank (
117
+ stat [log_trace_path .name ]["competition" ], grade_output ["score" ]
113
118
)
114
- grade_output = extract_mle_json (grade_output_path .read_text ())
115
- if grade_output :
116
- if grade_output ["score" ] is not None :
117
- test_scores [loop_num - 1 ] = grade_output ["score" ]
118
- _ , test_ranks [loop_num - 1 ] = score_rank (
119
- stat [log_trace_path .name ]["competition" ], grade_output ["score" ]
120
- )
121
- if grade_output ["valid_submission" ]:
122
- valid_submission_num += 1
123
- if grade_output ["above_median" ]:
124
- above_median_num += 1
125
- if grade_output ["any_medal" ]:
126
- get_medal_num += 1
127
- if grade_output ["bronze_medal" ]:
128
- bronze_num += 1
129
- if grade_output ["silver_medal" ]:
130
- silver_num += 1
131
- if grade_output ["gold_medal" ]:
132
- gold_num += 1
119
+ if grade_output ["valid_submission" ]:
120
+ valid_submission_num += 1
121
+ if grade_output ["above_median" ]:
122
+ above_median_num += 1
123
+ if grade_output ["any_medal" ]:
124
+ get_medal_num += 1
125
+ if grade_output ["bronze_medal" ]:
126
+ bronze_num += 1
127
+ if grade_output ["silver_medal" ]:
128
+ silver_num += 1
129
+ if grade_output ["gold_medal" ]:
130
+ gold_num += 1
133
131
134
132
if "feedback" in msg .tag and "evolving" not in msg .tag :
135
133
if isinstance (msg .content , ExperimentFeedback ) and bool (msg .content ):
0 commit comments