            axes[i].set_xlabel("Bins")
            axes[i].legend(performance.coords["metric"].values)
            axes[i].yaxis.set_ticks(np.arange(0, 1, 0.1))
        plt.tight_layout()
    else:
        fig, axes = plt.subplots(len(performance.coords["metric"].values))
        for j, metric in enumerate(performance.coords["metric"].values):  # we need to change figure!
After Change
    if compare not in ["models", "metrics"]:
        raise RuntimeError("Please select compare method from ['models', 'metrics']")
    fig_list = []
    # Build one figure per dataset; each subplot row holds one model (or one metric).
    for dataset in performance.coords["dataset"].values:
        if compare == "models":
            fig, axes = plt.subplots(len(performance.coords["model"].values),
                                     sharex=True, squeeze=False)
            for i, model in enumerate(performance.coords["model"].values):
                for j, metric in enumerate(performance.coords["metric"].values):
                    k_values = performance.sel(dataset=dataset,
                                               model=model,
                                               metric=metric)
                    axes[i, 0].plot(k_values.values)
                    axes[i, 0].set_title(performance.name + " for " +
                                         dataset.name + " and model " + model.name)
                    axes[i, 0].set_ylabel("Number of queries")
                    axes[i, 0].set_xlabel("Bins")
                    axes[i, 0].legend(performance.coords["metric"].values)
                    axes[i, 0].yaxis.set_ticks(np.arange(0, 1, 0.1))
        elif compare == "metrics":
            fig, axes = plt.subplots(len(performance.coords["metric"].values),
                                     sharex=True, squeeze=False)
            for j, metric in enumerate(performance.coords["metric"].values):
                for i, model in enumerate(performance.coords["model"].values):
                    k_values = performance.sel(dataset=dataset,
                                               model=model,
                                               metric=metric)
                    axes[j, 0].plot(k_values.values)
                    axes[j, 0].set_title(performance.name + " for " +
                                         dataset.name + " and metric " + str(metric))
                    axes[j, 0].set_ylabel("Number of queries")
                    axes[j, 0].set_xlabel("Bins")
                    axes[j, 0].legend(performance.coords["model"].values)
                    axes[j, 0].yaxis.set_ticks(np.arange(0, 1, 0.1))
        fig_list.append(fig)
    return fig_list
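For context, a minimal sketch of the kind of input the rewritten loops expect; the coordinate values, the array name, and the import aliases below are toy assumptions, not taken from the library. It also shows why squeeze=False makes axes[i, 0] indexing necessary.

import matplotlib.pyplot as plt
import numpy as np
import xarray as xr

# Toy performance array (assumed layout): one value per (dataset, model, metric, bin).
performance = xr.DataArray(
    np.random.rand(2, 3, 2, 10),
    dims=["dataset", "model", "metric", "bin"],
    coords={
        "dataset": ["robust04", "msmarco"],
        "model": ["bm25", "qld", "rm3"],
        "metric": ["ndcg", "map"],
        "bin": np.arange(10),
    },
    name="Number of queries per bin",
)

# Selecting a single curve, exactly as the nested loops above do:
k_values = performance.sel(dataset="robust04", model="bm25", metric="ndcg")
print(k_values.values.shape)  # (10,) -> one point per bin

# squeeze=False keeps the axes array 2-D even when there is only one column,
# which is why the new code indexes axes[i, 0] instead of axes[i]:
fig, axes = plt.subplots(len(performance.coords["model"].values),
                         sharex=True, squeeze=False)
print(axes.shape)  # (3, 1)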
def plot_document_graded_relevance(performance):
    for dataset in performance.coords["dataset"].values: