cb840cb2da9012a3df001889370c131e8ac96498,rankeval/visualization/effectiveness.py,,plot_query_wise_performance,#Any#Any#,291

Before Change


                axes[i].set_xlabel("Bins")
                axes[i].legend(performance.coords["metric"].values)
                axes[i].yaxis.set_ticks(np.arange(0, 1, 0.1))
            plt.tight_layout()

        else:
            fig, axes = plt.subplots(len(performance.coords["metric"].values))
            for j, metric in enumerate(performance.coords["metric"].values):  # we need to change figure!!!!

After Change


    if compare not in ["models", "metrics"]:
        raise RuntimeError("Please select compare method from ["models", "metrics"]")

    fig_list = []

    for dataset in performance.coords["dataset"].values:
        if compare == "models":
            fig, axes = plt.subplots(len(performance.coords["model"].values),
                                     sharex=True, squeeze=False)
            for i, model in enumerate(performance.coords["model"].values):
                for j, metric in enumerate(performance.coords["metric"].values):
                    k_values = performance.sel(dataset=dataset,
                                               model=model,
                                               metric=metric)
                    axes[i, 0].plot(k_values.values)

                axes[i, 0].set_title(performance.name + " for " +
                                     dataset.name + " and model " + model.name)
                axes[i, 0].set_ylabel("Number of queries")
                axes[i, 0].set_xlabel("Bins")
                axes[i, 0].legend(performance.coords["metric"].values)
                axes[i, 0].yaxis.set_ticks(np.arange(0, 1, 0.1))

        elif compare == "metics":
            fig, axes = plt.subplots(len(performance.coords["metric"].values),
                                     sharex=True, queeze=False)
            for j, metric in enumerate(performance.coords["metric"].values):
                for i, model in enumerate(performance.coords["model"].values):
                    k_values = performance.sel(dataset=dataset,
                                               model=model,
                                               metric=metric)
                    axes[j, 0].plot(k_values.values)

                axes[j, 0].set_title(performance.name + " for " +
                                     dataset.name + "and metric " + str(metric))
                axes[j, 0].set_ylabel("Number of queries")
                axes[j, 0].set_xlabel("Bins")
                axes[j, 0].legend(performance.coords["model"].values)
                axes[j, 0].yaxis.set_ticks(np.arange(0, 1, 0.1))

        fig_list.append(fig)

    return fig_list


def plot_document_graded_relevance(performance):
    for dataset in performance.coords["dataset"].values:
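
The snippet below is a minimal usage sketch, not part of the commit. It assumes plot_query_wise_performance is importable from rankeval.visualization.effectiveness with a signature like plot_query_wise_performance(performance, compare=...), and that the dataset and model coordinates hold objects exposing a .name attribute, as the title-building code above implies; the Named helper and all labels are hypothetical stand-ins.

import numpy as np
import xarray as xr

# Assumed import path, derived from the file name in this record.
from rankeval.visualization.effectiveness import plot_query_wise_performance

# Hypothetical stand-in for the rankeval Dataset/Model objects whose .name
# attribute the plotting code reads when building subplot titles.
class Named(object):
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return self.name

datasets = [Named("msn30k-fold1-test")]
models = [Named("lambdamart-500"), Named("lambdamart-1000")]
metrics = ["NDCG@10", "P@10"]
n_bins = 10

# Random per-bin query counts, only to exercise the plotting code.
performance = xr.DataArray(
    np.random.randint(0, 50, size=(len(datasets), len(models), len(metrics), n_bins)),
    dims=["dataset", "model", "metric", "bin"],
    coords={"dataset": datasets, "model": models,
            "metric": metrics, "bin": np.arange(n_bins)},
    name="Query-wise performance")

# One figure per dataset; compare="metrics" would draw one subplot per metric instead.
figs = plot_query_wise_performance(performance, compare="models")

Note that with squeeze=False the axes array returned by plt.subplots is always 2-D, so the axes[i, 0] / axes[j, 0] indexing above works even when only a single model or metric is plotted.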
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 6

Instances


Project Name: hpclab/rankeval
Commit Name: cb840cb2da9012a3df001889370c131e8ac96498
Time: 2017-07-27
Author: cristina.i.muntean@gmail.com
File Name: rankeval/visualization/effectiveness.py
Class Name:
Method Name: plot_query_wise_performance


Project Name: hpclab/rankeval
Commit Name: b104ef8ea5f6e98b0b05a5cf068bba0c8689d445
Time: 2017-07-27
Author: cristina.i.muntean@gmail.com
File Name: rankeval/visualization/effectiveness.py
Class Name:
Method Name: plot_tree_wise_average_contribution


Project Name: hpclab/rankeval
Commit Name: 16c06c30ae23f13c4cc919a0c3d50fe3e786a7fb
Time: 2017-07-27
Author: cristina.i.muntean@gmail.com
File Name: rankeval/visualization/effectiveness.py
Class Name:
Method Name: plot_rank_confusion_matrix