tokens_batch = self.batch_input(tokens)
self.prepare_vectorizers(tokens_batch)
if self.preproc == "client":
examples = self.vectorize(tokens_batch)
elif self.preproc == "server":
# TODO: here we allow vectorizers even for preproc=server to get `word_lengths`.
# vectorizers should not be available when preproc=server.
featurized_examples = self.vectorize(tokens_batch)
examples = {
"tokens": np.array([" ".join(x) for x in tokens_batch]),
self.model.lengths_key: featurized_examples[self.model.lengths_key]
}
outcomes_list = self.model.predict(examples)
return self.format_output(outcomes_list)
def format_output(self, predicted):
results = []
# After Change
version = kwargs.get("version")
if backend not in {"tf"}:
raise ValueError("only Tensorflow is currently supported for remote Services")
import_user_module("baseline.{}.remote".format(backend))
exp_type = kwargs.get("remote_type")
if exp_type is None: