33d61460bcda81161f0254b074c3a0eda3ce70ee,cde/density_estimator/NF.py,NormalizingFlowEstimator,_build_model,#NormalizingFlowEstimator#,156

Before Change


        with tf.variable_scope(self.name):
            // adds placeholders, data normalization and data noise to graph as desired
            self.layer_in_x, self.layer_in_y = self._build_input_layers()
            self.y_input = L.get_output(self.layer_in_y)

            flow_classes = [FLOWS[flow_name] for flow_name in self.flows_type]
            // get the individual parameter sizes for each flow
            param_split_sizes = [flow.get_param_size(self.ndim_y) for flow in flow_classes]
            mlp_output_dim = sum(param_split_sizes)
            core_network = MLP(
                name="core_network",
                input_layer=self.layer_in_x,
                output_dim=mlp_output_dim,
                hidden_sizes=self.hidden_sizes,
                hidden_nonlinearity=self.hidden_nonlinearity,
                output_nonlinearity=None,
                weight_normalization=self.weight_normalization
            )
            outputs = L.get_output(core_network.output_layer)
            flow_params = tf.split(value=outputs, num_or_size_splits=param_split_sizes, axis=1)

            // instantiate the flows with their parameters
            flows = [flow(params, self.ndim_y) for flow, params in zip(flow_classes, flow_params)]

            // build up the base distribution that will be transformed by the flows
            base_dist = tf.distributions.Normal(loc=[0.]*self.ndim_y, scale=[1.]*self.ndim_y)

            // chain the flows together and build the transformed distribution using the base_dist + flows
            // Chaining applies the flows in reverse, Chain([a,b]).forward(x) being a.forward(b.forward(x))
            // We reverse them so the flows are stacked on top of the base distribution in the original order
            flows.reverse()
            chain = tf.contrib.distributions.bijectors.Chain(flows)
            target_dist = tf.contrib.distributions.TransformedDistribution(distribution=base_dist, bijector=chain)

            // since we operate with matrices not vectors, the output would have dimension (?,1)
            // and therefore has to be reduced first to have shape (?,)
            if self.data_normalization:
                self.pdf_ = tf.squeeze(target_dist.prob(self.y_input) / tf.reduce_prod(self.std_y_sym), axis=1)
                self.log_pdf_ = tf.squeeze(target_dist.log_prob(self.y_input) - tf.reduce_sum(tf.log(self.std_y_sym)), axis=1)
                self.cdf_ = tf.squeeze(target_dist.cdf(self.y_input) / tf.reduce_prod(self.std_y_sym), axis=1)
            else:
                self.pdf_ = tf.squeeze(target_dist.prob(self.y_input), axis=1)
                self.log_pdf_ = tf.squeeze(target_dist.log_prob(self.y_input), axis=1)

After Change


        assert p.shape[0] == X.shape[0], "Shapes should be equal, are {} != {}".format(p.shape[0], X.shape[0])
        return p

    def _build_model(self):
        
        implementation of the flow model
        
        with tf.variable_scope(self.name):
            // adds placeholders, data normalization and data noise to graph as desired
            self.layer_in_x, self.layer_in_y = self._build_input_layers()
            self.y_input = L.get_output(self.layer_in_y)

            flow_classes = [FLOWS[flow_name] for flow_name in self.flows_type]
            // get the individual parameter sizes for each flow
            param_split_sizes = [flow.get_param_size(self.ndim_y) for flow in flow_classes]
            mlp_output_dim = sum(param_split_sizes)
            core_network = MLP(
                name="core_network",
                input_layer=self.layer_in_x,
                output_dim=mlp_output_dim,
                hidden_sizes=self.hidden_sizes,
                hidden_nonlinearity=self.hidden_nonlinearity,
                output_nonlinearity=None,
                weight_normalization=self.weight_normalization
            )
            outputs = L.get_output(core_network.output_layer)
            flow_params = tf.split(value=outputs, num_or_size_splits=param_split_sizes, axis=1)

            // instantiate the flows with their parameters
            flows = [flow(params, self.ndim_y) for flow, params in zip(flow_classes, flow_params)]

            // build up the base distribution that will be transformed by the flows
            if self.ndim_y == 1:
                // this is faster for 1-D than the multivariate version
                // it also supports a cdf, which isn't implemented for Multivariate
                base_dist = tf.distributions.Normal(loc=0., scale=1.)
            else:
                base_dist = tf.contrib.distributions.MultivariateNormalDiag(loc=[0.] * self.ndim_y,
                                                                            scale_diag=[1.] * self.ndim_y)

            // chain the flows together and build the transformed distribution using the base_dist + flows
            // Chaining applies the flows in reverse, Chain([a,b]).forward(x) being a.forward(b.forward(x))
            // We reverse them so the flows are stacked on top of the base distribution in the original order
            flows.reverse()
            chain = tf.contrib.distributions.bijectors.Chain(flows)
            target_dist = tf.contrib.distributions.TransformedDistribution(distribution=base_dist, bijector=chain)

            // since we operate with matrices not vectors, the output would have dimension (?,1)
            // and therefore has to be reduced first to have shape (?,)
            if self.ndim_y == 1:
                // for x shape (batch_size, 1) normal_distribution.pdf(x) outputs shape (batch_size, 1) -> squeeze
                self.pdf_ = tf.squeeze(target_dist.prob(self.y_input), axis=1)
                self.log_pdf_ = tf.squeeze(target_dist.log_prob(self.y_input), axis=1)
Italian Trulli
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 8

Instances


Project Name: freelunchtheorem/Conditional_Density_Estimation
Commit Name: 33d61460bcda81161f0254b074c3a0eda3ce70ee
Time: 2019-05-02
Author: simonboehm@mailbox.org
File Name: cde/density_estimator/NF.py
Class Name: NormalizingFlowEstimator
Method Name: _build_model


Project Name: freelunchtheorem/Conditional_Density_Estimation
Commit Name: 33d61460bcda81161f0254b074c3a0eda3ce70ee
Time: 2019-05-02
Author: simonboehm@mailbox.org
File Name: cde/density_estimator/NF.py
Class Name: NormalizingFlowEstimator
Method Name: _build_model


Project Name: pymc-devs/pymc3
Commit Name: 21c16153ecd473a027df2af1e9a4fd3c71810e1a
Time: 2017-04-14
Author: maxim.v.kochurov@gmail.com
File Name: pymc3/variational/callbacks.py
Class Name: CheckLossConvergence
Method Name: __call__


Project Name: pymc-devs/pymc3
Commit Name: d493caa1278c158b78aa02c8f23d4f56c311f975
Time: 2017-04-14
Author: maxim.v.kochurov@gmail.com
File Name: pymc3/variational/callbacks.py
Class Name: CheckLossConvergence1
Method Name: __call__