# Setting the relations among variables in a graph where each node is a MoG (Migrated from community.research.microsoft.com) • ### Question

• JaviOliver posted on 03-07-2011 12:38 PM

-         I have the DAG shown in the attached figure (basically: x1 -> x2, where each node is itself a MoG (3 dim (x,y,z), N components) )

-         Node2 is related to Node1 in the following way:

o       Both have the same number of Mixture Components

o       The relation between both nodes is on the random var means:

For each Mixture Component of Node2, its meansNode2 = f( meansNode1 ), where f is a basic factor such as: sum, product...

-         As a simplification of the problem, I am considering the case where the distribution of means of Node2 is the same as Node1, but displaced by the constant vector K: meansNode2 = meansNode1 + K. The problem is that operator '+' does not work for the data types: VariableArray<Vector>...

-         Therefore, I have tried to decompose the mean of each Mixture component into 3 independent random vars (one for each dimension) with type: Variable<double>, but then I do not know how to combine them to compose the Variable<Vector> mean that I need to create the MoG.

-         In the real problem, I would like each Mixture component of node2 to have a mean which is a combination of the mean parameters from the node1, as depicted in the attached figure...

Is it possible to do that? Any other approach to tackle the problem?

Javier Friday, June 3, 2011 6:32 PM

• JaviOliver replied on 03-11-2011 11:13 AM

Thanks Tom.

When debugging, I see that z_n__cases_B and the first element of z_uses_B[n] are NaN.

The critical function that makes them NaN seems to be the following, but I do not understand what is happening...

for(int n = 0; n<100; n++) {

// Message to 'z_n__cases' from ReplicateWithMarginal factor

z_n__cases_B[n] = ReplicateOp.DefAverageLogarithm<DistributionStructArray<Bernoulli,bool>>(z_n__cases_uses_B[n], z_n__cases_B[n]);

}

Here is the code:

int ObservedNode = 2; // = 1 (Node1 is observed) or = 2 (Node2 is observed)

Range k = new Range(2).Named("k");

Range d = new Range(3).Named("d");

///////////////////////

// Node 1

///////////////////////

// Mixture component means

var means1 = Variable.Array(Variable.Array<double>(d), k);

means1[k][d] = Variable.GaussianFromMeanAndPrecision(2.0, 0.01).ForEach(k, d);

VariableArray<Vector> vmeans1 = Variable.Array<Vector>(k).Named("vmeans1");

vmeans1[k] = Variable.Vector(means1[k]);

// Mixture component precisions

VariableArray<PositiveDefiniteMatrix> precs = Variable.Array<PositiveDefiniteMatrix>(k).Named("precs");

precs[k] = Variable.WishartFromShapeAndScale(100.0, PositiveDefiniteMatrix.IdentityScaledBy(d.SizeAsInt, 0.01)).ForEach(k);

// Mixture weights

Variable<Vector> weights = Variable.Dirichlet(k, new double[] { 1, 1 }).Named("weights");

// Create a variable array which will hold the data

Range n = new Range(100).Named("n");

VariableArray<Vector> data = Variable.Array<Vector>(n).Named("x");

// Create latent indicator variable for each data point

VariableArray<int> z = Variable.Array<int>(n).Named("z");

// The mixture of Gaussians model

using (Variable.ForEach(n))

{

z[n] = Variable.Discrete(weights);

using (Variable.Switch(z[n]))

{

data[n] = Variable.VectorGaussianFromMeanAndPrecision(vmeans1[z[n]], precs[z[n]]);

}

}

// Attach some generated data

if( ObservedNode == 1 )

data.ObservedValue = GenerateData(n.SizeAsInt);

// Initialise messages randomly so as to break symmetry

Discrete[] zinit = new Discrete[n.SizeAsInt];

for (int i = 0; i < zinit.Length; i++)

zinit[i] = Discrete.PointMass(Rand.Int(k.SizeAsInt), k.SizeAsInt);

z.InitialiseTo(Distribution<int>.Array(zinit));

/////////////////////////////

// Node 2

/////////////////////////////

// Mixture component means

var means2 = Variable.Array(Variable.Array<double>(d), k);

// Set the relation among variables

// means2[k][d] = means1[k][d] + 1.0;      // This works!

means2 = means1 + 1.0;         // This not...

VariableArray<Vector> vmeans2 = Variable.Array<Vector>(k).Named("vmeans2");

vmeans2[k] = Variable.Vector(means2[k]);

// Mixture component precisions

VariableArray<PositiveDefiniteMatrix> precs2 = Variable.Array<PositiveDefiniteMatrix>(k).Named("precs2");

precs2[k] = Variable.WishartFromShapeAndScale(100.0, PositiveDefiniteMatrix.IdentityScaledBy(d.SizeAsInt, 0.01)).ForEach(k);

// Mixture weights

Variable<Vector> weights2 = Variable.Dirichlet(k, new double[] { 1, 1 }).Named("weights2");

// Create a variable array which will hold the data

Range n2 = new Range(100).Named("n2");

VariableArray<Vector> data2 = Variable.Array<Vector>(n2).Named("x2");

// Create latent indicator variable for each data point

VariableArray<int> z2 = Variable.Array<int>(n2).Named("z2");

// The mixture of Gaussians model

using (Variable.ForEach(n2))

{

z2[n2] = Variable.Discrete(weights2);

using (Variable.Switch(z2[n2]))

{

data2[n2] = Variable.VectorGaussianFromMeanAndPrecision(vmeans2[z2[n2]], precs2[z2[n2]]);

}

}

// Attach some generated data

if( ObservedNode == 2 )

data2.ObservedValue = GenerateData(n2.SizeAsInt);

// Initialise messages randomly so as to break symmetry

Discrete[] zinit2 = new Discrete[n2.SizeAsInt];

for (int i = 0; i < zinit2.Length; i++)

zinit2[i] = Discrete.PointMass(Rand.Int(k.SizeAsInt), k.SizeAsInt);

z2.InitialiseTo(Distribution<int>.Array(zinit2));

/////////////////////////////

// The inference

InferenceEngine ie = new InferenceEngine(new VariationalMessagePassing());

ie.NumberOfIterations = 50;

//ie.Compiler.GenerateInMemory = false;

//ie.Compiler.WriteSourceFiles = true;

//ie.Compiler.IncludeDebugInformation = true;

if (!(ie.Algorithm is ExpectationPropagation))

{

ie.InferAll(weights, vmeans1, precs, weights2, vmeans2, precs2);

Console.WriteLine("Dist over pi=" + ie.Infer(weights));

Console.WriteLine("Dist over means=\n" + ie.Infer(vmeans1));

Console.WriteLine("Dist over precs=\n" + ie.Infer(precs));

Console.WriteLine("Dist over pi=" + ie.Infer(weights2));

Console.WriteLine("Dist over means=\n" + ie.Infer(vmeans2));

Console.WriteLine("Dist over precs=\n" + ie.Infer(precs2));

}

else

Console.WriteLine("This example is not supported by Expectation Propagation");

}

/// <summary>

/// Generates a data set from a particular true model.

/// </summary>

/// <summary>
/// Draws nData samples from a fixed two-component mixture of 3-D Gaussians
/// (mixing weight 0.6 on the first component). The Infer.NET random number
/// generator is restarted with a fixed seed, so the data set is reproducible.
/// </summary>
/// <param name="nData">Number of sample vectors to generate.</param>
/// <returns>Array of nData sampled 3-D vectors.</returns>
public Vector[] GenerateData(int nData)
{
    // Component means.
    Vector mean1 = Vector.FromArray(2.0, 3.0, 4.0);
    Vector mean2 = Vector.FromArray(7.0, 5.0, 5.0);

    // Component precision matrices (row-major, symmetric positive definite).
    double[] precVals1 =
    {
        3.0, 0.2, 0.2,
        0.2, 2.0, 0.3,
        0.2, 0.3, 2.5
    };
    double[] precVals2 =
    {
        2.0, 0.4, 0.4,
        0.4, 1.5, 0.1,
        0.4, 0.1, 2.0
    };
    PositiveDefiniteMatrix prec1 = new PositiveDefiniteMatrix(new Matrix(3, 3, precVals1));
    PositiveDefiniteMatrix prec2 = new PositiveDefiniteMatrix(new Matrix(3, 3, precVals2));

    VectorGaussian component1 = VectorGaussian.FromMeanAndPrecision(mean1, prec1);
    VectorGaussian component2 = VectorGaussian.FromMeanAndPrecision(mean2, prec2);

    // Mixture selector: true -> component 1, sampled with probability 0.6.
    Bernoulli selector = new Bernoulli(0.6);

    // Restart the Infer.NET random number generator for reproducibility.
    Rand.Restart(12347);

    Vector[] samples = new Vector[nData];
    for (int i = 0; i < nData; i++)
    {
        // Pick a component, then draw one vector from it.
        samples[i] = selector.Sample() ? component1.Sample() : component2.Sample();
    }
    return samples;
}

Friday, June 3, 2011 6:33 PM

### All replies

• John Guiver replied on 03-08-2011 10:19 AM

Hi Javi

You can convert between Vector variables and array variables as follows. This may get you some of the way, but you may have to end up implementing your own factor.

John

Range k = new Range(2);
Range d = new Range(3);
var means1 = Variable.Array(Variable.Array<double>(d), k);
var means2 = Variable.Array(Variable.Array<double>(d), k);
means1[k][d] =
Variable.GaussianFromMeanAndPrecision(0.0, 0.01).ForEach(k, d);
means2[k][d] = means1[k][d] + 1.0;

VariableArray<Vector> vmeans1 = Variable.Array<Vector>(k);
VariableArray<Vector> vmeans2 = Variable.Array<Vector>(k).Named("vmeans2");
vmeans1[k] =
Variable.Vector(means1[k]);
vmeans2[k] =
Variable.Vector(means2[k]);

Friday, June 3, 2011 6:32 PM
• JaviOliver replied on 03-09-2011 4:42 AM

Thanks John for your quick reply. It has helped me a lot!

Javi

Friday, June 3, 2011 6:32 PM
• JaviOliver replied on 03-11-2011 4:11 AM

Hi John,

Following with the example of the picture, when I set the relation between means as:

means2[k][d] = means1[k][d] + 1.0;

the inference is performed perfectly when the observed data is Node1 and also when it is Node2.

However, if I set the relations as:
means2 = means1 + 1.0;

when Node1 is observed, the Inference is done well, but when Node2 is observed, I have the following error:

System.DivideByZeroException at Infer.Runtime.dll  (attempt to divide by zero)

The exception is found at: Model_VMP.cs,

for(int n = 0; n<100; n++) {

// Message to 'z_uses' from CasesInt factor

z_uses_B[n] = IntCasesOp.IAverageLogarithm(z_n__cases_B[n], z_uses_B[n]);

}

What am I doing wrong?

Thanks!

Friday, June 3, 2011 6:32 PM
• minka replied on 03-11-2011 4:56 AM

If you send us the full model, we can try to reproduce this problem.  Otherwise, you can enable debugging and when Visual Studio stops on this error, inspect the first argument to IAverageLogarithm.  My guess is that one element is Infinity or NaN.  You can trace back the source of this NaN in the generated code and see what function it came from.  Perhaps one of the covariance matrices is singular?

Friday, June 3, 2011 6:32 PM
• JaviOliver replied on 03-11-2011 11:06 AM

Thanks Tom.

I found that z_n__cases_B and the first element of z_uses_B[n] are nan. I am trying to find the cause... I see that this is the critical function but do not understand what is happening...

for(int n = 0; n<100; n++) {
// Message to 'z_n__cases' from ReplicateWithMarginal factor
z_n__cases_B[n] = ReplicateOp.DefAverageLogarithm<DistributionStructArray<Bernoulli,bool>>(z_n__cases_uses_B[n], z_n__cases_B[n]);
}

Here is the code:

int ObservedNode = 2; // = 1 (Node1 is observed) or = 2 (Node2 is observed)

Range k = new Range(2).Named("k");

Range d = new Range(3).Named("d");

///////////////////////

// Node 1

///////////////////////

// Mixture component means

var means1 = Variable.Array(Variable.Array<double>(d), k);

means1[k][d] = Variable.GaussianFromMeanAndPrecision(2.0, 0.01).ForEach(k, d);

VariableArray<Vector> vmeans1 = Variable.Array<Vector>(k).Named("vmeans1");

vmeans1[k] = Variable.Vector(means1[k]);

// Mixture component precisions

VariableArray<PositiveDefiniteMatrix> precs = Variable.Array<PositiveDefiniteMatrix>(k).Named("precs");

precs[k] = Variable.WishartFromShapeAndScale(100.0, PositiveDefiniteMatrix.IdentityScaledBy(d.SizeAsInt, 0.01)).ForEach(k);

// Mixture weights

Variable<Vector> weights = Variable.Dirichlet(k, new double[] { 1, 1 }).Named("weights");

// Create a variable array which will hold the data

Range n = new Range(100).Named("n");

VariableArray<Vector> data = Variable.Array<Vector>(n).Named("x");

// Create latent indicator variable for each data point

VariableArray<int> z = Variable.Array<int>(n).Named("z");

// The mixture of Gaussians model

using (Variable.ForEach(n))

{

z[n] = Variable.Discrete(weights);

using (Variable.Switch(z[n]))

{

data[n] = Variable.VectorGaussianFromMeanAndPrecision(vmeans1[z[n]], precs[z[n]]);

}

}

// Attach some generated data

if( ObservedNode == 1 )

data.ObservedValue = GenerateData(n.SizeAsInt);

// Initialise messages randomly so as to break symmetry

Discrete[] zinit = new Discrete[n.SizeAsInt];

for (int i = 0; i < zinit.Length; i++)

zinit[i] = Discrete.PointMass(Rand.Int(k.SizeAsInt), k.SizeAsInt);

z.InitialiseTo(Distribution<int>.Array(zinit));

/////////////////////////////

// Node 2

/////////////////////////////

// Mixture component means

var means2 = Variable.Array(Variable.Array<double>(d), k);

// Set the relation among variables

// means2[k][d] = means1[k][d] + 1.0;      // This works!

means2 = means1 + 1.0;         // This not...

VariableArray<Vector> vmeans2 = Variable.Array<Vector>(k).Named("vmeans2");

vmeans2[k] = Variable.Vector(means2[k]);

// Mixture component precisions

VariableArray<PositiveDefiniteMatrix> precs2 = Variable.Array<PositiveDefiniteMatrix>(k).Named("precs2");

precs2[k] = Variable.WishartFromShapeAndScale(100.0, PositiveDefiniteMatrix.IdentityScaledBy(d.SizeAsInt, 0.01)).ForEach(k);

// Mixture weights

Variable<Vector> weights2 = Variable.Dirichlet(k, new double[] { 1, 1 }).Named("weights2");

// Create a variable array which will hold the data

Range n2 = new Range(100).Named("n2");

VariableArray<Vector> data2 = Variable.Array<Vector>(n2).Named("x2");

// Create latent indicator variable for each data point

VariableArray<int> z2 = Variable.Array<int>(n2).Named("z2");

// The mixture of Gaussians model

using (Variable.ForEach(n2))

{

z2[n2] = Variable.Discrete(weights2);

using (Variable.Switch(z2[n2]))

{

data2[n2] = Variable.VectorGaussianFromMeanAndPrecision(vmeans2[z2[n2]], precs2[z2[n2]]);

}

}

// Attach some generated data

if( ObservedNode == 2 )

data2.ObservedValue = GenerateData(n2.SizeAsInt);

// Initialise messages randomly so as to break symmetry

Discrete[] zinit2 = new Discrete[n2.SizeAsInt];

for (int i = 0; i < zinit2.Length; i++)

zinit2[i] = Discrete.PointMass(Rand.Int(k.SizeAsInt), k.SizeAsInt);

z2.InitialiseTo(Distribution<int>.Array(zinit2));

/////////////////////////////

// The inference

InferenceEngine ie = new InferenceEngine(new VariationalMessagePassing());

ie.NumberOfIterations = 50;

//ie.Compiler.GenerateInMemory = false;

//ie.Compiler.WriteSourceFiles = true;

//ie.Compiler.IncludeDebugInformation = true;

if (!(ie.Algorithm is ExpectationPropagation))

{

ie.InferAll(weights, vmeans1, precs, weights2, vmeans2, precs2);

Console.WriteLine("Dist over pi=" + ie.Infer(weights));

Console.WriteLine("Dist over means=\n" + ie.Infer(vmeans1));

Console.WriteLine("Dist over precs=\n" + ie.Infer(precs));

Console.WriteLine("Dist over pi=" + ie.Infer(weights2));

Console.WriteLine("Dist over means=\n" + ie.Infer(vmeans2));

Console.WriteLine("Dist over precs=\n" + ie.Infer(precs2));

}

else

Console.WriteLine("This example is not supported by Expectation Propagation");

}

/// <summary>

/// Generates a data set from a particular true model.

/// </summary>

/// <summary>
/// Draws nData samples from a fixed two-component mixture of 3-D Gaussians
/// (mixing weight 0.6 on the first component). The Infer.NET random number
/// generator is restarted with a fixed seed, so the data set is reproducible.
/// </summary>
/// <param name="nData">Number of sample vectors to generate.</param>
/// <returns>Array of nData sampled 3-D vectors.</returns>
public Vector[] GenerateData(int nData)
{
    // Component means.
    Vector mean1 = Vector.FromArray(2.0, 3.0, 4.0);
    Vector mean2 = Vector.FromArray(7.0, 5.0, 5.0);

    // Component precision matrices (row-major, symmetric positive definite).
    double[] precVals1 =
    {
        3.0, 0.2, 0.2,
        0.2, 2.0, 0.3,
        0.2, 0.3, 2.5
    };
    double[] precVals2 =
    {
        2.0, 0.4, 0.4,
        0.4, 1.5, 0.1,
        0.4, 0.1, 2.0
    };
    PositiveDefiniteMatrix prec1 = new PositiveDefiniteMatrix(new Matrix(3, 3, precVals1));
    PositiveDefiniteMatrix prec2 = new PositiveDefiniteMatrix(new Matrix(3, 3, precVals2));

    VectorGaussian component1 = VectorGaussian.FromMeanAndPrecision(mean1, prec1);
    VectorGaussian component2 = VectorGaussian.FromMeanAndPrecision(mean2, prec2);

    // Mixture selector: true -> component 1, sampled with probability 0.6.
    Bernoulli selector = new Bernoulli(0.6);

    // Restart the Infer.NET random number generator for reproducibility.
    Rand.Restart(12347);

    Vector[] samples = new Vector[nData];
    for (int i = 0; i < nData; i++)
    {
        // Pick a component, then draw one vector from it.
        samples[i] = selector.Sample() ? component1.Sample() : component2.Sample();
    }
    return samples;
}

Friday, June 3, 2011 6:33 PM
• JaviOliver replied on 03-11-2011 11:13 AM

Thanks Tom.

When debugging, I see that z_n__cases_B and the first element of z_uses_B[n] are NaN.

The critical function that makes them NaN seems to be the following, but I do not understand what is happening...

for(int n = 0; n<100; n++) {

// Message to 'z_n__cases' from ReplicateWithMarginal factor

z_n__cases_B[n] = ReplicateOp.DefAverageLogarithm<DistributionStructArray<Bernoulli,bool>>(z_n__cases_uses_B[n], z_n__cases_B[n]);

}

Here is the code:

int ObservedNode = 2; // = 1 (Node1 is observed) or = 2 (Node2 is observed)

Range k = new Range(2).Named("k");

Range d = new Range(3).Named("d");

///////////////////////

// Node 1

///////////////////////

// Mixture component means

var means1 = Variable.Array(Variable.Array<double>(d), k);

means1[k][d] = Variable.GaussianFromMeanAndPrecision(2.0, 0.01).ForEach(k, d);

VariableArray<Vector> vmeans1 = Variable.Array<Vector>(k).Named("vmeans1");

vmeans1[k] = Variable.Vector(means1[k]);

// Mixture component precisions

VariableArray<PositiveDefiniteMatrix> precs = Variable.Array<PositiveDefiniteMatrix>(k).Named("precs");

precs[k] = Variable.WishartFromShapeAndScale(100.0, PositiveDefiniteMatrix.IdentityScaledBy(d.SizeAsInt, 0.01)).ForEach(k);

// Mixture weights

Variable<Vector> weights = Variable.Dirichlet(k, new double[] { 1, 1 }).Named("weights");

// Create a variable array which will hold the data

Range n = new Range(100).Named("n");

VariableArray<Vector> data = Variable.Array<Vector>(n).Named("x");

// Create latent indicator variable for each data point

VariableArray<int> z = Variable.Array<int>(n).Named("z");

// The mixture of Gaussians model

using (Variable.ForEach(n))

{

z[n] = Variable.Discrete(weights);

using (Variable.Switch(z[n]))

{

data[n] = Variable.VectorGaussianFromMeanAndPrecision(vmeans1[z[n]], precs[z[n]]);

}

}

// Attach some generated data

if( ObservedNode == 1 )

data.ObservedValue = GenerateData(n.SizeAsInt);

// Initialise messages randomly so as to break symmetry

Discrete[] zinit = new Discrete[n.SizeAsInt];

for (int i = 0; i < zinit.Length; i++)

zinit[i] = Discrete.PointMass(Rand.Int(k.SizeAsInt), k.SizeAsInt);

z.InitialiseTo(Distribution<int>.Array(zinit));

/////////////////////////////

// Node 2

/////////////////////////////

// Mixture component means

var means2 = Variable.Array(Variable.Array<double>(d), k);

// Set the relation among variables

// means2[k][d] = means1[k][d] + 1.0;      // This works!

means2 = means1 + 1.0;         // This not...

VariableArray<Vector> vmeans2 = Variable.Array<Vector>(k).Named("vmeans2");

vmeans2[k] = Variable.Vector(means2[k]);

// Mixture component precisions

VariableArray<PositiveDefiniteMatrix> precs2 = Variable.Array<PositiveDefiniteMatrix>(k).Named("precs2");

precs2[k] = Variable.WishartFromShapeAndScale(100.0, PositiveDefiniteMatrix.IdentityScaledBy(d.SizeAsInt, 0.01)).ForEach(k);

// Mixture weights

Variable<Vector> weights2 = Variable.Dirichlet(k, new double[] { 1, 1 }).Named("weights2");

// Create a variable array which will hold the data

Range n2 = new Range(100).Named("n2");

VariableArray<Vector> data2 = Variable.Array<Vector>(n2).Named("x2");

// Create latent indicator variable for each data point

VariableArray<int> z2 = Variable.Array<int>(n2).Named("z2");

// The mixture of Gaussians model

using (Variable.ForEach(n2))

{

z2[n2] = Variable.Discrete(weights2);

using (Variable.Switch(z2[n2]))

{

data2[n2] = Variable.VectorGaussianFromMeanAndPrecision(vmeans2[z2[n2]], precs2[z2[n2]]);

}

}

// Attach some generated data

if( ObservedNode == 2 )

data2.ObservedValue = GenerateData(n2.SizeAsInt);

// Initialise messages randomly so as to break symmetry

Discrete[] zinit2 = new Discrete[n2.SizeAsInt];

for (int i = 0; i < zinit2.Length; i++)

zinit2[i] = Discrete.PointMass(Rand.Int(k.SizeAsInt), k.SizeAsInt);

z2.InitialiseTo(Distribution<int>.Array(zinit2));

/////////////////////////////

// The inference

InferenceEngine ie = new InferenceEngine(new VariationalMessagePassing());

ie.NumberOfIterations = 50;

//ie.Compiler.GenerateInMemory = false;

//ie.Compiler.WriteSourceFiles = true;

//ie.Compiler.IncludeDebugInformation = true;

if (!(ie.Algorithm is ExpectationPropagation))

{

ie.InferAll(weights, vmeans1, precs, weights2, vmeans2, precs2);

Console.WriteLine("Dist over pi=" + ie.Infer(weights));

Console.WriteLine("Dist over means=\n" + ie.Infer(vmeans1));

Console.WriteLine("Dist over precs=\n" + ie.Infer(precs));

Console.WriteLine("Dist over pi=" + ie.Infer(weights2));

Console.WriteLine("Dist over means=\n" + ie.Infer(vmeans2));

Console.WriteLine("Dist over precs=\n" + ie.Infer(precs2));

}

else

Console.WriteLine("This example is not supported by Expectation Propagation");

}

/// <summary>

/// Generates a data set from a particular true model.

/// </summary>

/// <summary>
/// Draws nData samples from a fixed two-component mixture of 3-D Gaussians
/// (mixing weight 0.6 on the first component). The Infer.NET random number
/// generator is restarted with a fixed seed, so the data set is reproducible.
/// </summary>
/// <param name="nData">Number of sample vectors to generate.</param>
/// <returns>Array of nData sampled 3-D vectors.</returns>
public Vector[] GenerateData(int nData)
{
    // Component means.
    Vector mean1 = Vector.FromArray(2.0, 3.0, 4.0);
    Vector mean2 = Vector.FromArray(7.0, 5.0, 5.0);

    // Component precision matrices (row-major, symmetric positive definite).
    double[] precVals1 =
    {
        3.0, 0.2, 0.2,
        0.2, 2.0, 0.3,
        0.2, 0.3, 2.5
    };
    double[] precVals2 =
    {
        2.0, 0.4, 0.4,
        0.4, 1.5, 0.1,
        0.4, 0.1, 2.0
    };
    PositiveDefiniteMatrix prec1 = new PositiveDefiniteMatrix(new Matrix(3, 3, precVals1));
    PositiveDefiniteMatrix prec2 = new PositiveDefiniteMatrix(new Matrix(3, 3, precVals2));

    VectorGaussian component1 = VectorGaussian.FromMeanAndPrecision(mean1, prec1);
    VectorGaussian component2 = VectorGaussian.FromMeanAndPrecision(mean2, prec2);

    // Mixture selector: true -> component 1, sampled with probability 0.6.
    Bernoulli selector = new Bernoulli(0.6);

    // Restart the Infer.NET random number generator for reproducibility.
    Rand.Restart(12347);

    Vector[] samples = new Vector[nData];
    for (int i = 0; i < nData; i++)
    {
        // Pick a component, then draw one vector from it.
        samples[i] = selector.Sample() ? component1.Sample() : component2.Sample();
    }
    return samples;
}

Friday, June 3, 2011 6:33 PM
• minka replied on 03-14-2011 5:40 AM

Thanks for sending this in.  If you trace further back, you'll see that the problem actually comes from the call to VectorGaussianOp.PrecisionMean, which is using precs before it has been initialized.  This is a bug in the model compiler and will be fixed in the next version.  Until then, you can work around the bug by explicitly initialising precs.  For example, this code initialises with the prior:

precs[k] = Variable.WishartFromShapeAndScale(100.0, PositiveDefiniteMatrix.IdentityScaledBy(d.SizeAsInt, 0.01)).ForEach(k);

precs.InitialiseTo(Distribution<PositiveDefiniteMatrix>.Array(Util.ArrayInit(k.SizeAsInt, i => Wishart.FromShapeAndScale(100.0, PositiveDefiniteMatrix.IdentityScaledBy(d.SizeAsInt, 0.01)))));

Friday, June 3, 2011 6:33 PM