SG_ADD refactor #4417

Merged: 7 commits merged on Nov 21, 2018
Changes from 2 commits
34 changes: 31 additions & 3 deletions src/shogun/base/SGObject.h
@@ -57,10 +57,36 @@ template <class T> class SGStringList;
#define SG_UNREF_NO_NULL(x) { if (x) { (x)->unref(); } }

/*******************************************************************************
* Macros for registering parameters/model selection parameters
* Macros for registering parameter properties
******************************************************************************/

#define SG_ADD(param, name, description, param_properties) \
#ifdef _MSC_VER

#define VA_NARGS(...) INTERNAL_EXPAND_ARGS_PRIVATE(INTERNAL_ARGS_AUGMENTER(__VA_ARGS__))
#define INTERNAL_ARGS_AUGMENTER(...) unused, __VA_ARGS__
#define INTERNAL_EXPAND(x) x
#define INTERNAL_EXPAND_ARGS_PRIVATE(...) INTERNAL_EXPAND(INTERNAL_GET_ARG_COUNT_PRIVATE(__VA_ARGS__, 4, 3, 2, 1, 0))
#define INTERNAL_GET_ARG_COUNT_PRIVATE(_0_, _1_, _2_, _3_, _4_, count, ...) count

#else

#define VA_NARGS_IMPL(_1, _2, _3, _4, N, ...) N
#define VA_NARGS(...) VA_NARGS_IMPL(__VA_ARGS__, 4, 3, 2, 1)

#endif

#define VARARG_IMPL2(base, count, ...) base##count(__VA_ARGS__)
#define VARARG_IMPL(base, count, ...) VARARG_IMPL2(base, count, __VA_ARGS__)
#define VARARG(base, ...) VARARG_IMPL(base, VA_NARGS(__VA_ARGS__), __VA_ARGS__)

#define SG_ADD3(param, name, description) \
{ \
this->m_parameters->add(param, name, description); \
this->watch_param( \
name, param, AnyParameterProperties()); \
Review comment (Member): yes this is good

}

#define SG_ADD4(param, name, description, param_properties) \
{ \
AnyParameterProperties pprop = \
AnyParameterProperties(description, param_properties); \
@@ -72,8 +98,10 @@ template <class T> class SGStringList;
this->m_gradient_parameters->add(param, name, description); \
}

#define SG_ADD(...) VARARG(SG_ADD, __VA_ARGS__)

/*******************************************************************************
* End of macros for registering parameters/model selection parameters
* End of macros for registering parameter properties
******************************************************************************/

/** @brief Class SGObject is the base class of all shogun objects.
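
The refactor above selects between SG_ADD3 and SG_ADD4 purely by counting the macro arguments. Below is a minimal, self-contained sketch of that argument-counting dispatch, using the non-MSVC counting macros from the diff; REGISTER, REGISTER3, REGISTER4 and register_param are hypothetical stand-ins for SG_ADD and Shogun's registration calls, not actual Shogun API.

// Minimal sketch (not Shogun code) of the argument-count dispatch used by SG_ADD.
// Compile with g++/clang (C++11); the _MSC_VER branch in the diff exists because
// MSVC expands __VA_ARGS__ differently and needs the extra INTERNAL_EXPAND step.
#include <iostream>
#include <string>

#define VA_NARGS_IMPL(_1, _2, _3, _4, N, ...) N
#define VA_NARGS(...) VA_NARGS_IMPL(__VA_ARGS__, 4, 3, 2, 1)

#define VARARG_IMPL2(base, count, ...) base##count(__VA_ARGS__)
#define VARARG_IMPL(base, count, ...) VARARG_IMPL2(base, count, __VA_ARGS__)
#define VARARG(base, ...) VARARG_IMPL(base, VA_NARGS(__VA_ARGS__), __VA_ARGS__)

// Two expansions selected purely by the number of arguments, mirroring
// SG_ADD3 (default properties) and SG_ADD4 (explicit properties).
#define REGISTER3(param, name, description) \
	register_param(param, name, description, "NONE")
#define REGISTER4(param, name, description, properties) \
	register_param(param, name, description, properties)
#define REGISTER(...) VARARG(REGISTER, __VA_ARGS__)

// Hypothetical registration sink; Shogun instead calls m_parameters->add()
// and watch_param() as shown in the diff above.
static void register_param(const void*, const std::string& name,
                           const std::string& description,
                           const std::string& properties)
{
	std::cout << name << ": " << description << " [" << properties << "]\n";
}

int main()
{
	double gamma = 0.1;
	bool use_bias = true;
	// 4 arguments: VA_NARGS(...) yields 4, so the call pastes into REGISTER4(...).
	REGISTER(&gamma, "gamma", "Regularization parameter", "HYPER");
	// 3 arguments: VA_NARGS(...) yields 3, so the call pastes into REGISTER3(...).
	REGISTER(&use_bias, "use_bias", "Indicates if bias is used.");
	return 0;
}

The expansion chain REGISTER(a, b, c) -> VARARG(REGISTER, a, b, c) -> REGISTER##3(a, b, c) is exactly the mechanism SG_ADD now relies on, which is why call sites with default properties can simply drop the trailing ParameterProperties() argument.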
4 changes: 2 additions & 2 deletions src/shogun/classifier/LDA.cpp
@@ -49,9 +49,9 @@ void CLDA::init()

SG_ADD(
(machine_int_t*)&m_method, "m_method",
"Method used for LDA calculation", ParameterProperties());
"Method used for LDA calculation");
Review comment (Member): yep, much cleaner

SG_ADD(&m_gamma, "m_gamma", "Regularization parameter", ParameterProperties::HYPER);
Review comment (Member): this is quite nice now

SG_ADD(&m_bdc_svd, "m_bdc_svd", "Use BDC-SVD algorithm", ParameterProperties());
SG_ADD(&m_bdc_svd, "m_bdc_svd", "Use BDC-SVD algorithm");
}

CLDA::~CLDA()
14 changes: 5 additions & 9 deletions src/shogun/classifier/PluginEstimate.cpp
@@ -21,18 +21,14 @@ CPluginEstimate::CPluginEstimate(float64_t pos_pseudo, float64_t neg_pseudo)
pos_model(NULL), neg_model(NULL), features(NULL)
{
SG_ADD(
&m_pos_pseudo, "pos_pseudo", "pseudo count for positive class",
ParameterProperties());
&m_pos_pseudo, "pos_pseudo", "pseudo count for positive class");
SG_ADD(
&m_neg_pseudo, "neg_pseudo", "pseudo count for negative class",
ParameterProperties());
&m_neg_pseudo, "neg_pseudo", "pseudo count for negative class");
SG_ADD(
&pos_model, "pos_model", "LinearHMM modelling positive class.",
ParameterProperties());
&pos_model, "pos_model", "LinearHMM modelling positive class.");
SG_ADD(
&neg_model, "neg_model", "LinearHMM modelling negative class.",
ParameterProperties());
SG_ADD(&features, "features", "String Features.", ParameterProperties());
&neg_model, "neg_model", "LinearHMM modelling negative class.");
SG_ADD(&features, "features", "String Features.");
}

CPluginEstimate::~CPluginEstimate()
24 changes: 13 additions & 11 deletions src/shogun/classifier/mkl/MKL.cpp
@@ -271,21 +271,23 @@ void CMKL::register_params()
rho = 0;
lp_initialized = false;

SG_ADD((CMachine**)&svm, "svm", "wrapper svm", ParameterProperties());
SG_ADD(&C_mkl, "C_mkl", "C mkl", ParameterProperties());
SG_ADD(&mkl_norm, "mkl_norm", "norm used in mkl", ParameterProperties());
SG_ADD(&ent_lambda, "ent_lambda", "elastic net sparsity trade-off parameter", ParameterProperties());
SG_ADD(&mkl_block_norm, "mkl_block_norm", "mkl sparse trade-off parameter", ParameterProperties());
SG_ADD((CMachine**)&svm, "svm", "wrapper svm");
SG_ADD(&C_mkl, "C_mkl", "C mkl", ParameterProperties::HYPER);
SG_ADD(&mkl_norm, "mkl_norm", "norm used in mkl");
SG_ADD(&ent_lambda, "ent_lambda", "elastic net sparsity trade-off parameter",
ParameterProperties::HYPER);
SG_ADD(&mkl_block_norm, "mkl_block_norm", "mkl sparse trade-off parameter",
ParameterProperties::HYPER);

m_parameters->add_vector(&beta_local, &beta_local_size, "beta_local", "subkernel weights on L1 term of elastic net mkl");
watch_param("beta_local", &beta_local, &beta_local_size);

SG_ADD(&mkl_iterations, "mkl_iterations", "number of mkl steps", ParameterProperties());
SG_ADD(&mkl_epsilon, "mkl_epsilon", "mkl epsilon", ParameterProperties());
SG_ADD(&interleaved_optimization, "interleaved_optimization", "whether to use mkl wrapper or interleaved opt.", ParameterProperties());
SG_ADD(&w_gap, "w_gap", "gap between interactions", ParameterProperties());
SG_ADD(&rho, "rho", "objective after mkl iterations", ParameterProperties());
SG_ADD(&lp_initialized, "lp_initialized", "if lp is Initialized", ParameterProperties());
SG_ADD(&mkl_iterations, "mkl_iterations", "number of mkl steps");
SG_ADD(&mkl_epsilon, "mkl_epsilon", "mkl epsilon");
SG_ADD(&interleaved_optimization, "interleaved_optimization", "whether to use mkl wrapper or interleaved opt.");
SG_ADD(&w_gap, "w_gap", "gap between interactions");
SG_ADD(&rho, "rho", "objective after mkl iterations");
SG_ADD(&lp_initialized, "lp_initialized", "if lp is Initialized");
// Missing: self (3rd party specific, handled in clone())
}

11 changes: 5 additions & 6 deletions src/shogun/classifier/svm/LibLinear.cpp
@@ -53,15 +53,14 @@ void CLibLinear::init()
SG_ADD(&C1, "C1", "C Cost constant 1.", ParameterProperties::HYPER);
SG_ADD(&C2, "C2", "C Cost constant 2.", ParameterProperties::HYPER);
SG_ADD(
&use_bias, "use_bias", "Indicates if bias is used.", ParameterProperties());
SG_ADD(&epsilon, "epsilon", "Convergence precision.", ParameterProperties());
&use_bias, "use_bias", "Indicates if bias is used.");
SG_ADD(&epsilon, "epsilon", "Convergence precision.");
SG_ADD(
&max_iterations, "max_iterations", "Max number of iterations.",
ParameterProperties());
SG_ADD(&m_linear_term, "linear_term", "Linear Term", ParameterProperties());
&max_iterations, "max_iterations", "Max number of iterations.");
SG_ADD(&m_linear_term, "linear_term", "Linear Term");
SG_ADD(
(machine_int_t*)&liblinear_solver_type, "liblinear_solver_type",
"Type of LibLinear solver.", ParameterProperties());
"Type of LibLinear solver.");
}

CLibLinear::~CLibLinear()
2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/LibSVM.cpp
@@ -36,7 +36,7 @@ CLibSVM::~CLibSVM()

void CLibSVM::register_params()
{
SG_ADD((machine_int_t*) &solver_type, "libsvm_solver_type", "LibSVM Solver type", ParameterProperties());
SG_ADD((machine_int_t*) &solver_type, "libsvm_solver_type", "LibSVM Solver type");
}

bool CLibSVM::train_machine(CFeatures* data)
2 changes: 1 addition & 1 deletion src/shogun/classifier/svm/OnlineLibLinear.cpp
@@ -66,7 +66,7 @@ void COnlineLibLinear::init()
SG_ADD(&C1, "C1", "C Cost constant 1.", ParameterProperties::HYPER);
SG_ADD(&C2, "C2", "C Cost constant 2.", ParameterProperties::HYPER);
SG_ADD(
&use_bias, "use_bias", "Indicates if bias is used.", ParameterProperties());
&use_bias, "use_bias", "Indicates if bias is used.");

PG = 0;
PGmax_old = CMath::INFTY;
14 changes: 7 additions & 7 deletions src/shogun/classifier/svm/OnlineSVMSGD.cpp
@@ -209,14 +209,14 @@ void COnlineSVMSGD::init()
SG_ADD(&C1, "C1", "Cost constant 1.", ParameterProperties::HYPER);
SG_ADD(&C2, "C2", "Cost constant 2.", ParameterProperties::HYPER);
SG_ADD(&lambda, "lambda", "Regularization parameter.", ParameterProperties::HYPER);
SG_ADD(&wscale, "wscale", "W scale", ParameterProperties());
SG_ADD(&bscale, "bscale", "b scale", ParameterProperties());
SG_ADD(&epochs, "epochs", "epochs", ParameterProperties());
SG_ADD(&skip, "skip", "skip", ParameterProperties());
SG_ADD(&count, "count", "count", ParameterProperties());
SG_ADD(&wscale, "wscale", "W scale");
SG_ADD(&bscale, "bscale", "b scale");
SG_ADD(&epochs, "epochs", "epochs");
SG_ADD(&skip, "skip", "skip");
SG_ADD(&count, "count", "count");
SG_ADD(
&use_bias, "use_bias", "Indicates if bias is used.", ParameterProperties());
&use_bias, "use_bias", "Indicates if bias is used.");
SG_ADD(
&use_regularized_bias, "use_regularized_bias",
"Indicates if bias is regularized.", ParameterProperties());
"Indicates if bias is regularized.");
}
4 changes: 2 additions & 2 deletions src/shogun/classifier/svm/SGDQN.cpp
@@ -229,6 +229,6 @@ void CSGDQN::init()
SG_ADD(&C1, "C1", "Cost constant 1.", ParameterProperties::HYPER);
SG_ADD(&C2, "C2", "Cost constant 2.", ParameterProperties::HYPER);
SG_ADD(&epochs, "epochs", "epochs", ParameterProperties::HYPER);
SG_ADD(&skip, "skip", "skip", ParameterProperties());
SG_ADD(&count, "count", "count", ParameterProperties());
SG_ADD(&skip, "skip", "skip");
SG_ADD(&count, "count", "count");
}
15 changes: 6 additions & 9 deletions src/shogun/classifier/svm/SVM.cpp
@@ -42,19 +42,16 @@ void CSVM::set_defaults(int32_t num_sv)
{
SG_ADD(&C1, "C1", "", ParameterProperties::HYPER);
SG_ADD(&C2, "C2", "", ParameterProperties::HYPER);
SG_ADD(&svm_loaded, "svm_loaded", "SVM is loaded.", ParameterProperties());
SG_ADD(&svm_loaded, "svm_loaded", "SVM is loaded.");
SG_ADD(&epsilon, "epsilon", "", ParameterProperties::HYPER);
SG_ADD(&tube_epsilon, "tube_epsilon",
"Tube epsilon for support vector regression.", ParameterProperties::HYPER);
SG_ADD(&nu, "nu", "", ParameterProperties::HYPER);
SG_ADD(&objective, "objective", "", ParameterProperties());
SG_ADD(&qpsize, "qpsize", "", ParameterProperties());
SG_ADD(&use_shrinking, "use_shrinking", "Shrinking shall be used.",
ParameterProperties());
SG_ADD((CSGObject**) &mkl, "mkl", "MKL object that svm optimizers need.",
ParameterProperties());
SG_ADD(&m_linear_term, "linear_term", "Linear term in qp.",
ParameterProperties());
SG_ADD(&objective, "objective", "");
SG_ADD(&qpsize, "qpsize", "");
SG_ADD(&use_shrinking, "use_shrinking", "Shrinking shall be used.");
SG_ADD((CSGObject**) &mkl, "mkl", "MKL object that svm optimizers need.");
SG_ADD(&m_linear_term, "linear_term", "Linear term in qp.");

callback=NULL;
mkl=NULL;
10 changes: 4 additions & 6 deletions src/shogun/classifier/svm/SVMOcas.cpp
@@ -348,14 +348,12 @@ void CSVMOcas::init()
SG_ADD(&C1, "C1", "Cost constant 1.", ParameterProperties::HYPER);
SG_ADD(&C2, "C2", "Cost constant 2.", ParameterProperties::HYPER);
SG_ADD(
&use_bias, "use_bias", "Indicates if bias is used.", ParameterProperties());
SG_ADD(&epsilon, "epsilon", "Convergence precision.", ParameterProperties());
&use_bias, "use_bias", "Indicates if bias is used.");
SG_ADD(&epsilon, "epsilon", "Convergence precision.");
SG_ADD(
&bufsize, "bufsize", "Maximum number of cutting planes.",
ParameterProperties());
&bufsize, "bufsize", "Maximum number of cutting planes.");
SG_ADD(
(machine_int_t*)&method, "method", "SVMOcas solver type.",
ParameterProperties());
(machine_int_t*)&method, "method", "SVMOcas solver type.");
}

float64_t CSVMOcas::compute_primal_objective() const
3 changes: 1 addition & 2 deletions src/shogun/clustering/GMM.cpp
@@ -826,6 +826,5 @@ void CGMM::register_params()
//TODO serialization broken
//m_parameters->add((SGVector<CSGObject*>*) &m_components, "m_components", "Mixture components");
SG_ADD(
&m_coefficients, "m_coefficients", "Mixture coefficients.",
ParameterProperties());
&m_coefficients, "m_coefficients", "Mixture coefficients.");
}
4 changes: 2 additions & 2 deletions src/shogun/clustering/KMeansBase.cpp
@@ -361,8 +361,8 @@ void CKMeansBase::init()
use_kmeanspp=false;
SG_ADD(&max_iter, "max_iter", "Maximum number of iterations", ParameterProperties::HYPER);
SG_ADD(&k, "k", "k, the number of clusters", ParameterProperties::HYPER);
SG_ADD(&dimensions, "dimensions", "Dimensions of data", ParameterProperties());
SG_ADD(&R, "radiuses", "Cluster radiuses", ParameterProperties());
SG_ADD(&dimensions, "dimensions", "Dimensions of data");
SG_ADD(&R, "radiuses", "Cluster radiuses");

watch_method("cluster_centers", &CKMeansBase::get_cluster_centers);
}
3 changes: 1 addition & 2 deletions src/shogun/clustering/KMeansMiniBatch.cpp
@@ -140,8 +140,7 @@ void CKMeansMiniBatch::init_mb_params()
batch_size=-1;

SG_ADD(
&batch_size, "batch_size", "batch size for mini-batch KMeans",
ParameterProperties());
&batch_size, "batch_size", "batch size for mini-batch KMeans");
}

bool CKMeansMiniBatch::train_machine(CFeatures* data)
4 changes: 2 additions & 2 deletions src/shogun/converter/FactorAnalysis.cpp
@@ -21,8 +21,8 @@ CFactorAnalysis::CFactorAnalysis() :

void CFactorAnalysis::init()
{
SG_ADD(&m_max_iteration, "max_iteration", "maximum number of iterations", ParameterProperties());
SG_ADD(&m_epsilon, "epsilon", "convergence parameter", ParameterProperties());
SG_ADD(&m_max_iteration, "max_iteration", "maximum number of iterations");
SG_ADD(&m_epsilon, "epsilon", "convergence parameter");
}

CFactorAnalysis::~CFactorAnalysis()
14 changes: 5 additions & 9 deletions src/shogun/converter/HashedDocConverter.cpp
@@ -57,15 +57,11 @@ void CHashedDocConverter::init(CTokenizer* tzer, int32_t hash_bits, bool normali
tokenizer = tzer;

SG_REF(tokenizer);
SG_ADD(&num_bits, "num_bits", "Number of bits of the hash",
ParameterProperties());
SG_ADD(&ngrams, "ngrams", "Number of consecutive tokens",
ParameterProperties());
SG_ADD(&tokens_to_skip, "tokens_to_skip", "Number of tokens to skip",
ParameterProperties());
SG_ADD(&should_normalize, "should_normalize", "Whether to normalize vectors or not",
ParameterProperties());
SG_ADD(&tokenizer, "tokenizer", "Tokenizer", ParameterProperties());
SG_ADD(&num_bits, "num_bits", "Number of bits of the hash");
SG_ADD(&ngrams, "ngrams", "Number of consecutive tokens");
SG_ADD(&tokens_to_skip, "tokens_to_skip", "Number of tokens to skip");
SG_ADD(&should_normalize, "should_normalize", "Whether to normalize vectors or not");
SG_ADD(&tokenizer, "tokenizer", "Tokenizer");
}

const char* CHashedDocConverter::get_name() const
2 changes: 1 addition & 1 deletion src/shogun/converter/LocallyLinearEmbedding.cpp
@@ -30,7 +30,7 @@ void CLocallyLinearEmbedding::init()
SG_ADD(&m_nullspace_shift, "nullspace_shift",
"nullspace finding regularization shift",ParameterProperties());
Review comment (Member): missed one here :)

SG_ADD(&m_reconstruction_shift, "reconstruction_shift",
"shift used to regularize reconstruction step", ParameterProperties());
"shift used to regularize reconstruction step");
}


4 changes: 2 additions & 2 deletions src/shogun/converter/ManifoldSculpting.cpp
@@ -23,11 +23,11 @@ CManifoldSculpting::CManifoldSculpting() :

void CManifoldSculpting::init()
{
SG_ADD(&m_k, "k", "number of neighbors", ParameterProperties());
SG_ADD(&m_k, "k", "number of neighbors");
SG_ADD(&m_squishing_rate, "quishing_rate",
"squishing rate",ParameterProperties());
Review comment (Member): and here

SG_ADD(&m_max_iteration, "max_iteration",
"maximum number of algorithm's iterations", ParameterProperties());
"maximum number of algorithm's iterations");
}

CManifoldSculpting::~CManifoldSculpting()
5 changes: 2 additions & 3 deletions src/shogun/converter/MultidimensionalScaling.cpp
@@ -29,10 +29,9 @@ CMultidimensionalScaling::CMultidimensionalScaling() : CEmbeddingConverter()

void CMultidimensionalScaling::init()
{
SG_ADD(&m_eigenvalues, "eigenvalues", "eigenvalues of last embedding",
ParameterProperties());
SG_ADD(&m_eigenvalues, "eigenvalues", "eigenvalues of last embedding");
SG_ADD(&m_landmark, "landmark",
"indicates if landmark approximation should be used", ParameterProperties());
"indicates if landmark approximation should be used");
SG_ADD(&m_landmark_number, "landmark_number",
"the number of landmarks for approximation", ParameterProperties::HYPER);
}
11 changes: 4 additions & 7 deletions src/shogun/converter/StochasticProximityEmbedding.cpp
@@ -27,13 +27,10 @@ CStochasticProximityEmbedding::CStochasticProximityEmbedding() :

void CStochasticProximityEmbedding::init()
{
SG_ADD(&m_k, "m_k", "Number of neighbors", ParameterProperties());
SG_ADD((machine_int_t*) &m_strategy, "m_strategy", "SPE strategy",
ParameterProperties());
SG_ADD(&m_tolerance, "m_tolerance", "Regularization parameter",
ParameterProperties());
SG_ADD(&m_max_iteration, "max_iteration", "maximum number of iterations",
ParameterProperties());
SG_ADD(&m_k, "m_k", "Number of neighbors");
SG_ADD((machine_int_t*) &m_strategy, "m_strategy", "SPE strategy");
SG_ADD(&m_tolerance, "m_tolerance", "Regularization parameter");
SG_ADD(&m_max_iteration, "max_iteration", "maximum number of iterations");
}

CStochasticProximityEmbedding::~CStochasticProximityEmbedding()
src/shogun/converter/TDistributedStochasticNeighborEmbedding.cpp
@@ -21,8 +21,8 @@ CTDistributedStochasticNeighborEmbedding::CTDistributedStochasticNeighborEmbeddi

void CTDistributedStochasticNeighborEmbedding::init()
{
SG_ADD(&m_perplexity, "perplexity", "perplexity", ParameterProperties());
SG_ADD(&m_theta, "theta", "learning rate", ParameterProperties());
SG_ADD(&m_perplexity, "perplexity", "perplexity");
SG_ADD(&m_theta, "theta", "learning rate");
}

CTDistributedStochasticNeighborEmbedding::~CTDistributedStochasticNeighborEmbedding()
2 changes: 1 addition & 1 deletion src/shogun/converter/ica/FastICA.cpp
@@ -49,7 +49,7 @@ CFastICA::CFastICA() : CICAConverter()
void CFastICA::init()
{
whiten = true;
SG_ADD(&whiten, "whiten", "flag indicating whether to whiten the data", ParameterProperties());
SG_ADD(&whiten, "whiten", "flag indicating whether to whiten the data");
}

CFastICA::~CFastICA()
6 changes: 3 additions & 3 deletions src/shogun/converter/ica/ICAConverter.cpp
@@ -23,9 +23,9 @@ void CICAConverter::init()
max_iter = 200;
tol = 1e-6;

SG_ADD(&m_mixing_matrix, "mixing_matrix", "the mixing matrix", ParameterProperties());
SG_ADD(&max_iter, "max_iter", "maximum number of iterations", ParameterProperties());
SG_ADD(&tol, "tol", "the convergence tolerance", ParameterProperties());
SG_ADD(&m_mixing_matrix, "mixing_matrix", "the mixing matrix");
SG_ADD(&max_iter, "max_iter", "maximum number of iterations");
SG_ADD(&tol, "tol", "the convergence tolerance");
}

CICAConverter::~CICAConverter()
2 changes: 1 addition & 1 deletion src/shogun/converter/ica/Jade.cpp
@@ -28,7 +28,7 @@ CJade::CJade() : CICAConverter()
void CJade::init()
{
m_cumulant_matrix = SGMatrix<float64_t>();
SG_ADD(&m_cumulant_matrix, "cumulant_matrix", "m_cumulant_matrix", ParameterProperties());
SG_ADD(&m_cumulant_matrix, "cumulant_matrix", "m_cumulant_matrix");
}

CJade::~CJade()