I have used a variety of tools for binary, multiclass and even incremental SVM problems. Today I found something quite nice in libSVM's binary case, although it is potentially a source of confusion.
It is common in machine learning to apply a sigmoid function to map a classifier's raw outputs onto a bounded, probability-like scale; the sigmoid's parameters can either be set empirically by defining the upper and lower bounds, or fitted from the data. libSVM fits them from the data (Platt scaling), which is great for saving you some time. The only thing to remember is that, because the fitting uses random shuffling and cross validation, small sets of data are likely to give you different results on each run.
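For reference, the fitted sigmoid maps a raw decision value f to a probability via P(y=1|f) = 1/(1+exp(A*f+B)), where A and B are the probA/probB parameters produced by the function below. A minimal sketch of applying it, mirroring libSVM's internal sigmoid_predict:

#include <cmath>

// A and B are libSVM's probA/probB for the binary problem; this mirrors
// the library's internal sigmoid_predict (numerically stable form).
static double sigmoid_predict(double decision_value, double A, double B)
{
	double fApB = decision_value*A+B;
	if (fApB >= 0)
		return exp(-fApB)/(1.0+exp(-fApB));
	else
		return 1.0/(1+exp(fApB));
}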
So the function to consider is this:
// Cross-validation decision values for probability estimates
static void svm_binary_svc_probability(
	const svm_problem *prob, const svm_parameter *param,
	double Cp, double Cn, double& probA, double& probB)
{
	int i;
	int nr_fold = 5;
	int *perm = Malloc(int,prob->l);
	double *dec_values = Malloc(double,prob->l);

	// random shuffle
	for(i=0;i<prob->l;i++) perm[i]=i;
	for(i=0;i<prob->l;i++)
	{
		int j = i+rand()%(prob->l-i);
		swap(perm[i],perm[j]);
	}
	for(i=0;i<nr_fold;i++)
	{
		int begin = i*prob->l/nr_fold;
		int end = (i+1)*prob->l/nr_fold;
		int j,k;
		struct svm_problem subprob;

		// train on everything outside the current fold
		subprob.l = prob->l-(end-begin);
		subprob.x = Malloc(struct svm_node*,subprob.l);
		subprob.y = Malloc(double,subprob.l);

		k=0;
		for(j=0;j<begin;j++)
		{
			subprob.x[k] = prob->x[perm[j]];
			subprob.y[k] = prob->y[perm[j]];
			++k;
		}
		for(j=end;j<prob->l;j++)
		{
			subprob.x[k] = prob->x[perm[j]];
			subprob.y[k] = prob->y[perm[j]];
			++k;
		}
		int p_count=0,n_count=0;
		for(j=0;j<k;j++)
			if(subprob.y[j]>0)
				p_count++;
			else
				n_count++;

		// if the training split is single-class, assign constant decision values
		if(p_count==0 && n_count==0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = 0;
		else if(p_count > 0 && n_count == 0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = 1;
		else if(p_count == 0 && n_count > 0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = -1;
		else
		{
			svm_parameter subparam = *param;
			subparam.probability=0;
			subparam.C=1.0;
			subparam.nr_weight=2;
			subparam.weight_label = Malloc(int,2);
			subparam.weight = Malloc(double,2);
			subparam.weight_label[0]=+1;
			subparam.weight_label[1]=-1;
			subparam.weight[0]=Cp;
			subparam.weight[1]=Cn;
			struct svm_model *submodel = svm_train(&subprob,&subparam);
			for(j=begin;j<end;j++)
			{
				svm_predict_values(submodel,prob->x[perm[j]],&(dec_values[perm[j]]));
				// ensure +1 -1 order; reason not using CV subroutine
				dec_values[perm[j]] *= submodel->label[0];
			}
			svm_free_and_destroy_model(&submodel);
			svm_destroy_param(&subparam);
		}
		free(subprob.x);
		free(subprob.y);
	}
	// fit the sigmoid (probA, probB) to the held-out decision values
	sigmoid_train(prob->l,dec_values,prob->y,probA,probB);
	free(dec_values);
	free(perm);
}
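One consequence worth spelling out: the shuffle above uses rand(), so unless you fix the seed yourself, two training runs on the same small dataset can produce different probA/probB and therefore different probability estimates. A minimal sketch of pinning that down (my own wrapper, not part of libSVM):

#include <cstdlib>
#include "svm.h"

// Hypothetical helper: fixing the C RNG seed makes the random shuffle,
// and hence the fitted probA/probB, repeatable across runs. Assumes
// prob and param are already set up with param->probability = 1.
struct svm_model *train_reproducibly(const struct svm_problem *prob,
                                     const struct svm_parameter *param)
{
	srand(0);                      // pin the seed used by rand() in the shuffle
	return svm_train(prob, param); // probability estimates now deterministic
}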
So if you have a small number of samples, which is the case in some circumstances, then the cross validation is where you hit problems. Of course you can simply re-implement it yourself, or you can add a few lines to skip the cross validation when there are too few samples.
Not the most elegant of code, but for the moment it will do. I chose to completely separate the two paths rather than nesting multiple ifs:

static void svm_binary_svc_probability(
	const svm_problem *prob, const svm_parameter *param,
	double Cp, double Cn, double& probA, double& probB)
{
	int i;
	int nr_fold = 5;
	int *perm = Malloc(int,prob->l);
	double *dec_values = Malloc(double,prob->l);

	// random shuffle
	for(i=0;i<prob->l;i++) perm[i]=i;
	for(i=0;i<prob->l;i++)
	{
		int j = i+rand()%(prob->l-i);
		swap(perm[i],perm[j]);
	}

	if (prob->l < (5*nr_fold))
	{
		// too few samples for cross validation: fit the sigmoid on
		// decision values from a single model trained on all the data
		int begin = 0;
		int end = prob->l;
		int j,k;
		struct svm_problem subprob;

		subprob.l = prob->l;
		subprob.x = Malloc(struct svm_node*,subprob.l);
		subprob.y = Malloc(double,subprob.l);
		k=0;
		for(j=0;j<prob->l;j++)
		{
			subprob.x[k] = prob->x[perm[j]];
			subprob.y[k] = prob->y[perm[j]];
			++k;
		}
		int p_count=0,n_count=0;
		for(j=0;j<k;j++)
			if(subprob.y[j]>0)
				p_count++;
			else
				n_count++;
		if(p_count==0 && n_count==0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = 0;
		else if(p_count > 0 && n_count == 0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = 1;
		else if(p_count == 0 && n_count > 0)
			for(j=begin;j<end;j++)
				dec_values[perm[j]] = -1;
		else
		{
			svm_parameter subparam = *param;
			subparam.probability=0;
			subparam.C=1.0;
			subparam.nr_weight=2;
			subparam.weight_label = Malloc(int,2);
			subparam.weight = Malloc(double,2);
			subparam.weight_label[0]=+1;
			subparam.weight_label[1]=-1;
			subparam.weight[0]=Cp;
			subparam.weight[1]=Cn;
			struct svm_model *submodel = svm_train(&subprob,&subparam);
			for(j=begin;j<end;j++)
			{
				svm_predict_values(submodel,prob->x[perm[j]],&(dec_values[perm[j]]));
				// ensure +1 -1 order; reason not using CV subroutine
				dec_values[perm[j]] *= submodel->label[0];
			}
			svm_free_and_destroy_model(&submodel);
			svm_destroy_param(&subparam);
		}
		free(subprob.x);
		free(subprob.y);
	}
	else
	{
		// enough samples: the original 5-fold cross validation
		for(i=0;i<nr_fold;i++)
		{
			int begin = i*prob->l/nr_fold;
			int end = (i+1)*prob->l/nr_fold;
			if (nr_fold == 1)	// unreachable with nr_fold fixed at 5 above
			{
				begin = 0;
				end = prob->l;
			}
			int j,k;
			struct svm_problem subprob;

			subprob.l = prob->l-(end-begin);
			subprob.x = Malloc(struct svm_node*,subprob.l);
			subprob.y = Malloc(double,subprob.l);
			k=0;
			for(j=0;j<begin;j++)
			{
				subprob.x[k] = prob->x[perm[j]];
				subprob.y[k] = prob->y[perm[j]];
				++k;
			}
			for(j=end;j<prob->l;j++)
			{
				subprob.x[k] = prob->x[perm[j]];
				subprob.y[k] = prob->y[perm[j]];
				++k;
			}
			int p_count=0,n_count=0;
			for(j=0;j<k;j++)
				if(subprob.y[j]>0)
					p_count++;
				else
					n_count++;
			if(p_count==0 && n_count==0)
				for(j=begin;j<end;j++)
					dec_values[perm[j]] = 0;
			else if(p_count > 0 && n_count == 0)
				for(j=begin;j<end;j++)
					dec_values[perm[j]] = 1;
			else if(p_count == 0 && n_count > 0)
				for(j=begin;j<end;j++)
					dec_values[perm[j]] = -1;
			else
			{
				svm_parameter subparam = *param;
				subparam.probability=0;
				subparam.C=1.0;
				subparam.nr_weight=2;
				subparam.weight_label = Malloc(int,2);
				subparam.weight = Malloc(double,2);
				subparam.weight_label[0]=+1;
				subparam.weight_label[1]=-1;
				subparam.weight[0]=Cp;
				subparam.weight[1]=Cn;
				struct svm_model *submodel = svm_train(&subprob,&subparam);
				for(j=begin;j<end;j++)
				{
					svm_predict_values(submodel,prob->x[perm[j]],&(dec_values[perm[j]]));
					// ensure +1 -1 order; reason not using CV subroutine
					dec_values[perm[j]] *= submodel->label[0];
				}
				svm_free_and_destroy_model(&submodel);
				svm_destroy_param(&subparam);
			}
			free(subprob.x);
			free(subprob.y);
		}
	}
	sigmoid_train(prob->l,dec_values,prob->y,probA,probB);
	free(dec_values);
	free(perm);
}
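If you would rather not patch libSVM at all, a caller-side guard achieves much the same thing: only request probability estimates when there are enough samples for the 5-fold cross validation to behave. A hypothetical wrapper along those lines (min_samples mirrors the l < 5*nr_fold check above; this is my own sketch, not part of the library):

#include "svm.h"

// Hypothetical caller-side alternative to patching the library:
// fall back to plain decision values when the dataset is too small.
struct svm_model *train_with_safe_probability(const struct svm_problem *prob,
                                              svm_parameter param, // copied, safe to tweak
                                              int min_samples = 25)
{
	if (prob->l < min_samples)
		param.probability = 0; // skip the Platt sigmoid fitting entirely
	return svm_train(prob, &param);
}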
As with a lot of my code-based posts, this is more for my own memory than anything, but hopefully it may help people unlock the secrets of libSVM.