From 330afb0ddd24fda74ba255c9c1717b21909592a1 Mon Sep 17 00:00:00 2001 From: Viktor Gal Date: Wed, 29 Nov 2017 04:32:29 -0500 Subject: [PATCH 01/21] ipython notebook python3 compatibility fix --- .../classification/MKL.ipynb | 44 +++++------ .../SupportVectorMachines.ipynb | 38 +++++----- doc/ipython-notebooks/clustering/KMeans.ipynb | 12 +-- .../distributions/KernelDensity.ipynb | 10 +-- .../evaluation/xval_modelselection.ipynb | 26 +++---- .../gaussian_process/gaussian_processes.ipynb | 16 ++-- .../variational_classifier.ipynb | 56 +++++++------- doc/ipython-notebooks/ica/bss_audio.ipynb | 76 +++---------------- .../intro/Introduction.ipynb | 16 ++-- doc/ipython-notebooks/logdet/logdet.ipynb | 60 +++++++-------- doc/ipython-notebooks/metric/LMNN.ipynb | 12 +-- doc/ipython-notebooks/multiclass/KNN.ipynb | 28 +++---- .../multiclass/Tree/DecisionTrees.ipynb | 36 ++++----- .../multiclass/multiclass_reduction.ipynb | 60 +++++++-------- .../neuralnets/autoencoders.ipynb | 4 +- .../neuralnets/neuralnets_digits.ipynb | 12 +-- .../neuralnets/rbms_dbns.ipynb | 2 +- .../regression/Regression.ipynb | 24 +++--- .../mmd_two_sample_testing.ipynb | 54 ++++++------- .../structure/Binary_Denoising.ipynb | 2 +- doc/ipython-notebooks/structure/FGM.ipynb | 36 ++++----- 21 files changed, 284 insertions(+), 340 deletions(-) diff --git a/doc/ipython-notebooks/classification/MKL.ipynb b/doc/ipython-notebooks/classification/MKL.ipynb index d7d8bf5bffa..d75ddb04098 100644 --- a/doc/ipython-notebooks/classification/MKL.ipynb +++ b/doc/ipython-notebooks/classification/MKL.ipynb @@ -171,17 +171,17 @@ "[gmm.set_nth_mean(means[i], i) for i in range(num_components)]\n", "[gmm.set_nth_cov(covs,i) for i in range(num_components)]\n", "gmm.set_coef(array([1.0,0.0,0.0,0.0]))\n", - "xntr=array([gmm.sample() for i in xrange(num)]).T\n", - "xnte=array([gmm.sample() for i in xrange(5000)]).T\n", + "xntr=array([gmm.sample() for i in range(num)]).T\n", + "xnte=array([gmm.sample() for i in range(5000)]).T\n", "gmm.set_coef(array([0.0,1.0,0.0,0.0]))\n", - "xntr1=array([gmm.sample() for i in xrange(num)]).T\n", - "xnte1=array([gmm.sample() for i in xrange(5000)]).T\n", + "xntr1=array([gmm.sample() for i in range(num)]).T\n", + "xnte1=array([gmm.sample() for i in range(5000)]).T\n", "gmm.set_coef(array([0.0,0.0,1.0,0.0]))\n", - "xptr=array([gmm.sample() for i in xrange(num)]).T\n", - "xpte=array([gmm.sample() for i in xrange(5000)]).T\n", + "xptr=array([gmm.sample() for i in range(num)]).T\n", + "xpte=array([gmm.sample() for i in range(5000)]).T\n", "gmm.set_coef(array([0.0,0.0,0.0,1.0]))\n", - "xptr1=array([gmm.sample() for i in xrange(num)]).T\n", - "xpte1=array([gmm.sample() for i in xrange(5000)]).T\n", + "xptr1=array([gmm.sample() for i in range(num)]).T\n", + "xpte1=array([gmm.sample() for i in range(5000)]).T\n", "traindata=concatenate((xntr,xntr1,xptr,xptr1), axis=1)\n", "trainlab=concatenate((-ones(2*num), ones(2*num)))\n", "\n", @@ -269,7 +269,7 @@ "mkl.train() \n", "\n", "w=kernel.get_subkernel_weights()\n", - "print w" + "print(w)" ] }, { @@ -406,7 +406,7 @@ "out=mkl.apply()\n", "\n", "evaluator=ErrorRateMeasure()\n", - "print \"Test error is %2.2f%% :MKL\" % (100*evaluator.evaluate(out,BinaryLabels(testlab)))\n", + "print(\"Test error is %2.2f%% :MKL\" % (100*evaluator.evaluate(out,BinaryLabels(testlab))))\n", "\n", "\n", "comb_ker0t.init(feats_train,RealFeatures(testdata)) \n", @@ -414,14 +414,14 @@ "out=mkl.apply()\n", "\n", "evaluator=ErrorRateMeasure()\n", - "print \"Test error is %2.2f%% :Subkernel1\"% 
(100*evaluator.evaluate(out,BinaryLabels(testlab)))\n", + "print(\"Test error is %2.2f%% :Subkernel1\"% (100*evaluator.evaluate(out,BinaryLabels(testlab))))\n", "\n", "comb_ker1t.init(feats_train, RealFeatures(testdata))\n", "mkl.set_kernel(comb_ker1t)\n", "out=mkl.apply()\n", "\n", "evaluator=ErrorRateMeasure()\n", - "print \"Test error is %2.2f%% :subkernel2\" % (100*evaluator.evaluate(out,BinaryLabels(testlab)))\n" + "print(\"Test error is %2.2f%% :subkernel2\" % (100*evaluator.evaluate(out,BinaryLabels(testlab))))\n" ] }, { @@ -546,7 +546,7 @@ "\n", "\n", "w, mkl=train_mkl(c, feats_tr)\n", - "print w\n", + "print(w)\n", "out=test_mkl(mkl,grid)\n", "\n", "z=out.get_values().reshape((size, size))\n", @@ -659,8 +659,8 @@ "Nsplit = 2\n", "all_ks = range(1, 21)\n", "\n", - "print Xall.shape\n", - "print Xtrain.shape" + "print(Xall.shape)\n", + "print(Xtrain.shape)" ] }, { @@ -679,7 +679,7 @@ "outputs": [], "source": [ "def plot_example(dat, lab):\n", - " for i in xrange(5):\n", + " for i in range(5):\n", " ax=subplot(1,5,i+1)\n", " title(int(lab[i]))\n", " ax.imshow(dat[:,i].reshape((16,16)), interpolation='nearest')\n", @@ -753,7 +753,7 @@ "out = mkl.apply()\n", "evaluator = MulticlassAccuracy()\n", "accuracy = evaluator.evaluate(out, labels_rem)\n", - "print \"Accuracy = %2.2f%%\" % (100*accuracy)\n", + "print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n", "\n", "idx=where(out.get_labels() != Yrem)[0]\n", "Xbad=Xrem[:,idx]\n", @@ -772,7 +772,7 @@ "outputs": [], "source": [ "w=kernel.get_subkernel_weights()\n", - "print w" + "print(w)" ] }, { @@ -794,7 +794,7 @@ "evaluator = MulticlassAccuracy()\n", "accuracy = evaluator.evaluate(out, labels_rem)\n", "\n", - "print \"Accuracy = %2.2f%%\" % (100*accuracy)\n", + "print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n", "\n", "idx=np.where(out.get_labels() != Yrem)[0]\n", "Xbad=Xrem[:,idx]\n", @@ -825,7 +825,7 @@ "evaluator = MulticlassAccuracy()\n", "accuracy = evaluator.evaluate(out, labels_rem)\n", "\n", - "print \"Accuracy = %2.2f%%\" % (100*accuracy)\n", + "print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n", "\n", "idx=np.where(out.get_labels() != Yrem)[0]\n", "Xbad=Xrem[:,idx]\n", @@ -942,9 +942,9 @@ "outputs": [], "source": [ "mkl.train()\n", - "print \"Weights:\"\n", + "print(\"Weights:\")\n", "w=kernel.get_subkernel_weights()\n", - "print w\n", + "print(w)\n", "\n", "#initialize with test features\n", "kernel.init(feats_train, feats_test) \n", diff --git a/doc/ipython-notebooks/classification/SupportVectorMachines.ipynb b/doc/ipython-notebooks/classification/SupportVectorMachines.ipynb index a5b5bb51a2c..630250daf99 100644 --- a/doc/ipython-notebooks/classification/SupportVectorMachines.ipynb +++ b/doc/ipython-notebooks/classification/SupportVectorMachines.ipynb @@ -194,7 +194,7 @@ "def solve (x1):\n", " return -( ( (w[0])*x1 + b )/w[1] )\n", "\n", - "x2=map(solve, x1)\n", + "x2=list(map(solve, x1))\n", "\n", "#plot\n", "plt.figure(figsize=(6,6))\n", @@ -395,7 +395,7 @@ "libsvm_obj=svm.get_objective()\n", "primal_obj, dual_obj=svm.compute_svm_primal_objective(), svm.compute_svm_dual_objective()\n", "\n", - "print libsvm_obj, primal_obj, dual_obj" + "print(libsvm_obj, primal_obj, dual_obj)" ] }, { @@ -413,7 +413,7 @@ }, "outputs": [], "source": [ - "print \"duality_gap\", dual_obj-primal_obj" + "print(\"duality_gap\", dual_obj-primal_obj)" ] }, { @@ -635,10 +635,10 @@ "gmm.set_nth_cov(np.array([[1.0,0.0],[0.0,1.0]]),1)\n", "\n", "gmm.set_coef(np.array([1.0,0.0]))\n", - "xntr=np.array([gmm.sample() for i in xrange(num)]).T\n", + 
"xntr=np.array([gmm.sample() for i in range(num)]).T\n", "\n", "gmm.set_coef(np.array([0.0,1.0]))\n", - "xptr=np.array([gmm.sample() for i in xrange(num)]).T\n", + "xptr=np.array([gmm.sample() for i in range(num)]).T\n", "\n", "traindata=np.concatenate((xntr,xptr), axis=1)\n", "trainlab=np.concatenate((-np.ones(num), np.ones(num)))\n", @@ -847,7 +847,7 @@ "\n", "Err=sg.ErrorRateMeasure()\n", "error=Err.evaluate(output, lab_test)\n", - "print 'Error:', error\n", + "print('Error:', error)\n", "\n", "#set normalization\n", "gaussian_kernel=sg.GaussianKernel()\n", @@ -863,7 +863,7 @@ "\n", "Err=sg.ErrorRateMeasure()\n", "error=Err.evaluate(output, lab_test)\n", - "print 'Error with normalization:', error" + "print('Error with normalization:', error)" ] }, { @@ -902,24 +902,24 @@ "[gmm.set_nth_mean(means[i], i) for i in range(num_components)]\n", "[gmm.set_nth_cov(covs,i) for i in range(num_components)]\n", "gmm.set_coef(np.array([1.0,0.0,0.0,0.0]))\n", - "xntr=np.array([gmm.sample() for i in xrange(num)]).T\n", - "xnte=np.array([gmm.sample() for i in xrange(5000)]).T\n", + "xntr=np.array([gmm.sample() for i in range(num)]).T\n", + "xnte=np.array([gmm.sample() for i in range(5000)]).T\n", "gmm.set_coef(np.array([0.0,1.0,0.0,0.0]))\n", - "xntr1=np.array([gmm.sample() for i in xrange(num)]).T\n", - "xnte1=np.array([gmm.sample() for i in xrange(5000)]).T\n", + "xntr1=np.array([gmm.sample() for i in range(num)]).T\n", + "xnte1=np.array([gmm.sample() for i in range(5000)]).T\n", "gmm.set_coef(np.array([0.0,0.0,1.0,0.0]))\n", - "xptr=np.array([gmm.sample() for i in xrange(num)]).T\n", - "xpte=np.array([gmm.sample() for i in xrange(5000)]).T\n", + "xptr=np.array([gmm.sample() for i in range(num)]).T\n", + "xpte=np.array([gmm.sample() for i in range(5000)]).T\n", "gmm.set_coef(np.array([0.0,0.0,0.0,1.0]))\n", - "xptr1=np.array([gmm.sample() for i in xrange(num)]).T\n", - "xpte1=np.array([gmm.sample() for i in xrange(5000)]).T\n", + "xptr1=np.array([gmm.sample() for i in range(num)]).T\n", + "xpte1=np.array([gmm.sample() for i in range(5000)]).T\n", "traindata=np.concatenate((xntr,xntr1,xptr,xptr1), axis=1)\n", "testdata=np.concatenate((xnte,xnte1,xpte,xpte1), axis=1)\n", "\n", - "l0 = np.array([0.0 for i in xrange(num)])\n", - "l1 = np.array([1.0 for i in xrange(num)])\n", - "l2 = np.array([2.0 for i in xrange(num)])\n", - "l3 = np.array([3.0 for i in xrange(num)])\n", + "l0 = np.array([0.0 for i in range(num)])\n", + "l1 = np.array([1.0 for i in range(num)])\n", + "l2 = np.array([2.0 for i in range(num)])\n", + "l3 = np.array([3.0 for i in range(num)])\n", "\n", "trainlab=np.concatenate((l0,l1,l2,l3))\n", "testlab=np.concatenate((l0,l1,l2,l3))\n", diff --git a/doc/ipython-notebooks/clustering/KMeans.ipynb b/doc/ipython-notebooks/clustering/KMeans.ipynb index 1c9e05dd8e0..b5f50c940a7 100644 --- a/doc/ipython-notebooks/clustering/KMeans.ipynb +++ b/doc/ipython-notebooks/clustering/KMeans.ipynb @@ -195,7 +195,7 @@ "input": [ "def plotResult(title = 'KMeans Plot'):\n", " figure,axis = pyplot.subplots(1,1)\n", - " for i in xrange(totalPoints):\n", + " for i in range(totalPoints):\n", " if result[i]==0.0:\n", " axis.plot(rectangle[0,i], rectangle[1,i], 'o', color='g', markersize=3)\n", " else:\n", @@ -638,7 +638,7 @@ "input": [ "# plot the clusters over the original points in 2 dimensions\n", "figure,axis = pyplot.subplots(1,1)\n", - "for i in xrange(150):\n", + "for i in range(150):\n", " if result[i]==0.0:\n", " axis.plot(obsmatrix[2,i],obsmatrix[3,i],'ko',color='r', markersize=5)\n", " elif 
result[i]==1.0:\n", @@ -707,7 +707,7 @@ " return (diff,accuracy)\n", "\n", "(diff,accuracy_4d) = analyzeResult(result)\n", - "print 'Accuracy : ' + str(accuracy_4d)\n", + "print('Accuracy : ' + str(accuracy_4d))\n", "\n", "# plot the difference between ground truth and predicted clusters\n", "figure,axis = pyplot.subplots(1,1)\n", @@ -839,7 +839,7 @@ "collapsed": false, "input": [ "(diff,accuracy_1d) = analyzeResult(result)\n", - "print 'Accuracy : ' + str(accuracy_1d)\n", + "print('Accuracy : ' + str(accuracy_1d))\n", "\n", "# plot the difference between ground truth and predicted clusters\n", "figure,axis = pyplot.subplots(1,1)\n", @@ -925,7 +925,7 @@ "collapsed": false, "input": [ "(diff,accuracy_2d) = analyzeResult(result)\n", - "print 'Accuracy : ' + str(accuracy_2d)\n", + "print('Accuracy : ' + str(accuracy_2d))\n", "\n", "# plot the difference between ground truth and predicted clusters\n", "figure,axis = pyplot.subplots(1,1)\n", @@ -1001,7 +1001,7 @@ "collapsed": false, "input": [ "(diff,accuracy_3d) = analyzeResult(result)\n", - "print 'Accuracy : ' + str(accuracy_3d)\n", + "print('Accuracy : ' + str(accuracy_3d))\n", "\n", "# plot the difference between ground truth and predicted clusters\n", "figure,axis = pyplot.subplots(1,1)\n", diff --git a/doc/ipython-notebooks/distributions/KernelDensity.ipynb b/doc/ipython-notebooks/distributions/KernelDensity.ipynb index 77b65ae7110..8a30f0e0cb4 100644 --- a/doc/ipython-notebooks/distributions/KernelDensity.ipynb +++ b/doc/ipython-notebooks/distributions/KernelDensity.ipynb @@ -82,8 +82,8 @@ "\n", "# generates samples from the distribution\n", "def generate_samples(n_samples,mu1,sigma1,mu2,sigma2):\n", - " samples1 = np.random.normal(mu1,sigma1,(1,n_samples/2))\n", - " samples2 = np.random.normal(mu2,sigma2,(1,n_samples/2))\n", + " samples1 = np.random.normal(mu1,sigma1,(1,int(n_samples/2)))\n", + " samples2 = np.random.normal(mu2,sigma2,(1,int(n_samples/2)))\n", " samples = np.concatenate((samples1,samples2),1)\n", " return samples\n", "\n", @@ -383,7 +383,7 @@ " query_feats=RealFeatures(np.array([x[0,:],y[0,:]]))\n", " z=np.array([kdestimator.get_log_density(query_feats)])\n", " z=np.exp(z)\n", - " for i in xrange(1,x.shape[0]):\n", + " for i in range(1,x.shape[0]):\n", " query_feats=RealFeatures(np.array([x[i,:],y[i,:]]))\n", " zi=np.exp(kdestimator.get_log_density(query_feats))\n", " z=np.vstack((z,zi))\n", @@ -441,9 +441,9 @@ "\n", "# classify using our decision rule\n", "z=[]\n", - "for i in xrange(0,x.shape[0]):\n", + "for i in range(0,x.shape[0]):\n", " zj=[]\n", - " for j in xrange(0,x.shape[1]):\n", + " for j in range(0,x.shape[1]):\n", " if ((z1[i,j]>z2[i,j]) and (z1[i,j]>z3[i,j])):\n", " zj.append(1)\n", " elif (z2[i,j]>z3[i,j]):\n", diff --git a/doc/ipython-notebooks/evaluation/xval_modelselection.ipynb b/doc/ipython-notebooks/evaluation/xval_modelselection.ipynb index b8b64431e12..bbae6169be3 100644 --- a/doc/ipython-notebooks/evaluation/xval_modelselection.ipynb +++ b/doc/ipython-notebooks/evaluation/xval_modelselection.ipynb @@ -293,7 +293,7 @@ "metrics=[ROCEvaluation(), AccuracyMeasure(), ErrorRateMeasure(), F1Measure(), PrecisionMeasure(), RecallMeasure(), SpecificityMeasure()]\n", "\n", "for metric in metrics:\n", - " print metric.get_name(), metric.evaluate(classifier.apply(features), labels)" + " print(metric.get_name(), metric.evaluate(classifier.apply(features), labels))" ] }, { @@ -330,7 +330,7 @@ "result=CrossValidationResult.obtain_from_generic(result)\n", "\n", "# this class contains a field \"mean\" which 
contain the mean performance metric\n", - "print \"Testing\", metric.get_name(), result.get_mean()" + "print(\"Testing\", metric.get_name(), result.get_mean())" ] }, { @@ -350,7 +350,7 @@ }, "outputs": [], "source": [ - "print \"Testing\", metric.get_name(), [CrossValidationResult.obtain_from_generic(cross.evaluate()).get_mean() for _ in range(10)]" + "print(\"Testing\", metric.get_name(), [CrossValidationResult.obtain_from_generic(cross.evaluate()).get_mean() for _ in range(10)])" ] }, { @@ -376,8 +376,8 @@ "result=cross.evaluate()\n", "result=CrossValidationResult.obtain_from_generic(result)\n", "\n", - "print \"Testing cross-validation mean %.2f \" \\\n", - "% (result.get_mean())" + "print(\"Testing cross-validation mean %.2f \" \\\n", + "% (result.get_mean()))" ] }, { @@ -408,7 +408,7 @@ "ylabel(metric.get_name())\n", "_=title(\"Accuracy for different kernel widths\")\n", "\n", - "print \"Best Gaussian kernel width %.2f\" % widths[results.argmax()], \"gives\", results.max()\n", + "print(\"Best Gaussian kernel width %.2f\" % widths[results.argmax()], \"gives\", results.max())\n", "\n", "# compare this with a linear kernel\n", "classifier.set_kernel(LinearKernel())\n", @@ -416,7 +416,7 @@ "plot([log2(widths[0]), log2(widths[len(widths)-1])], [lin_k.get_mean(),lin_k.get_mean()], 'r')\n", "\n", "# please excuse this horrible code :)\n", - "print \"Linear kernel gives\", lin_k.get_mean()\n", + "print(\"Linear kernel gives\", lin_k.get_mean())\n", "\n", "_=legend([\"Gaussian\", \"Linear\"], loc=\"lower center\")" ] @@ -600,7 +600,7 @@ "\n", "x, y=meshgrid(taus, widths)\n", "grid=array((ravel(x), ravel(y)))\n", - "print grid.shape\n", + "print(grid.shape)\n", "\n", "errors=[]\n", "for i in range(0, n*n):\n", @@ -662,16 +662,16 @@ "kernel.set_width(2)\n", "\n", "title_='Performance on Boston Housing dataset'\n", - "print \"%50s\" %title_\n", + "print(\"%50s\" %title_)\n", "for machine in regression_models:\n", " metric=MeanSquaredError()\n", " cross=CrossValidation(machine, feats, labels, split, metric)\n", " cross.set_num_runs(25)\n", " result=cross.evaluate()\n", " result=CrossValidationResult.obtain_from_generic(result)\n", - " print \"-\"*80\n", - " print \"|\", \"%30s\" % machine.get_name(),\"|\", \"%20s\" %metric.get_name(),\"|\",\"%20s\" %result.get_mean() ,\"|\" \n", - "print \"-\"*80" + " print(\"-\"*80)\n", + " print(\"|\", \"%30s\" % machine.get_name(),\"|\", \"%20s\" %metric.get_name(),\"|\",\"%20s\" %result.get_mean() ,\"|\" )\n", + "print(\"-\"*80") ] }, { @@ -779,7 +779,7 @@ "result=cross_validation.evaluate()\n", "result=CrossValidationResult.obtain_from_generic(result)\n", "\n", - "print 'Error with Best parameters:', result.get_mean()" + "print('Error with Best parameters:', result.get_mean())" ] }, { diff --git a/doc/ipython-notebooks/gaussian_process/gaussian_processes.ipynb b/doc/ipython-notebooks/gaussian_process/gaussian_processes.ipynb index dc15f960460..0cf758d82aa 100644 --- a/doc/ipython-notebooks/gaussian_process/gaussian_processes.ipynb +++ b/doc/ipython-notebooks/gaussian_process/gaussian_processes.ipynb @@ -138,7 +138,7 @@ " log_liks=lik.get_log_probability_f(lab, F)\n", " \n", " # plot likelihood functions, exponentiate since they were computed in log-domain\n", - " plt.plot(lab.get_labels(), map(exp,log_liks))\n", + " plt.plot(lab.get_labels(), list(map(exp,log_liks)))\n", " \n", "plt.ylabel(\"$p(y_i|f_i)$\")\n", "plt.xlabel(\"$y_i$\")\n", @@ -445,9 +445,9 @@ "best_width=GaussianKernel.obtain_from_generic(inf.get_kernel()).get_width()\n", 
"best_scale=inf.get_scale()\n", "best_sigma=GaussianLikelihood.obtain_from_generic(inf.get_model()).get_sigma()\n", - "print \"Selected tau (kernel bandwidth):\", best_width\n", - "print \"Selected gamma (kernel scaling):\", best_scale\n", - "print \"Selected sigma (observation noise):\", best_sigma" + "print(\"Selected tau (kernel bandwidth):\", best_width)\n", + "print(\"Selected gamma (kernel scaling):\", best_scale)\n", + "print(\"Selected sigma (observation noise):\", best_sigma)" ] }, { @@ -771,7 +771,7 @@ "plt.xlabel(\"Kernel Width in log-scale\")\n", "_=plt.ylabel(\"Log-Marginal Likelihood\")\n", "\n", - "print \"Width with largest marginal likelihood:\", widths[marginal_likelihoods.argmax()]" + "print(\"Width with largest marginal likelihood:\", widths[marginal_likelihoods.argmax()])" ] }, { @@ -875,8 +875,8 @@ "# we have to \"cast\" objects to the specific kernel interface we used (soon to be easier)\n", "best_width=GaussianKernel.obtain_from_generic(inf.get_kernel()).get_width()\n", "best_scale=inf.get_scale()\n", - "print \"Selected kernel bandwidth:\", best_width\n", - "print \"Selected kernel scale:\", best_scale" + "print(\"Selected kernel bandwidth:\", best_width)\n", + "print(\"Selected kernel scale:\", best_scale)" ] }, { @@ -1016,7 +1016,7 @@ "gp.train()\n", "means = gp.get_mean_vector(feats_test)\n", "variances = gp.get_variance_vector(feats_test)\n", - "print \"FITC inference took %.2f seconds\" % (time.time()-start)\n", + "print(\"FITC inference took %.2f seconds\" % (time.time()-start))\n", "\n", "# exact GP\n", "start=time.time()\n", diff --git a/doc/ipython-notebooks/gaussian_process/variational_classifier.ipynb b/doc/ipython-notebooks/gaussian_process/variational_classifier.ipynb index 6031bf5ad52..836ad4a175b 100644 --- a/doc/ipython-notebooks/gaussian_process/variational_classifier.ipynb +++ b/doc/ipython-notebooks/gaussian_process/variational_classifier.ipynb @@ -651,8 +651,8 @@ " (rows, cols) = plots.shape\n", " methods = np.asarray(methods).reshape(rows, cols)\n", " likelihoods = np.asarray(likelihoods).reshape(rows, cols)\n", - " for r in xrange(rows):\n", - " for c in xrange(cols):\n", + " for r in range(rows):\n", + " for c in range(cols):\n", " inference = methods[r][c]\n", " likelihood = likelihoods[r][c]\n", " inf = inference(kernel_func, features, mean_func, labels, likelihood())\n", @@ -868,7 +868,7 @@ " else:\n", " #training set\n", " idx=0\n", - " info=map(float,line.split(','))\n", + " info=list(map(float,line.split(',')))\n", " x[idx].append(info[:-1])\n", " y[idx].append(info[-1])\n", " if info[-1]>0:\n", @@ -1158,7 +1158,7 @@ " \"\"\"\n", " #obtain data points with labels\n", " (x_train, y_train, x_test, y_test, x1s, x2s)=extract_banana_dataset(input_path)\n", - " print \"%d training points\"%(len(x_train[0]))\n", + " print(\"%d training points\"%(len(x_train[0])))\n", " #we want to compare two inference methods\n", " inferences =[\n", " SingleFITCLaplaceInferenceMethod, #inference method for sparse Gaussian processes\n", @@ -1188,7 +1188,7 @@ " x1_inducing = np.random.rand(n_inducing)*(x_train[0,:].max()-x_train[0,:].min())+x_train[0,:].min()\n", " x2_inducing = np.random.rand(n_inducing)*(x_train[1,:].max()-x_train[1,:].min())+x_train[1,:].min()\n", " x_inducing=np.row_stack([x1_inducing,x2_inducing])\n", - " print \"%d inducing points\"%(n_inducing)\n", + " print(\"%d inducing points\"%(n_inducing))\n", " #for measuring runtime\n", " import time\n", " start=time.time()\n", @@ -1196,7 +1196,7 @@ " (gpc, inf)=train_large_scale(inferences[0], 
linesearch, likelihood, x_train, y_train, x_inducing, kernel_log_sigma, kernel_log_scale, False)\n", " name=inf.get_name()\n", " prbs=gpc.get_probabilities(features_boundary)\n", - " print \"FITC Laplace inference took %.2f seconds\" % (time.time()-start)\n", + " print(\"FITC Laplace inference took %.2f seconds\" % (time.time()-start))\n", " plot_helper(plots[0],x1_boundary,x2_boundary,prbs,n_boundary,name,x1s,x2s,\"with %d unoptimized inducing points\"%n_inducing)\n", " \n", " #plot the inducing points used in sparse Gaussian process models\n", @@ -1210,7 +1210,7 @@ " #Laplace approximation (inference method for full Gaussian processes)\n", " (gpc, name)=train_small_scale(inferences[1], linesearch, likelihood, x_train, y_train, kernel_log_sigma, kernel_log_scale)\n", " prbs=gpc.get_probabilities(features_boundary)\n", - " print \"Laplace inference took %.2f seconds\" % (time.time()-start)\n", + " print(\"Laplace inference took %.2f seconds\" % (time.time()-start))\n", " plot_helper(plots[1],x1_boundary,x2_boundary,prbs,n_boundary,name,x1s,x2s)\n", "\n", " plots[1][0].legend([\"positive points\",\"negative points\"],scatterpoints=1)\n", @@ -1270,7 +1270,7 @@ " \"\"\"\n", " #obtain data points with labels\n", " (x_train, y_train, x_test, y_test, x1s, x2s)=extract_banana_dataset(input_path)\n", - " print \"%d training points\"%(len(x_train[0]))\n", + " print(\"%d training points\"%(len(x_train[0])))\n", " #we want to compare two inference methods\n", " inferences =[\n", " SingleFITCLaplaceInferenceMethod, #inference method for sparse Gaussian processes\n", @@ -1298,7 +1298,7 @@ " x1_inducing = np.random.rand(n_inducing)*(x_train[0,:].max()-x_train[0,:].min())+x_train[0,:].min()\n", " x2_inducing = np.random.rand(n_inducing)*(x_train[1,:].max()-x_train[1,:].min())+x_train[1,:].min()\n", " x_inducing=np.row_stack([x1_inducing,x2_inducing])\n", - " print \"%d inducing points\"%(n_inducing)\n", + " print(\"%d inducing points\"%(n_inducing))\n", "\n", " #FITC Laplace approximation (inference method for sparse Gaussian processes)\n", " (gpc, inf)=train_large_scale(inferences[0], linesearch, likelihood, x_train, y_train, x_inducing, kernel_log_sigma, kernel_log_scale, False)\n", @@ -1467,12 +1467,12 @@ " line=line.strip()\n", " info=line.split(',')\n", " label=labels[info[-1].lower()]\n", - " x.append(map(float, info[:-1]))\n", + " x.append(list(map(float, info[:-1])))\n", " y.append(label)\n", " \n", " #train_size should be less than the size of all available data points\n", " assert train_size < len(x) \n", - " idx=range(len(y))\n", + " idx=[i for i in range(len(y))]\n", " random.shuffle(idx)\n", " train_idx=set(idx[:train_size])\n", " test_idx=set(idx[train_size:])\n", @@ -1531,8 +1531,8 @@ " linesearches=np.asarray(linesearches).reshape(rows, cols)\n", " likelihoods=np.asarray(likelihoods).reshape(rows, cols) \n", "\n", - " for r in xrange(rows):\n", - " for c in xrange(cols):\n", + " for r in range(rows):\n", + " for c in range(cols):\n", " inference = methods[r][c]\n", " minimizer = minimizers[r][c]\n", " likelihood = likelihoods[r][c]\n", @@ -1702,7 +1702,7 @@ " negative_count = bin_labels[negative_idx].shape[-1]\n", " binary_count = binary_labels.shape[-1]\n", " \n", - " print \"There are %d positive samples and %d negative samples\" %(positive_count, negative_count)\n", + " print(\"There are %d positive samples and %d negative samples\" %(positive_count, negative_count))\n", " return binary_features, binary_labels" ] }, @@ -1746,7 +1746,7 @@ " kernel_log_scale = 1.0\n", "\n", " inf = 
inference(kernel_func, features_train, mean_func, labels_train, likelihood)\n", - " print \"\\nusing %s\"%inf.get_name()\n", + " print(\"\\nusing %s\"%inf.get_name())\n", " \n", " inf.set_scale(exp(kernel_log_scale))\n", " \n", @@ -1760,21 +1760,21 @@ " gp = GaussianProcessClassification(inf)\n", " gp.train()\n", " end = time.time()\n", - " print \"cost %.2f seconds at training\"%(end-start)\n", + " print(\"cost %.2f seconds at training\"%(end-start))\n", " nlz=inf.get_negative_log_marginal_likelihood()\n", - " print \"the negative_log_marginal_likelihood is %.4f\"%nlz\n", + " print(\"the negative_log_marginal_likelihood is %.4f\"%nlz)\n", " start = time.time()\n", " #classification on train_data\n", " pred_labels_train = gp.apply_binary(features_train)\n", " #classification on test_data\n", " pred_labels_test = gp.apply_binary(features_test)\n", " end = time.time() \n", - " print \"cost %.2f seconds at prediction\"%(end-start)\n", + " print(\"cost %.2f seconds at prediction\"%(end-start))\n", " \n", " error_train = error_eval.evaluate(pred_labels_train, labels_train)\n", " error_test = error_eval.evaluate(pred_labels_test, labels_test)\n", " \n", - " print \"Train error : %.2f Test error: %.2f\\n\" % (error_train, error_test) " + " print(\"Train error : %.2f Test error: %.2f\\n\" % (error_train, error_test))" ] }, { @@ -1796,9 +1796,9 @@ "#Note that \n", "#y_train and y_test are followed the definition in the first section\n", "#the transpose of x_train and x_test are followed the definition in the first section\n", - "print \"Training set statistics\"\n", + "print(\"Training set statistics\")\n", "(x_train, y_train)=binary_extract(idx,train_features, train_labels)\n", - "print \"Test set statistics\"\n", + "print(\"Test set statistics\")\n", "(x_test, y_test)=binary_extract(idx,test_features, test_labels)" ] }, @@ -2093,7 +2093,7 @@ " kernel_log_scale = 1.0\n", "\n", " inf = inference(kernel_func, features_train, mean_func, labels_train, likelihood)\n", - " print \"\\nusing %s\"%inf.get_name()\n", + " print(\"\\nusing %s\"%inf.get_name())\n", " \n", " inf.set_scale(exp(kernel_log_scale))\n", " \n", @@ -2119,27 +2119,27 @@ " # we have to \"cast\" objects to the specific kernel interface we used (soon to be easier)\n", " best_width=GaussianKernel.obtain_from_generic(inf.get_kernel()).get_width()\n", " best_scale=inf.get_scale()\n", - " print \"Selected kernel bandwidth:\", best_width\n", - " print \"Selected kernel scale:\", best_scale\n", + " print(\"Selected kernel bandwidth:\", best_width)\n", + " print(\"Selected kernel scale:\", best_scale)\n", "\n", " start = time.time()\n", " gp.train()\n", " end = time.time()\n", - " print \"cost %s seconds at training\"%(end-start)\n", + " print(\"cost %s seconds at training\"%(end-start))\n", " nlz=inf.get_negative_log_marginal_likelihood()\n", - " print \"the negative_log_marginal_likelihood is %.4f\"%nlz\n", + " print(\"the negative_log_marginal_likelihood is %.4f\"%nlz)\n", " start = time.time()\n", " #classification on train_data\n", " pred_labels_train = gp.apply_binary(features_train)\n", " #classification on test_data\n", " pred_labels_test = gp.apply_binary(features_test)\n", " end = time.time() \n", - " print \"cost %s seconds at prediction\"%(end-start)\n", + " print(\"cost %s seconds at prediction\"%(end-start))\n", " \n", " error_train = error_eval.evaluate(pred_labels_train, labels_train)\n", " error_test = error_eval.evaluate(pred_labels_test, labels_test)\n", " \n", - " print \"Train error : %.2f Test error: %.2f\\n\" % 
(error_train, error_test);" + " print(\"Train error : %.2f Test error: %.2f\\n\" % (error_train, error_test))" ] }, { diff --git a/doc/ipython-notebooks/ica/bss_audio.ipynb b/doc/ipython-notebooks/ica/bss_audio.ipynb index 2b4570f0804..d38e8294d65 100644 --- a/doc/ipython-notebooks/ica/bss_audio.ipynb +++ b/doc/ipython-notebooks/ica/bss_audio.ipynb @@ -84,68 +84,12 @@ "cell_type": "code", "collapsed": true, "input": [ - "import sys\n", - "import StringIO\n", - "import base64\n", - "import struct \n", - "\n", - "from IPython.display import display\n", - "from IPython.core.display import HTML\n", - "\n", - "def wavPlayer(data, rate):\n", - " \"\"\" will display html 5 player for compatible browser\n", - " The browser need to know how to play wav through html5.\n", - " there is no autoplay to prevent file playing when the browser opens\n", - " Adapted from SciPy.io. and\n", - " github.com/Carreau/posts/blob/master/07-the-sound-of-hydrogen.ipynb\n", - " \"\"\"\n", - " \n", - " buffer = StringIO.StringIO()\n", - " buffer.write(b'RIFF')\n", - " buffer.write(b'\\x00\\x00\\x00\\x00')\n", - " buffer.write(b'WAVE')\n", - "\n", - " buffer.write(b'fmt ')\n", - " if data.ndim == 1:\n", - " noc = 1\n", - " else:\n", - " noc = data.shape[1]\n", - " bits = data.dtype.itemsize * 8\n", - " sbytes = rate*(bits // 8)*noc\n", - " ba = noc * (bits // 8)\n", - " buffer.write(struct.pack('' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):\n", - " data = data.byteswap()\n", - "\n", - " buffer.write(data.tostring())\n", - " # return buffer.getvalue()\n", - " # Determine file size and place it in correct\n", - " # position at start of the file.\n", - " size = buffer.tell()\n", - " buffer.seek(4)\n", - " buffer.write(struct.pack('\n", - " \n", - " Simple Test\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \"\"\".format(base64=base64.encodestring(val))\n", - " display(HTML(src))" + "try:\n", + " from IPython.display import Audio\n", + " def wavPlayer(data, rate):\n", + " display(Audio(data, rate=rate))\n", + "except ImportError:\n", + " pass" ], "language": "python", "metadata": {}, @@ -288,8 +232,8 @@ "A = np.array([[1, 0.5, 0.5],\n", " [0.5, 1, 0.5], \n", " [0.5, 0.5, 1]]) \n", - "print 'Mixing Matrix:'\n", - "print A.round(2)\n", + "print('Mixing Matrix:')\n", + "print(A.round(2))\n", "\n", "# Mix Signals\n", "X = np.dot(A,S)\n", @@ -353,8 +297,8 @@ "\n", "A_ = jade.get_mixing_matrix()\n", "A_ = A_ / A_.sum(axis=0)\n", - "print 'Estimated Mixing Matrix:'\n", - "print A_" + "print('Estimated Mixing Matrix:')\n", + "print(A_)" ], "language": "python", "metadata": {}, diff --git a/doc/ipython-notebooks/intro/Introduction.ipynb b/doc/ipython-notebooks/intro/Introduction.ipynb index f939e1b52ef..a4866e594fa 100644 --- a/doc/ipython-notebooks/intro/Introduction.ipynb +++ b/doc/ipython-notebooks/intro/Introduction.ipynb @@ -154,7 +154,7 @@ "#generate a numpy array\n", "feats=array(glucose_conc)\n", "feats=vstack((feats, array(BMI)))\n", - "print feats, feats.shape" + "print(feats, feats.shape)" ] }, { @@ -204,7 +204,7 @@ "print('Number of attributes: %s and number of samples: %s' %(num_f, num_s))\n", "print('Number of rows of feature matrix: %s and number of columns: %s' %(feat_matrix.shape[0], feat_matrix.shape[1]))\n", "print('First column of feature matrix (Data for first individual):')\n", - "print feats_train.get_feature_vector(0)" + "print(feats_train.get_feature_vector(0))" ] }, { @@ -249,7 +249,7 @@ "outputs": [], "source": [ "n=labels.get_num_labels()\n", - "print 'Number of labels:', n" + 
"print('Number of labels:', n)" ] }, { @@ -433,7 +433,7 @@ "#solve for w.x+b=0\n", "def solve (x1):\n", " return -( ( (w[0])*x1 + b )/w[1] )\n", - "x2=map(solve, x1)\n", + "x2=list(map(solve, x1))\n", "\n", "#plot\n", "figure(figsize=(7,6))\n", @@ -446,8 +446,8 @@ "p2 = Rectangle((0, 0), 1, 1, fc=\"k\")\n", "legend((p1, p2), [\"Non-diabetic\", \"Diabetic\"], loc=2)\n", "\n", - "print 'w :', w\n", - "print 'b :', b" + "print('w :', w)\n", + "print('b :', b)" ] }, { @@ -533,7 +533,7 @@ "acc=AccuracyMeasure()\n", "acc.evaluate(output,labels_true)\n", "accuracy=acc.get_accuracy()*100\n", - "print 'Accuracy(%):', accuracy" + "print('Accuracy(%):', accuracy)" ] }, { @@ -587,7 +587,7 @@ "\n", "feats=array(dist_centres)\n", "feats=vstack((feats, array(lower_pop)))\n", - "print feats, feats.shape\n", + "print(feats, feats.shape)\n", "#convert to shogun format features\n", "feats_train=RealFeatures(feats)" ] diff --git a/doc/ipython-notebooks/logdet/logdet.ipynb b/doc/ipython-notebooks/logdet/logdet.ipynb index c9e1e4d9391..4b87a375ffd 100644 --- a/doc/ipython-notebooks/logdet/logdet.ipynb +++ b/doc/ipython-notebooks/logdet/logdet.ipynb @@ -104,8 +104,8 @@ "\n", "# computing the eigenvalues\n", "eigen_solver.compute()\n", - "print 'Minimum Eigenvalue:', eigen_solver.get_min_eigenvalue()\n", - "print 'Maximum Eigenvalue:', eigen_solver.get_max_eigenvalue()" + "print('Minimum Eigenvalue:', eigen_solver.get_min_eigenvalue())\n", + "print('Maximum Eigenvalue:', eigen_solver.get_max_eigenvalue())" ] }, { @@ -130,7 +130,7 @@ "trace_sampler = ProbingSampler(op)\n", "# apply the graph coloring algorithm and generate the number of colors, i.e. number of trace samples\n", "trace_sampler.precompute()\n", - "print 'Number of colors used:', trace_sampler.get_num_samples()" + "print('Number of colors used:', trace_sampler.get_num_samples())" ] }, { @@ -163,7 +163,7 @@ "# we create a operator-log-function using the sparse matrix operator that uses CG-M to solve the shifted systems\n", "op_func = LogRationalApproximationCGM(op, engine, eigen_solver, cgm, accuracy)\n", "op_func.precompute()\n", - "print 'Number of shifts:', op_func.get_num_shifts()" + "print('Number of shifts:', op_func.get_num_shifts())" ] }, { @@ -192,7 +192,7 @@ "estimates = log_det_estimator.sample(num_samples)\n", "\n", "estimated_logdet = np.mean(estimates)\n", - "print 'Estimated log(det(A)):', estimated_logdet" + "print('Estimated log(det(A)):', estimated_logdet)" ] }, { @@ -217,7 +217,7 @@ "# actual_logdet = Statistics.log_det(A)\n", "\n", "actual_logdet = 7120357.73878\n", - "print 'Actual log(det(A)):', actual_logdet\n", + "print('Actual log(det(A)):', actual_logdet)\n", "\n", "plt.hist(estimates)\n", "plt.plot([actual_logdet, actual_logdet], [0,len(estimates)], linewidth=3)\n", @@ -297,11 +297,11 @@ " effective_estimates_normal[i] = np.mean(normal_estimates[idx:(idx + num_colors)])\n", "\n", "actual_logdet = Statistics.log_det(B)\n", - "print 'Actual log(det(B)):', actual_logdet\n", - "print 'Estimated log(det(B)) using probing sampler:', np.mean(probing_estimates)\n", - "print 'Estimated log(det(B)) using Gaussian sampler:', np.mean(effective_estimates_normal)\n", - "print 'Variance using probing sampler:', np.var(probing_estimates)\n", - "print 'Variance using Gaussian sampler:', np.var(effective_estimates_normal)" + "print('Actual log(det(B)):', actual_logdet\n", + "print('Estimated log(det(B)) using probing sampler:', np.mean(probing_estimates))\n", + "print('Estimated log(det(B)) using Gaussian sampler:', 
np.mean(effective_estimates_normal))\n",
+ "print('Variance using probing sampler:', np.var(probing_estimates))\n",
+ "print('Variance using Gaussian sampler:', np.var(effective_estimates_normal))"
 ]
 },
 {
@@ -420,7 +420,7 @@
 " return log_marignal_lik\n",
 "\n",
 "L = log_likelihood(1.0, 15.0)\n",
- "print 'Log-likelihood estimate:', L"
+ "print('Log-likelihood estimate:', L)"
 ]
 },
 {
@@ -459,12 +459,12 @@
 "np.random.seed(1)\n",
 "b = np.array(np.random.randn(dim))\n",
 "v = op.apply(b)\n",
- "print 'A.apply(b)=',v\n",
+ "print('A.apply(b)=',v)\n",
 "\n",
 "# create a dense matrix linear operator\n",
 "B = np.array(np.random.randn(dim, dim)).astype(complex)\n",
 "op = ComplexDenseMatrixOperator(B)\n",
- "print 'Dimension:', op.get_dimension()"
+ "print('Dimension:', op.get_dimension())"
 ]
 },
 {
@@ -505,11 +505,11 @@
 "cg.set_iteration_limit(20)\n",
 "x = cg.solve(Q,y)\n",
 "\n",
- "print 'x:',x\n",
+ "print('x:',x)\n",
 "\n",
 "# verifying the result\n",
- "print 'y:', y\n",
- "print 'Qx:', Q.apply(x)\n",
+ "print('y:', y)\n",
+ "print('Qx:', Q.apply(x))\n",
 "\n",
 "residuals = cg.get_residuals()\n",
 "plt.plot(residuals)\n",
@@ -553,11 +553,11 @@
 "cocg.set_iteration_limit(20)\n",
 "x = cocg.solve(Q, z)\n",
 "\n",
- "print 'x:',x\n",
+ "print('x:',x)\n",
 "\n",
 "# verifying the result\n",
- "print 'z:',z\n",
- "print 'Qx:',np.real(Q.apply(x))\n",
+ "print('z:',z)\n",
+ "print('Qx:',np.real(Q.apply(x)))\n",
 "\n",
 "residuals = cocg.get_residuals()\n",
 "plt.plot(residuals)\n",
@@ -602,21 +602,21 @@
 "for i in range(0, num_shifts):\n",
 " shifts.append(complex(np.random.randn(), np.random.randn()))\n",
 "sigma = np.array(shifts)\n",
- "print 'Shifts:', sigma\n",
+ "print('Shifts:', sigma)\n",
 "\n",
 "# generating some random weights\n",
 "weights = []\n",
 "for i in range(0, num_shifts):\n",
 " weights.append(complex(np.random.randn(), np.random.randn()))\n",
 "alpha = np.array(weights)\n",
- "print 'Weights:',alpha\n",
+ "print('Weights:',alpha)\n",
 "\n",
 "# solve for the systems\n",
 "cgm = CGMShiftedFamilySolver(True)\n",
 "cgm.set_iteration_limit(20)\n",
 "x = cgm.solve_shifted_weighted(Q, v, sigma, alpha)\n",
 "\n",
- "print 'x:',x\n",
+ "print('x:',x)\n",
 "\n",
 "residuals = cgm.get_residuals()\n",
 "plt.plot(residuals)\n",
@@ -632,7 +632,7 @@
 " Q_s = ComplexSparseMatrixOperator(a_s)\n",
 " # multiplying the result with weight\n",
 " x_s += alpha[i] * cocg.solve(Q_s, v)\n",
- "print 'x\\':', x_s"
+ "print('x\\':', x_s)"
 ]
 },
 {
@@ -668,11 +668,11 @@
 "chol = DirectSparseLinearSolver()\n",
 "x = chol.solve(Q,y)\n",
 "\n",
- "print 'x:',x\n",
+ "print('x:',x)\n",
 "\n",
 "# verifying the result\n",
- "print 'y:', y\n",
- "print 'Qx:', Q.apply(x)"
+ "print('y:', y)\n",
+ "print('Qx:', Q.apply(x))"
 ]
 },
 {
@@ -710,11 +710,11 @@
 "solver = DirectLinearSolverComplex()\n",
 "x = solver.solve(Q, z)\n",
 "\n",
- "print 'x:',x\n",
+ "print('x:',x)\n",
 "\n",
 "# verifying the result\n",
- "print 'z:',z\n",
- "print 'Qx:',np.real(Q.apply(x))"
+ "print('z:',z)\n",
+ "print('Qx:',np.real(Q.apply(x)))"
 ]
 },
 {
diff --git a/doc/ipython-notebooks/metric/LMNN.ipynb b/doc/ipython-notebooks/metric/LMNN.ipynb
index d85e111ccaa..6e6ff50acc8 100644
--- a/doc/ipython-notebooks/metric/LMNN.ipynb
+++ b/doc/ipython-notebooks/metric/LMNN.ipynb
@@ -344,8 +344,8 @@
 " x = numpy.zeros((num_classes*num_points, 2))\n",
 " y = numpy.zeros(num_classes*num_points)\n",
 " \n",
- " for i,j in zip(xrange(num_classes), xrange(-num_classes//2, num_classes//2 + 1)):\n",
- " for k,l in zip(xrange(num_points), xrange(-num_points//2, num_points//2 + 1)):\n",
+ " for i,j in 
zip(range(num_classes), range(-num_classes//2, num_classes//2 + 1)):\n", + " for k,l in zip(range(num_points), range(-num_points//2, num_points//2 + 1)):\n", " x[i*num_points + k, :] = numpy.array([normal(l, 0.1), normal(dist*j, 0.1)])\n", " \n", " y[i*num_points:i*num_points + num_points] = i\n", @@ -387,7 +387,7 @@ "from shogun import KNN, EuclideanDistance, LMNN, RealFeatures, MulticlassLabels\n", "\n", "def plot_neighborhood_graph(x, nn, axis=pyplot, cols=['r', 'b', 'g', 'm', 'k', 'y']):\n", - "\tfor i in xrange(x.shape[0]):\n", + "\tfor i in range(x.shape[0]):\n", "\t\txs = [x[i,0], x[nn[1,i], 0]]\n", "\t\tys = [x[i,1], x[nn[1,i], 1]]\n", "\t\taxis.plot(xs, ys, cols[int(y[i])])\n", @@ -429,9 +429,9 @@ "axes[2].set_ylim(-3, 2.5)\n", "axes[2].set_title('Euclidean neighbourhood in the transformed space')\n", "\n", - "[axes[i].set_xlabel('x') for i in xrange(len(axes))]\n", - "[axes[i].set_ylabel('y') for i in xrange(len(axes))]\n", - "[axes[i].set_aspect('equal') for i in xrange(len(axes))]\n", + "[axes[i].set_xlabel('x') for i in range(len(axes))]\n", + "[axes[i].set_ylabel('y') for i in range(len(axes))]\n", + "[axes[i].set_aspect('equal') for i in range(len(axes))]\n", "\n", "pyplot.show()" ], diff --git a/doc/ipython-notebooks/multiclass/KNN.ipynb b/doc/ipython-notebooks/multiclass/KNN.ipynb index 68b2b174973..94a20c2230d 100644 --- a/doc/ipython-notebooks/multiclass/KNN.ipynb +++ b/doc/ipython-notebooks/multiclass/KNN.ipynb @@ -81,9 +81,9 @@ "Nsplit = 2\n", "all_ks = range(1, 21)\n", "\n", - "print Xall.shape\n", - "print Xtrain.shape\n", - "print Xtest.shape" + "print(Xall.shape)\n", + "print(Xtrain.shape)\n", + "print(Xtest.shape)" ] }, { @@ -104,7 +104,7 @@ "%matplotlib inline\n", "import pylab as P\n", "def plot_example(dat, lab):\n", - " for i in xrange(5):\n", + " for i in range(5):\n", " ax=P.subplot(1,5,i+1)\n", " P.title(int(lab[i]))\n", " ax.imshow(dat[:,i].reshape((16,16)), interpolation='nearest')\n", @@ -148,14 +148,14 @@ "feats_test = RealFeatures(Xtest)\n", "knn.train(feats)\n", "pred = knn.apply_multiclass(feats_test)\n", - "print \"Predictions\", pred[:5]\n", - "print \"Ground Truth\", Ytest[:5]\n", + "print(\"Predictions\", pred.get_int_labels()[:5])\n", + "print(\"Ground Truth\", Ytest[:5])\n", "\n", "from shogun import MulticlassAccuracy\n", "evaluator = MulticlassAccuracy()\n", "accuracy = evaluator.evaluate(pred, labels_test)\n", "\n", - "print \"Accuracy = %2.2f%%\" % (100*accuracy)" + "print(\"Accuracy = %2.2f%%\" % (100*accuracy))" ] }, { @@ -198,7 +198,7 @@ "source": [ "knn.set_k(13)\n", "multiple_k=knn.classify_for_multiple_k()\n", - "print multiple_k.shape" + "print(multiple_k.shape)" ] }, { @@ -216,8 +216,8 @@ }, "outputs": [], "source": [ - "for k in xrange(13):\n", - " print \"Accuracy for k=%d is %2.2f%%\" % (k+1, 100*np.mean(multiple_k[:,k]==Ytest))" + "for k in range(13):\n", + " print(\"Accuracy for k=%d is %2.2f%%\" % (k+1, 100*np.mean(multiple_k[:,k]==Ytest)))" ] }, { @@ -254,14 +254,14 @@ "knn.set_k(3)\n", "knn.set_knn_solver_type(KNN_BRUTE)\n", "pred = knn.apply_multiclass(feats_test)\n", - "print \"Standard KNN took %2.1fs\" % (Time.get_curtime() - start)\n", + "print(\"Standard KNN took %2.1fs\" % (Time.get_curtime() - start))\n", "\n", "\n", "start = Time.get_curtime()\n", "knn.set_k(3)\n", "knn.set_knn_solver_type(KNN_COVER_TREE)\n", "pred = knn.apply_multiclass(feats_test)\n", - "print \"Covertree KNN took %2.1fs\" % (Time.get_curtime() - start)\n" + "print(\"Covertree KNN took %2.1fs\" % (Time.get_curtime() - start))\n" ] }, { @@ -453,7 
+453,7 @@ "evaluator = MulticlassAccuracy()\n", "accuracy = evaluator.evaluate(out, labels_test)\n", "\n", - "print \"Accuracy = %2.2f%%\" % (100*accuracy)" + "print(\"Accuracy = %2.2f%%\" % (100*accuracy))" ] }, { @@ -481,7 +481,7 @@ "evaluator = MulticlassAccuracy()\n", "accuracy = evaluator.evaluate(out, labels_rem)\n", "\n", - "print \"Accuracy = %2.2f%%\" % (100*accuracy)\n", + "print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n", "\n", "idx=np.where(out.get_labels() != Yrem)[0]\n", "Xbad=Xrem[:,idx]\n", diff --git a/doc/ipython-notebooks/multiclass/Tree/DecisionTrees.ipynb b/doc/ipython-notebooks/multiclass/Tree/DecisionTrees.ipynb index fc2b7665604..1633755ccd4 100644 --- a/doc/ipython-notebooks/multiclass/Tree/DecisionTrees.ipynb +++ b/doc/ipython-notebooks/multiclass/Tree/DecisionTrees.ipynb @@ -173,10 +173,10 @@ "'High','Low','Medium']\n", "\n", "# print data\n", - "print 'Training Data Table : \\n'\n", - "print 'Income \\t\\t Age \\t\\t Education \\t\\t Marital Status \\t Usage'\n", - "for i in xrange(len(train_income)):\n", - "\tprint train_income[i]+' \\t\\t '+train_age[i]+' \\t\\t '+train_education[i]+' \\t\\t '+train_marital[i]+' \\t\\t '+train_usage[i]\n" + "print('Training Data Table : \\n')\n", + "print('Income \\t\\t Age \\t\\t Education \\t\\t Marital Status \\t Usage')\n", + "for i in range(len(train_income)):\n", + "\tprint(train_income[i]+' \\t\\t '+train_age[i]+' \\t\\t '+train_education[i]+' \\t\\t '+train_marital[i]+' \\t\\t '+train_usage[i])\n" ] }, { @@ -211,7 +211,7 @@ "\n", "\n", "# encode training data\n", - "for i in xrange(len(train_income)):\n", + "for i in range(len(train_income)):\n", "\ttrain_income[i] = income[train_income[i]]\n", "\ttrain_age[i] = age[train_age[i]]\n", "\ttrain_education[i] = education[train_education[i]]\n", @@ -284,10 +284,10 @@ "test_usage = ['Low','Medium','Low','High','High']\n", "\n", "# tabulate test data\n", - "print 'Test Data Table : \\n'\n", - "print 'Income \\t\\t Age \\t\\t Education \\t\\t Marital Status \\t Usage'\n", - "for i in xrange(len(test_income)):\n", - "\tprint test_income[i]+' \\t\\t '+test_age[i]+' \\t\\t '+test_education[i]+' \\t\\t '+test_marital[i]+' \\t\\t ?'\n" + "print('Test Data Table : \\n')\n", + "print('Income \\t\\t Age \\t\\t Education \\t\\t Marital Status \\t Usage')\n", + "for i in range(len(test_income)):\n", + "\tprint(test_income[i]+' \\t\\t '+test_age[i]+' \\t\\t '+test_education[i]+' \\t\\t '+test_marital[i]+' \\t\\t ?')\n" ] }, { @@ -311,7 +311,7 @@ "outputs": [], "source": [ "# encode test data\n", - "for i in xrange(len(test_income)):\n", + "for i in range(len(test_income)):\n", "\ttest_income[i] = income[test_income[i]]\n", "\ttest_age[i] = age[test_age[i]]\n", "\ttest_education[i] = education[test_education[i]]\n", @@ -349,7 +349,7 @@ "output_labels=[0]*len(output)\n", "\n", "# decode back test data for printing\n", - "for i in xrange(len(test_income)):\n", + "for i in range(len(test_income)):\n", "\ttest_income[i]=income.keys()[income.values().index(test_income[i])]\n", "\ttest_age[i]=age.keys()[age.values().index(test_age[i])]\n", "\ttest_education[i]=education.keys()[education.values().index(test_education[i])]\n", @@ -357,10 +357,10 @@ "\toutput_labels[i]=usage.keys()[usage.values().index(output[i])]\n", "\n", "# print output data\n", - "print 'Final Test Data Table : \\n'\n", - "print 'Income \\t Age \\t Education \\t Marital Status \\t Usage(predicted)'\n", - "for i in xrange(len(test_income)):\n", - "\tprint test_income[i]+' \\t '+test_age[i]+' \\t '+test_education[i]+' \\t 
'+test_marital[i]+' \\t\\t '+output_labels[i]" + "print('Final Test Data Table : \\n')\n", + "print('Income \\t Age \\t Education \\t Marital Status \\t Usage(predicted)')\n", + "for i in range(len(test_income)):\n", + "\tprint(test_income[i]+' \\t '+test_age[i]+' \\t '+test_education[i]+' \\t '+test_marital[i]+' \\t\\t '+output_labels[i])" ] }, { @@ -565,7 +565,7 @@ "\n", "# Shogun object for calculating multiclass accuracy\n", "accuracy = MulticlassAccuracy()\n", - "print 'Accuracy : ' + str(accuracy.evaluate(output, test_labels))" + "print('Accuracy : ' + str(accuracy.evaluate(output, test_labels)))" ] }, { @@ -601,7 +601,7 @@ " train_labels = labels[indices]\n", " \n", " average_error = 0\n", - " for i in xrange(num_repetitions):\n", + " for i in range(num_repetitions):\n", " output = ID3_routine(train_features, train_labels)\n", " average_error = average_error + (1-accuracy.evaluate(output, test_labels))\n", " \n", @@ -1142,7 +1142,7 @@ "\n", "# Shogun object for calculating multiclass accuracy\n", "accuracy = MulticlassAccuracy()\n", - "print 'Accuracy : ' + str(accuracy.evaluate(output, test_labels))" + "print('Accuracy : ' + str(accuracy.evaluate(output, test_labels)))" ] }, { diff --git a/doc/ipython-notebooks/multiclass/multiclass_reduction.ipynb b/doc/ipython-notebooks/multiclass/multiclass_reduction.ipynb index 22e3c21d651..bbfedd42b8e 100644 --- a/doc/ipython-notebooks/multiclass/multiclass_reduction.ipynb +++ b/doc/ipython-notebooks/multiclass/multiclass_reduction.ipynb @@ -231,9 +231,9 @@ " evaluator = MulticlassAccuracy()\n", " acc = evaluator.evaluate(pred_test, lab_test)\n", "\n", - " print \"training time: %.4f\" % t_train\n", - " print \"testing time: %.4f\" % t_test\n", - " print \"accuracy: %.4f\" % acc" + " print(\"training time: %.4f\" % t_train)\n", + " print(\"testing time: %.4f\" % t_test)\n", + " print(\"accuracy: %.4f\" % acc)" ], "language": "python", "metadata": {}, @@ -250,12 +250,12 @@ "cell_type": "code", "collapsed": false, "input": [ - "print \"\\nOne-vs-Rest\"\n", - "print \"=\"*60\n", + "print(\"\\nOne-vs-Rest\")\n", + "print(\"=\"*60)\n", "evaluate(MulticlassOneVsRestStrategy(), 5.0)\n", "\n", - "print \"\\nOne-vs-One\"\n", - "print \"=\"*60\n", + "print(\"\\nOne-vs-One\")\n", + "print(\"=\"*60)\n", "evaluate(MulticlassOneVsOneStrategy(), 2.0)" ], "language": "python", @@ -288,9 +288,9 @@ "evaluator = MulticlassAccuracy()\n", "acc = evaluator.evaluate(pred_test, lab_test)\n", "\n", - "print \"training time: %.4f\" % t_train\n", - "print \"testing time: %.4f\" % t_test\n", - "print \"accuracy: %.4f\" % acc" + "print(\"training time: %.4f\" % t_train)\n", + "print(\"testing time: %.4f\" % t_test)\n", + "print(\"accuracy: %.4f\" % acc)" ], "language": "python", "metadata": {}, @@ -451,8 +451,8 @@ "input": [ "from shogun import ECOCStrategy, ECOCRandomDenseEncoder, ECOCLLBDecoder\n", "\n", - "print \"\\nRandom Dense Encoder + Margin Loss based Decoder\"\n", - "print \"=\"*60\n", + "print(\"\\nRandom Dense Encoder + Margin Loss based Decoder\")\n", + "print(\"=\"*60)\n", "evaluate(ECOCStrategy(ECOCRandomDenseEncoder(), ECOCLLBDecoder()), 2.0)" ], "language": "python", @@ -504,12 +504,12 @@ " evaluator = MulticlassAccuracy()\n", " acc = evaluator.evaluate(pred_test, lab_test)\n", "\n", - " print \"training time: %.4f\" % t_train\n", - " print \"testing time: %.4f\" % t_test\n", - " print \"accuracy: %.4f\" % acc\n", + " print(\"training time: %.4f\" % t_train)\n", + " print(\"testing time: %.4f\" % t_test)\n", + " print(\"accuracy: %.4f\" % acc)\n", "\n", - 
"print \"\\nOne-vs-Rest\"\n", - "print \"=\"*60\n", + "print(\"\\nOne-vs-Rest\")\n", + "print(\"=\"*60)\n", "evaluate_multiclass_kernel(MulticlassOneVsRestStrategy())\n", "\n" ], @@ -549,29 +549,29 @@ "gmm.set_nth_cov(array([[1.0,0.0],[0.0,1.0]]),3)\n", "\n", "gmm.set_coef(array([1.0,0.0,0.0,0.0]))\n", - "x0=array([gmm.sample() for i in xrange(num)]).T\n", - "x0t=array([gmm.sample() for i in xrange(num)]).T\n", + "x0=array([gmm.sample() for i in range(num)]).T\n", + "x0t=array([gmm.sample() for i in range(num)]).T\n", "\n", "gmm.set_coef(array([0.0,1.0,0.0,0.0]))\n", - "x1=array([gmm.sample() for i in xrange(num)]).T\n", - "x1t=array([gmm.sample() for i in xrange(num)]).T\n", + "x1=array([gmm.sample() for i in range(num)]).T\n", + "x1t=array([gmm.sample() for i in range(num)]).T\n", "\n", "gmm.set_coef(array([0.0,0.0,1.0,0.0]))\n", - "x2=array([gmm.sample() for i in xrange(num)]).T\n", - "x2t=array([gmm.sample() for i in xrange(num)]).T\n", + "x2=array([gmm.sample() for i in range(num)]).T\n", + "x2t=array([gmm.sample() for i in range(num)]).T\n", "\n", "gmm.set_coef(array([0.0,0.0,0.0,1.0]))\n", - "x3=array([gmm.sample() for i in xrange(num)]).T\n", - "x3t=array([gmm.sample() for i in xrange(num)]).T\n", + "x3=array([gmm.sample() for i in range(num)]).T\n", + "x3t=array([gmm.sample() for i in range(num)]).T\n", "\n", "\n", "traindata=concatenate((x0,x1,x2,x3), axis=1)\n", "testdata=concatenate((x0t,x1t,x2t,x3t), axis=1)\n", "\n", - "l0 = array([0.0 for i in xrange(num)])\n", - "l1 = array([1.0 for i in xrange(num)])\n", - "l2 = array([2.0 for i in xrange(num)])\n", - "l3 = array([3.0 for i in xrange(num)])\n", + "l0 = array([0.0 for i in range(num)])\n", + "l1 = array([1.0 for i in range(num)])\n", + "l2 = array([2.0 for i in range(num)])\n", + "l3 = array([3.0 for i in range(num)])\n", "\n", "trainlab=concatenate((l0,l1,l2,l3))\n", "testlab=concatenate((l0,l1,l2,l3))" @@ -704,7 +704,7 @@ "z10=sub_out10.get_labels().reshape((size, size))\n", "z11=sub_out11.get_labels().reshape((size, size))\n", "\n", - "no_color=array([5.0 for i in xrange(num)])\n", + "no_color=array([5.0 for i in range(num)])\n", "\n", "figure(figsize=(20,5))\n", "subplot(131, title=\"Submachine 1\") #plot submachine and traindata\n", diff --git a/doc/ipython-notebooks/neuralnets/autoencoders.ipynb b/doc/ipython-notebooks/neuralnets/autoencoders.ipynb index 8444fc9e03e..05544ae0c8d 100644 --- a/doc/ipython-notebooks/neuralnets/autoencoders.ipynb +++ b/doc/ipython-notebooks/neuralnets/autoencoders.ipynb @@ -317,7 +317,7 @@ "predictions = nn.apply_multiclass(Xtest)\n", "accuracy = MulticlassAccuracy().evaluate(predictions, Ytest) * 100\n", "\n", - "print \"Classification accuracy on the test set =\", accuracy, \"%\"" + "print(\"Classification accuracy on the test set =\", accuracy, \"%\")" ] }, { @@ -441,7 +441,7 @@ "predictions = conv_nn.apply_multiclass(Xtest)\n", "accuracy = MulticlassAccuracy().evaluate(predictions, Ytest) * 100\n", "\n", - "print \"Classification accuracy on the test set =\", accuracy, \"%\"" + "print(\"Classification accuracy on the test set =\", accuracy, \"%\")" ] }, { diff --git a/doc/ipython-notebooks/neuralnets/neuralnets_digits.ipynb b/doc/ipython-notebooks/neuralnets/neuralnets_digits.ipynb index 02e056b18cd..f9b58d7aba6 100644 --- a/doc/ipython-notebooks/neuralnets/neuralnets_digits.ipynb +++ b/doc/ipython-notebooks/neuralnets/neuralnets_digits.ipynb @@ -252,7 +252,7 @@ "net_no_reg.train(Xtrain) # this might take a while, depending on your machine\n", "\n", "# compute accuracy on the 
validation set\n", - "print \"Without regularization, accuracy on the validation set =\", compute_accuracy(net_no_reg, Xval, Yval), \"%\"" + "print(\"Without regularization, accuracy on the validation set =\", compute_accuracy(net_no_reg, Xval, Yval), \"%\")" ] }, { @@ -282,7 +282,7 @@ "net_l2.train(Xtrain) # this might take a while, depending on your machine\n", "\n", "# compute accuracy on the validation set\n", - "print \"With L2 regularization, accuracy on the validation set =\", compute_accuracy(net_l2, Xval, Yval), \"%\"" + "print(\"With L2 regularization, accuracy on the validation set =\", compute_accuracy(net_l2, Xval, Yval), \"%\")" ] }, { @@ -312,7 +312,7 @@ "net_l1.train(Xtrain) # this might take a while, depending on your machine\n", "\n", "# compute accuracy on the validation set\n", - "print \"With L1 regularization, accuracy on the validation set =\", compute_accuracy(net_l1, Xval, Yval), \"%\"" + "print(\"With L1 regularization, accuracy on the validation set =\", compute_accuracy(net_l1, Xval, Yval), \"%\")" ] }, { @@ -357,7 +357,7 @@ "net_dropout.train(Xtrain) # this might take a while, depending on your machine\n", "\n", "# compute accuracy on the validation set\n", - "print \"With dropout, accuracy on the validation set =\", compute_accuracy(net_dropout, Xval, Yval), \"%\"" + "print(\"With dropout, accuracy on the validation set =\", compute_accuracy(net_dropout, Xval, Yval), \"%\")" ] }, { @@ -449,7 +449,7 @@ "net_conv.train(Xtrain)\n", "\n", "# compute accuracy on the validation set\n", - "print \"With a convolutional network, accuracy on the validation set =\", compute_accuracy(net_conv, Xval, Yval), \"%\"" + "print(\"With a convolutional network, accuracy on the validation set =\", compute_accuracy(net_conv, Xval, Yval), \"%\")" ] }, { @@ -474,7 +474,7 @@ }, "outputs": [], "source": [ - "print \"Accuracy on the test set using the convolutional network =\", compute_accuracy(net_conv, Xtest, Ytest), \"%\"" + "print(\"Accuracy on the test set using the convolutional network =\", compute_accuracy(net_conv, Xtest, Ytest), \"%\")" ] }, { diff --git a/doc/ipython-notebooks/neuralnets/rbms_dbns.ipynb b/doc/ipython-notebooks/neuralnets/rbms_dbns.ipynb index 0e7a05fe47b..b2231a30980 100644 --- a/doc/ipython-notebooks/neuralnets/rbms_dbns.ipynb +++ b/doc/ipython-notebooks/neuralnets/rbms_dbns.ipynb @@ -418,7 +418,7 @@ "predictions = nn.apply_multiclass(RealFeatures(Xtest))\n", "accuracy = MulticlassAccuracy().evaluate(predictions, MulticlassLabels(Ytest)) * 100\n", "\n", - "print \"Classification accuracy on the test set =\", accuracy, \"%\"" + "print(\"Classification accuracy on the test set =\", accuracy, \"%\")" ], "language": "python", "metadata": {}, diff --git a/doc/ipython-notebooks/regression/Regression.ipynb b/doc/ipython-notebooks/regression/Regression.ipynb index 6b475bbf358..630b58399d8 100644 --- a/doc/ipython-notebooks/regression/Regression.ipynb +++ b/doc/ipython-notebooks/regression/Regression.ipynb @@ -145,8 +145,8 @@ "ls = LeastSquaresRegression(feats_train, labels_train)\n", "ls.train()\n", "w = ls.get_w()\n", - "print 'Weights:'\n", - "print w" + "print('Weights:')\n", + "print(w)" ] }, { @@ -202,7 +202,7 @@ "title(\"Squared error and output\")\n", "_ = plot(X_test,out, linewidth=2)\n", "gray()\n", - "_ = scatter(X_train,labels_train,c=ones(30) ,cmap=gray(), s=40)\n", + "_ = scatter(X_train,labels_train.get_labels(),c=ones(30) ,cmap=gray(), s=40)\n", "for i in range(50,80):\n", " plot([X_test[i],X_test[i]],[out[i],y_train[i-50]] , linewidth=2, 
color='red')\n", "p1 = Rectangle((0, 0), 1, 1, fc=\"r\")\n", @@ -254,7 +254,7 @@ "rr = LinearRidgeRegression(tau, feats_train, labels_train)\n", "rr.train()\n", "w = rr.get_w()\n", - "print w\n", + "print(w)\n", "out = rr.apply(feats_test).get_labels()" ] }, @@ -552,8 +552,8 @@ "feats_train.add_preprocessor(preprocessor)\n", "feats_train.apply_preprocessor()\n", "\n", - "print \"(No. of attributes, No. of samples) of data:\"\n", - "print feats_train.get_feature_matrix().shape" + "print(\"(No. of attributes, No. of samples) of data:\")\n", + "print(feats_train.get_feature_matrix().shape)" ] }, { @@ -870,15 +870,15 @@ " time_test=(time.clock() - t_start)\n", " time_nus.append(time_test)\n", "\n", - " print \"-\"*72 \n", - " print \"|\", \"%15s\" % 'Nu' ,\"|\", \"%15s\" % 'Epsilon',\"|\",\"%15s\" % 'Time (Nu)' ,\"|\", \"%15s\" % 'Time(Epsilon)' ,\"|\"\n", + " print(\"-\"*72 )\n", + " print(\"|\", \"%15s\" % 'Nu' ,\"|\", \"%15s\" % 'Epsilon',\"|\",\"%15s\" % 'Time (Nu)' ,\"|\", \"%15s\" % 'Time(Epsilon)' ,\"|\")\n", " for i in range(len(nus)):\n", - " print \"-\"*72 \n", - " print \"|\", \"%15s\" % nus[i] ,\"|\", \"%15s\" %epsilons[i],\"|\",\"%15s\" %time_nus[i] ,\"|\", \"%15s\" %time_eps[i] ,\"|\" \n", - " print \"-\"*72 \n", + " print( \"-\"*72 )\n", + " print( \"|\", \"%15s\" % nus[i] ,\"|\", \"%15s\" %epsilons[i],\"|\",\"%15s\" %time_nus[i] ,\"|\", \"%15s\" %time_eps[i] ,\"|\" )\n", + " print(\"-\"*72 )\n", " \n", "title_='SVR Performance on Boston Housing dataset'\n", - "print \"%50s\" %title_\n", + "print(\"%50s\" %title_)\n", "compare_svr(nus, epsilons)" ] }, diff --git a/doc/ipython-notebooks/statistical_testing/mmd_two_sample_testing.ipynb b/doc/ipython-notebooks/statistical_testing/mmd_two_sample_testing.ipynb index 2bedd3a02a9..dc6853c2d5b 100644 --- a/doc/ipython-notebooks/statistical_testing/mmd_two_sample_testing.ipynb +++ b/doc/ipython-notebooks/statistical_testing/mmd_two_sample_testing.ipynb @@ -220,9 +220,9 @@ }, "outputs": [], "source": [ - "print \"Gaussian vs. Laplace\"\n", - "print \"Sample means: %.2f vs %.2f\" % (np.mean(X), np.mean(Y))\n", - "print \"Samples variances: %.2f vs %.2f\" % (np.var(X), np.var(Y))" + "print(\"Gaussian vs. 
Laplace\")\n", + "print(\"Sample means: %.2f vs %.2f\" % (np.mean(X), np.mean(Y)))\n", + "print(\"Samples variances: %.2f vs %.2f\" % (np.var(X), np.var(Y)))" ] }, { @@ -282,8 +282,8 @@ "mmd.set_statistic_type(sg.ST_UNBIASED_FULL)\n", "statistic=unbiased_statistic=mmd.compute_statistic()\n", "\n", - "print \"%d x MMD_b[X,Y]^2=%.2f\" % (len(X), biased_statistic)\n", - "print \"%d x MMD_u[X,Y]^2=%.2f\" % (len(X), unbiased_statistic)" + "print(\"%d x MMD_b[X,Y]^2=%.2f\" % (len(X), biased_statistic))\n", + "print(\"%d x MMD_u[X,Y]^2=%.2f\" % (len(X), unbiased_statistic))" ] }, { @@ -308,25 +308,25 @@ "\n", "# compute p-value for computed test statistic\n", "p_value=mmd.compute_p_value(statistic)\n", - "print \"P-value of MMD value %.2f is %.2f\" % (statistic, p_value)\n", + "print(\"P-value of MMD value %.2f is %.2f\" % (statistic, p_value))\n", "\n", "# compute threshold for rejecting H_0 for a given test power\n", "alpha=0.05\n", "threshold=mmd.compute_threshold(alpha)\n", - "print \"Threshold for rejecting H0 with a test power of %.2f is %.2f\" % (alpha, threshold)\n", + "print(\"Threshold for rejecting H0 with a test power of %.2f is %.2f\" % (alpha, threshold))\n", "\n", "# performing the test by hand given the above results, note that those two are equivalent\n", "if statistic>threshold:\n", - " print \"H0 is rejected with confidence %.2f\" % alpha\n", + " print(\"H0 is rejected with confidence %.2f\" % alpha)\n", " \n", "if p_value Date: Wed, 29 Nov 2017 12:43:22 -0500 Subject: [PATCH 02/21] fix xval notebook --- doc/ipython-notebooks/evaluation/xval_modelselection.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/ipython-notebooks/evaluation/xval_modelselection.ipynb b/doc/ipython-notebooks/evaluation/xval_modelselection.ipynb index bbae6169be3..4faf11344c8 100644 --- a/doc/ipython-notebooks/evaluation/xval_modelselection.ipynb +++ b/doc/ipython-notebooks/evaluation/xval_modelselection.ipynb @@ -671,7 +671,7 @@ " result=CrossValidationResult.obtain_from_generic(result)\n", " print(\"-\"*80)\n", " print(\"|\", \"%30s\" % machine.get_name(),\"|\", \"%20s\" %metric.get_name(),\"|\",\"%20s\" %result.get_mean() ,\"|\" )\n", - "print(\"-\"*80") + "print(\"-\"*80)" ] }, { From bc89ca00981a6967310c90c8843a6d19f43c0f11 Mon Sep 17 00:00:00 2001 From: Viktor Gal Date: Thu, 30 Nov 2017 04:16:43 -0500 Subject: [PATCH 03/21] bump version to 6.1.3 --- NEWS | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/NEWS b/NEWS index b7a6ce87a05..87ed7de33c0 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,10 @@ +2017-12-05 Viktor Gal + + * SHOGUN Release version 6.1.3 (libshogun 18.0, data 0.11, parameter 1) + + * Bugfixes: + - Port ipython notebooks to be python3 compatible [Viktor Gal] + 2017-11-29 Viktor Gal * SHOGUN Release version 6.1.2 (libshogun 18.0, data 0.11, parameter 1) From b9ac01c74fd57092c5a404d7ff89f3ee195eaed3 Mon Sep 17 00:00:00 2001 From: Sudarshan Kamath Date: Sun, 3 Dec 2017 23:21:32 +0530 Subject: [PATCH 04/21] Issue #4001: Updating MacOS installation in INSTALL.md --- doc/readme/INSTALL.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/readme/INSTALL.md b/doc/readme/INSTALL.md index b66fa038505..55b3cdad1e2 100644 --- a/doc/readme/INSTALL.md +++ b/doc/readme/INSTALL.md @@ -85,7 +85,7 @@ Install as Shogun is part of [homebrew-science](https://github.com/Homebrew/homebrew-science). 
Install the latest stable version as - sudo brew install homebrew/science/shogun + brew install homebrew/science/shogun ### Windows Shogun natively compiles under Windows using MSVC, see the [AppVeyor CI build](https://ci.appveyor.com/project/vigsterkr/shogun) and the [Windows section](#manual-windows) From ecd6a8f11ac52748e89d27c7fab7f43c1de39f05 Mon Sep 17 00:00:00 2001 From: Esben Sorig Date: Wed, 6 Dec 2017 12:21:26 +0000 Subject: [PATCH 05/21] Fix meta example parser bug in parallel builds. Solves github issue #3999 --- examples/meta/CMakeLists.txt | 17 ++++++++++++++--- examples/meta/generator/parse.py | 5 +++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/examples/meta/CMakeLists.txt b/examples/meta/CMakeLists.txt index 9fd5374bdc6..b5460b86578 100644 --- a/examples/meta/CMakeLists.txt +++ b/examples/meta/CMakeLists.txt @@ -23,8 +23,18 @@ LIST(APPEND GENERATOR_DEPENDENCIES ${CMAKE_SOURCE_DIR}/examples/meta/generator/t FILE(GLOB TARGET_LANGUAGES ${CMAKE_SOURCE_DIR}/examples/meta/generator/targets/*.json) LIST(APPEND GENERATOR_DEPENDENCIES ${TARGET_LANGUAGES}) -# parser output -file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/parser_files) +# parser generation +set(GENERATED_PARSER_FILES) +LIST(APPEND GENERATED_PARSER_FILES ${CMAKE_CURRENT_BINARY_DIR}/parser_files/parsetab.py) +LIST(APPEND GENERATED_PARSER_FILES ${CMAKE_CURRENT_BINARY_DIR}/parser_files/lextab.py) +add_custom_command( + OUTPUT ${GENERATED_PARSER_FILES} + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/generator/parse.py + COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/parser_files + COMMAND ${PYTHON_EXECUTABLE} + ${CMAKE_CURRENT_SOURCE_DIR}/generator/parse.py + --parser_files_dir ${CMAKE_CURRENT_BINARY_DIR}/parser_files + --only_generate_parser_files) # list of interfaces for which we dont generate meta examples SET(DISABLED_INTERFACES INTERFACE_PERL) @@ -69,7 +79,8 @@ FOREACH(META_EXAMPLE ${META_EXAMPLES}) COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/generator/generate.py ${GENERATOR_FLAGS} COMMENT "Generating example ${EXAMPLE_NAME_WITH_DIR}" - DEPENDS ctags ${META_EXAMPLE} ${GENERATOR_DEPENDENCIES}) + DEPENDS ctags ${META_EXAMPLE} ${GENERATOR_DEPENDENCIES} + ${GENERATED_PARSER_FILES}) LIST(APPEND TRANSLATED_META_EXAMPLES ${EXAMPLE_LISTINGS}) ENDFOREACH() diff --git a/examples/meta/generator/parse.py b/examples/meta/generator/parse.py index e84247630dc..dfb8aea307e 100644 --- a/examples/meta/generator/parse.py +++ b/examples/meta/generator/parse.py @@ -328,8 +328,13 @@ def parse(programString, filePath, generatedFilesOutputDir=None): argparser.add_argument("--pretty", action="store_true", help="If specified, output is pretty printed") argparser.add_argument("path", nargs='?', help="Path to input file. 
If not specified input is read from stdin") argparser.add_argument("--parser_files_dir", nargs='?', help='Path to directory where generated parser and lexer files should be stored.') + argparser.add_argument('--only_generate_parser_files', action="store_true", help="If specified, generate the parser files and quit without parsing stdin or the file at 'path'") args = argparser.parse_args() + if args.only_generate_parser_files: + FastParser(args.parser_files_dir) + exit(0) + programString = "" filePath = "" From 8ed59a95f1bbe2d8e0eba766b40cf891c57ebc0d Mon Sep 17 00:00:00 2001 From: Viktor Gal Date: Thu, 7 Dec 2017 10:50:20 +0100 Subject: [PATCH 06/21] fix headers_list.txt generation in cmake and header installs fix CONFIG_PACKAGE_DIR path for ShogunConfig.cmake.in --- src/shogun/CMakeLists.txt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/shogun/CMakeLists.txt b/src/shogun/CMakeLists.txt index 7dcf484f8b3..3748b443477 100644 --- a/src/shogun/CMakeLists.txt +++ b/src/shogun/CMakeLists.txt @@ -82,6 +82,9 @@ FOREACH(template ${LIBSHOGUN_SRC_TMP}) STRING(REGEX REPLACE ${EXT_CPP_TMP} ${EXT_CPP_PY} generator_script "${template}") STRING(REGEX REPLACE ".*/(.*).${EXT_CPP_TMP}" "\\1" generated_target "${template}") + IF (EXISTS ${CMAKE_CURRENT_BINARY_DIR}/headers_list.txt) + FILE(REMOVE ${CMAKE_CURRENT_BINARY_DIR}/headers_list.txt) + ENDIF() FOREACH(h ${LIBSHOGUN_HEADERS}) FILE(APPEND ${CMAKE_CURRENT_BINARY_DIR}/headers_list.txt "${h}\n") ENDFOREACH() @@ -499,6 +502,9 @@ INSTALL( COMPONENT headers FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp" PATTERN "CMakeFiles*" EXCLUDE + PATTERN "*.dir" EXCLUDE + PATTERN "*Debug*" EXCLUDE + PATTERN "*Release*" EXCLUDE PATTERN ".settings" EXCLUDE) IF (LIB_INSTALL_DIR) @@ -521,7 +527,7 @@ INSTALL( INCLUDES DESTINATION ${INCLUDE_INSTALL_DIR} ) -set(CONFIG_PACKAGE_DIR ${SHOGUN_LIB_INSTALL}/cmake/shogun) +file(TO_CMAKE_PATH ${SHOGUN_LIB_INSTALL}/cmake/shogun CONFIG_PACKAGE_DIR) configure_package_config_file( ${CMAKE_SOURCE_DIR}/cmake/ShogunConfig.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/ShogunConfig.cmake INSTALL_DESTINATION ${SHOGUN_LIB_INSTALL}/cmake/shogun From f67f0e4bb7639a2696c29af9e82a2d7abe6cb17d Mon Sep 17 00:00:00 2001 From: Viktor Gal Date: Thu, 7 Dec 2017 08:19:47 +0100 Subject: [PATCH 07/21] fix python interface compilation with MSVC the SWIG definition files itself does not depend on shogun::shogun target --- cmake/ShogunInterfaces.cmake | 2 +- src/interfaces/python/CMakeLists.txt | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cmake/ShogunInterfaces.cmake b/cmake/ShogunInterfaces.cmake index 3730a71088d..cf923fc3a91 100644 --- a/cmake/ShogunInterfaces.cmake +++ b/cmake/ShogunInterfaces.cmake @@ -33,7 +33,7 @@ SET(INTERFACE_TARGET interface_${INTERFACE_NAME}) SET(INTERFACE_TARGET_SRC ${INTERFACE_TARGET}_src) ADD_CUSTOM_TARGET(${INTERFACE_TARGET_SRC} - DEPENDS shogun::shogun ${INTERFACE_FILES} + DEPENDS ${INTERFACE_FILES} COMMENT "copying SWIG files") INCLUDE(${SWIG_USE_FILE}) diff --git a/src/interfaces/python/CMakeLists.txt b/src/interfaces/python/CMakeLists.txt index a9f0ce62e40..c97761d95a0 100644 --- a/src/interfaces/python/CMakeLists.txt +++ b/src/interfaces/python/CMakeLists.txt @@ -29,7 +29,9 @@ include_directories(${PYTHON_INCLUDE_PATH} ${NUMPY_INCLUDE_DIRS}) #TODO: check for SWIG version where this bug has been applied already: # https://github.com/swig/swig/pull/70 -SET(SWIG_CXX_COMPILER_FLAGS "${SWIG_CXX_COMPILER_FLAGS} -Wno-c++11-narrowing") +IF (NOT MSVC) + SET(SWIG_CXX_COMPILER_FLAGS 
"${SWIG_CXX_COMPILER_FLAGS} -Wno-c++11-narrowing") +ENDIF() GENERATE_INTERFACE_TARGET(python ${CMAKE_CURRENT_SOURCE_DIR} ${PYTHON_LIBRARIES}) From 3f4cfc18cf9082aa1703716c241e893fbefec364 Mon Sep 17 00:00:00 2001 From: Viktor Gal Date: Thu, 7 Dec 2017 13:08:44 +0100 Subject: [PATCH 08/21] make sure shogun.dll is installed on windows --- src/shogun/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/shogun/CMakeLists.txt b/src/shogun/CMakeLists.txt index 3748b443477..8b00920e28c 100644 --- a/src/shogun/CMakeLists.txt +++ b/src/shogun/CMakeLists.txt @@ -523,6 +523,7 @@ INSTALL( TARGETS ${INSTALL_TARGETS} EXPORT ShogunTargets ARCHIVE DESTINATION ${SHOGUN_LIB_INSTALL} + RUNTIME DESTINATION ${SHOGUN_LIB_INSTALL} LIBRARY DESTINATION ${SHOGUN_LIB_INSTALL} INCLUDES DESTINATION ${INCLUDE_INSTALL_DIR} ) From 23c4a570ee2f3de5791c58ab9034be5c8c78cd31 Mon Sep 17 00:00:00 2001 From: Viktor Gal Date: Thu, 7 Dec 2017 13:25:11 +0100 Subject: [PATCH 09/21] fix std::isinf/isfinite/isnan detection --- src/shogun/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/shogun/CMakeLists.txt b/src/shogun/CMakeLists.txt index 8b00920e28c..36e87eca2a4 100644 --- a/src/shogun/CMakeLists.txt +++ b/src/shogun/CMakeLists.txt @@ -229,13 +229,13 @@ CHECK_FUNCTION_EXISTS(isinf HAVE_ISINF) CHECK_FUNCTION_EXISTS(isnan HAVE_ISNAN) CHECK_CXX_SOURCE_COMPILES( - "#include \nint main() { return std::isinf( 0 ); }\n" + "#include \nint main() { return std::isinf( 0.0 ); }\n" HAVE_STD_ISINF ) CHECK_CXX_SOURCE_COMPILES( - "#include \nint main() { return std::isfinite( 0 ); }\n" + "#include \nint main() { return std::isfinite( 0.0 ); }\n" HAVE_STD_ISFINITE ) CHECK_CXX_SOURCE_COMPILES( - "#include \nint main() { return std::isnan( 0 ); }\n" + "#include \nint main() { return std::isnan( 0.0 ); }\n" HAVE_STD_ISNAN ) ############################ external dependencies From 87765b4d730cd6230fd8a9681d6f295f03a9f68f Mon Sep 17 00:00:00 2001 From: Viktor Gal Date: Thu, 7 Dec 2017 14:07:15 +0100 Subject: [PATCH 10/21] drop function calls in CMath --- src/shogun/CMakeLists.txt | 3 -- src/shogun/lib/config.h.in | 3 -- src/shogun/mathematics/Math.h | 94 ++++++++++------------------------- 3 files changed, 26 insertions(+), 74 deletions(-) diff --git a/src/shogun/CMakeLists.txt b/src/shogun/CMakeLists.txt index 36e87eca2a4..3364355fb97 100644 --- a/src/shogun/CMakeLists.txt +++ b/src/shogun/CMakeLists.txt @@ -217,10 +217,7 @@ CHECK_CXX_SYMBOL_EXISTS(fdopen "stdio.h" HAVE_FDOPEN) IF(UNIX) SET(CMAKE_REQUIRED_LIBRARIES m) ENDIF() -CHECK_FUNCTION_EXISTS(log2 HAVE_LOG2) -CHECK_FUNCTION_EXISTS(powl HAVE_POWL) CHECK_FUNCTION_EXISTS(lgammal HAVE_LGAMMAL) -CHECK_FUNCTION_EXISTS(sqrtl HAVE_SQRTL) CHECK_FUNCTION_EXISTS(finite HAVE_FPCLASS) CHECK_FUNCTION_EXISTS(fpclass HAVE_FPCLASS) diff --git a/src/shogun/lib/config.h.in b/src/shogun/lib/config.h.in index 2cf9d91a036..47e11922a54 100644 --- a/src/shogun/lib/config.h.in +++ b/src/shogun/lib/config.h.in @@ -32,10 +32,7 @@ #cmakedefine HAVE_ARPREC 1 #cmakedefine USE_META_INTEGRATION_TESTS -#cmakedefine HAVE_POWL 1 #cmakedefine HAVE_LGAMMAL 1 -#cmakedefine HAVE_SQRTL 1 -#cmakedefine HAVE_LOG2 1 #cmakedefine USE_LOGCACHE 1 #cmakedefine USE_LOGSUMARRAY 1 diff --git a/src/shogun/mathematics/Math.h b/src/shogun/mathematics/Math.h index 401e7b7d326..1f5d3290a32 100644 --- a/src/shogun/mathematics/Math.h +++ b/src/shogun/mathematics/Math.h @@ -31,7 +31,7 @@ #ifndef _USE_MATH_DEFINES #define _USE_MATH_DEFINES #endif -#include +#include #include #ifndef _WIN32 @@ 
From 87765b4d730cd6230fd8a9681d6f295f03a9f68f Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 14:07:15 +0100
Subject: [PATCH 10/21] drop function calls in CMath

---
 src/shogun/CMakeLists.txt     |  3 --
 src/shogun/lib/config.h.in    |  3 --
 src/shogun/mathematics/Math.h | 94 ++++++++++-------------------------
 3 files changed, 26 insertions(+), 74 deletions(-)

diff --git a/src/shogun/CMakeLists.txt b/src/shogun/CMakeLists.txt
index 36e87eca2a4..3364355fb97 100644
--- a/src/shogun/CMakeLists.txt
+++ b/src/shogun/CMakeLists.txt
@@ -217,10 +217,7 @@ CHECK_CXX_SYMBOL_EXISTS(fdopen "stdio.h" HAVE_FDOPEN)
 IF(UNIX)
   SET(CMAKE_REQUIRED_LIBRARIES m)
 ENDIF()
-CHECK_FUNCTION_EXISTS(log2 HAVE_LOG2)
-CHECK_FUNCTION_EXISTS(powl HAVE_POWL)
 CHECK_FUNCTION_EXISTS(lgammal HAVE_LGAMMAL)
-CHECK_FUNCTION_EXISTS(sqrtl HAVE_SQRTL)

 CHECK_FUNCTION_EXISTS(finite HAVE_FPCLASS)
 CHECK_FUNCTION_EXISTS(fpclass HAVE_FPCLASS)

diff --git a/src/shogun/lib/config.h.in b/src/shogun/lib/config.h.in
index 2cf9d91a036..47e11922a54 100644
--- a/src/shogun/lib/config.h.in
+++ b/src/shogun/lib/config.h.in
@@ -32,10 +32,7 @@
 #cmakedefine HAVE_ARPREC 1
 #cmakedefine USE_META_INTEGRATION_TESTS

-#cmakedefine HAVE_POWL 1
 #cmakedefine HAVE_LGAMMAL 1
-#cmakedefine HAVE_SQRTL 1
-#cmakedefine HAVE_LOG2 1

 #cmakedefine USE_LOGCACHE 1
 #cmakedefine USE_LOGSUMARRAY 1

diff --git a/src/shogun/mathematics/Math.h b/src/shogun/mathematics/Math.h
index 401e7b7d326..1f5d3290a32 100644
--- a/src/shogun/mathematics/Math.h
+++ b/src/shogun/mathematics/Math.h
@@ -31,7 +31,7 @@
 #ifndef _USE_MATH_DEFINES
 #define _USE_MATH_DEFINES
 #endif
-#include <math.h>
+#include <cmath>
 #include <float.h>

 #ifndef _WIN32
@@ -381,7 +381,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t round(float64_t d)
 	{
-		return ::floor(d+0.5);
+		return std::floor(d+0.5);
 	}

 	/** The value of x rounded downward (as a floating-point value)
@@ -390,7 +390,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t floor(float64_t d)
 	{
-		return ::floor(d);
+		return std::floor(d);
 	}

 	/** The value of x rounded upward (as a floating-point value)
@@ -399,7 +399,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t ceil(float64_t d)
 	{
-		return ::ceil(d);
+		return std::ceil(d);
 	}

 	/** Signum of input value
@@ -440,34 +440,11 @@ class CMath : public CSGObject
 	 * @param x input value
 	 * @return x^0.5
 	 */
-	static inline float32_t sqrt(float32_t x)
-	{
-		return ::sqrtf(x);
-	}
-
-	/** Computes square-root of the input
-	 * @param x input value
-	 * @return x^0.5
-	 */
-	static inline float64_t sqrt(float64_t x)
-	{
-		return ::sqrt(x);
-	}
-
-	/** Computes square-root of the input
-	 * @param x input value
-	 * @return x^0.5
-	 */
-	static inline floatmax_t sqrt(floatmax_t x)
-	{
-		//fall back to double precision sqrt if sqrtl is not
-		//available
-#ifdef HAVE_SQRTL
-		return ::sqrtl(x);
-#else
-		return ::sqrt(x);
-#endif
-	}
+	template <class T>
+	static inline T sqrt(T x)
+	{
+		return std::sqrt(x);
+	}

 	/// x^0.5, x being a complex128_t
 	COMPLEX128_STDMATH(sqrt)
@@ -507,13 +484,7 @@ class CMath : public CSGObject
 	 */
 	static inline floatmax_t powl(floatmax_t x, floatmax_t n)
 	{
-		//fall back to double precision pow if powl is not
-		//available
-#ifdef HAVE_POWL
-		return ::powl((long double) x, (long double) n);
-#else
-		return ::pow((double) x, (double) n);
-#endif
+		return std::pow(x, n);
 	}

 	static inline int32_t pow(bool x, int32_t n)
@@ -541,16 +512,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t pow(float64_t x, int32_t n)
 	{
-		if (n>=0)
-		{
-			float64_t result=1;
-			while (n--)
-				result*=x;
-
-			return result;
-		}
-		else
-			return ::pow((double)x, (double)n);
+		return std::pow(x, n);
 	}

 	/**
@@ -559,7 +521,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t pow(float64_t x, float64_t n)
 	{
-		return ::pow((double) x, (double) n);
+		return std::pow(x, n);
 	}

 	/**
@@ -604,7 +566,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t exp(float64_t x)
 	{
-		return ::exp((double) x);
+		return std::exp(x);
 	}

 	/// exp(x), x being a complex128_t
@@ -620,7 +582,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t tan(float64_t x)
 	{
-		return ::tan((double) x);
+		return std::tan(x);
 	}

 	/// tan(x), x being a complex128_t
@@ -632,7 +594,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t atan(float64_t x)
 	{
-		return ::atan((double) x);
+		return std::atan(x);
 	}

 	/// atan(x), x being a complex128_t not implemented
@@ -645,7 +607,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t atan2(float64_t y, float64_t x)
 	{
-		return ::atan2((double) y, (double) x);
+		return std::atan2(y, x);
 	}

 	/// atan2(x), x being a complex128_t not implemented
@@ -657,7 +619,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t tanh(float64_t x)
 	{
-		return ::tanh((double) x);
+		return std::tanh(x);
 	}

 	/// tanh(x), x being a complex128_t
@@ -669,7 +631,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t sin(float64_t x)
 	{
-		return ::sin(x);
+		return std::sin(x);
 	}

 	/// sin(x), x being a complex128_t
@@ -681,7 +643,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t asin(float64_t x)
 	{
-		return ::asin(x);
+		return std::asin(x);
 	}

 	/// asin(x), x being a complex128_t not implemented
@@ -693,7 +655,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t sinh(float64_t x)
 	{
-		return ::sinh(x);
+		return std::sinh(x);
 	}

 	/// sinh(x), x being a complex128_t
@@ -705,7 +667,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t cos(float64_t x)
 	{
-		return ::cos(x);
+		return std::cos(x);
 	}

 	/// cos(x), x being a complex128_t
@@ -717,7 +679,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t acos(float64_t x)
 	{
-		return ::acos(x);
+		return std::acos(x);
 	}

 	/// acos(x), x being a complex128_t not implemented
@@ -729,7 +691,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t cosh(float64_t x)
 	{
-		return ::cosh(x);
+		return std::cosh(x);
 	}

 	/// cosh(x), x being a complex128_t
@@ -746,7 +708,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t log10(float64_t v)
 	{
-		return ::log(v)/::log(10.0);
+		return std::log10(v);
 	}

 	/// log10(x), x being a complex128_t
@@ -758,11 +720,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t log2(float64_t v)
 	{
-#ifdef HAVE_LOG2
-		return ::log2(v);
-#else
-		return ::log(v)/::log(2.0);
-#endif //HAVE_LOG2
+		return std::log2(v);
 	}

 	/** Computes natural logarithm input
@@ -771,7 +729,7 @@ class CMath : public CSGObject
 	 */
 	static inline float64_t log(float64_t v)
 	{
-		return ::log(v);
+		return std::log(v);
 	}

 	/// log(x), x being a complex128_t
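The net effect of the hunks above is that CMath stops re-implementing or conditionally dispatching libm calls and simply forwards to the overloaded C++11 <cmath> functions; in particular, the three hand-written sqrt overloads collapse into one template. A rough standalone sketch of that idea -- wrapped_sqrt and the typedefs are illustrative stand-ins, not shogun code:

    #include <cmath>
    #include <iostream>

    typedef float       float32_t;   // assumption: mirrors shogun's typedefs
    typedef double      float64_t;
    typedef long double floatmax_t;

    template <class T>
    static inline T wrapped_sqrt(T x)   // hypothetical stand-in for CMath::sqrt
    {
        return std::sqrt(x);            // overload resolution picks the float,
    }                                   // double or long double version

    int main()
    {
        std::cout << wrapped_sqrt(2.0f) << ' '   // float overload
                  << wrapped_sqrt(2.0)  << ' '   // double overload
                  << wrapped_sqrt(2.0L) << '\n'; // long double overload
        return 0;
    }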
From 4a2e87a76568b4ba67570de4629502841ee85d59 Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 14:15:41 +0100
Subject: [PATCH 11/21] drop isfinite/isnan function workarounds as we are
 std::c++11

---
 src/shogun/CMakeLists.txt       | 19 ------------------
 src/shogun/lib/config.h.in      | 34 ---------------------------------
 src/shogun/mathematics/Math.cpp | 31 ------------------------------
 src/shogun/mathematics/Math.h   | 16 ----------------
 4 files changed, 100 deletions(-)

diff --git a/src/shogun/CMakeLists.txt b/src/shogun/CMakeLists.txt
index 3364355fb97..9e797869e9f 100644
--- a/src/shogun/CMakeLists.txt
+++ b/src/shogun/CMakeLists.txt
@@ -207,9 +207,6 @@ ENDIF((NOT CYGWIN) AND (NOT DISABLE_SSE))
 FIND_PACKAGE(CxaDemangle)

 ############################ std lib functions
 include (CheckCXXSymbolExists)
-CHECK_CXX_SYMBOL_EXISTS(isfinite "cmath" HAVE_DECL_ISFINITE)
-CHECK_CXX_SYMBOL_EXISTS(isinf "cmath" HAVE_DECL_ISINF)
-CHECK_CXX_SYMBOL_EXISTS(isnan "cmath" HAVE_DECL_ISNAN)
 CHECK_CXX_SYMBOL_EXISTS(signgam "cmath" HAVE_DECL_SIGNGAM)
 CHECK_CXX_SYMBOL_EXISTS(fdopen "stdio.h" HAVE_FDOPEN)

@@ -219,22 +216,6 @@ IF(UNIX)
 ENDIF()
 CHECK_FUNCTION_EXISTS(lgammal HAVE_LGAMMAL)

-CHECK_FUNCTION_EXISTS(finite HAVE_FPCLASS)
-CHECK_FUNCTION_EXISTS(fpclass HAVE_FPCLASS)
-CHECK_FUNCTION_EXISTS(isfinite HAVE_ISFINITE)
-CHECK_FUNCTION_EXISTS(isinf HAVE_ISINF)
-CHECK_FUNCTION_EXISTS(isnan HAVE_ISNAN)
-
-CHECK_CXX_SOURCE_COMPILES(
-	"#include <cmath>\nint main() { return std::isinf( 0.0 ); }\n"
-	HAVE_STD_ISINF )
-CHECK_CXX_SOURCE_COMPILES(
-	"#include <cmath>\nint main() { return std::isfinite( 0.0 ); }\n"
-	HAVE_STD_ISFINITE )
-CHECK_CXX_SOURCE_COMPILES(
-	"#include <cmath>\nint main() { return std::isnan( 0.0 ); }\n"
-	HAVE_STD_ISNAN )
-
 ############################ external dependencies

 ####### LINALG
diff --git a/src/shogun/lib/config.h.in b/src/shogun/lib/config.h.in
index 47e11922a54..efda983ba5c 100644
--- a/src/shogun/lib/config.h.in
+++ b/src/shogun/lib/config.h.in
@@ -56,40 +56,6 @@
 #cmakedefine USE_EIGEN3_EIGSLV 1
 #cmakedefine USE_VIENNACL_EIGSLV 1

-/* Define to 1 if you have the declaration of `isfinite', and to 0 if you
-   don't. */
-#cmakedefine HAVE_DECL_ISFINITE 1
-
-/* Define to 1 if you have the declaration of `isinf', and to 0 if you don't. */
-#cmakedefine HAVE_DECL_ISINF 1
-
-/* Define to 1 if you have the declaration of `isnan', and to 0 if you don't. */
-#cmakedefine HAVE_DECL_ISNAN 1
-
-/* Define to 1 if you have the `finite' function. */
-#cmakedefine HAVE_FINITE 1
-
-/* Define to 1 if you have the `fpclass' function. */
-#cmakedefine HAVE_FPCLASS 1
-
-/* Define to 1 if you have the `isfinite' function. */
-#cmakedefine HAVE_ISFINITE 1
-
-/* Define to 1 if you have the `isinf' function. */
-#cmakedefine HAVE_ISINF 1
-
-/* Define to 1 if you have the `isnan' function. */
-#cmakedefine HAVE_ISNAN 1
-
-/* Define to 1 if you have the `std::isfinite' function. */
-#cmakedefine HAVE_STD_ISFINITE 1
-
-/* Define to 1 if you have the `std::isinf' function. */
-#cmakedefine HAVE_STD_ISINF 1
-
-/* Define to 1 if you have the `std::isnan' function. */
-#cmakedefine HAVE_STD_ISNAN 1
-
 #cmakedefine HAVE_DECL_SIGNGAM 1

 #cmakedefine HAVE_FDOPEN 1
diff --git a/src/shogun/mathematics/Math.cpp b/src/shogun/mathematics/Math.cpp
index 604387f8ea2..d92990fe49f 100644
--- a/src/shogun/mathematics/Math.cpp
+++ b/src/shogun/mathematics/Math.cpp
@@ -209,48 +209,17 @@ void CMath::linspace(float64_t* output, float64_t start, float64_t end, int32_t

 int CMath::is_nan(double f)
 {
-#ifndef HAVE_STD_ISNAN
-#if (HAVE_DECL_ISNAN == 1) || defined(HAVE_ISNAN)
-  return ::isnan(f);
-#else
-  return ((f != f) ? 1 : 0);
-#endif // #if (HAVE_DECL_ISNAN == 1) || defined(HAVE_ISNAN)
-#else
 	return std::isnan(f);
-#endif // #ifndef HAVE_STD_ISNAN
 }

 int CMath::is_infinity(double f)
 {
-#ifndef HAVE_STD_ISINF
-#if (HAVE_DECL_ISINF == 1) || defined(HAVE_ISINF)
-  return ::isinf(f);
-#elif defined(FPCLASS)
-  if (::fpclass(f) == FP_NINF) return -1;
-  else if (::fpclass(f) == FP_PINF) return 1;
-  else return 0;
-#else
-  if ((f == f) && ((f - f) != 0.0)) return (f < 0.0 ? -1 : 1);
-  else return 0;
-#endif // #if (HAVE_DECL_ISINF == 1) || defined(HAVE_ISINF)
-#else
 	return std::isinf(f);
-#endif // #ifndef HAVE_STD_ISINF
 }

 int CMath::is_finite(double f)
 {
-#ifndef HAVE_STD_ISFINITE
-#if (HAVE_DECL_ISFINITE == 1) || defined(HAVE_ISFINITE)
-  return ::isfinite(f);
-#elif defined(HAVE_FINITE)
-  return ::finite(f);
-#else
-  return ((!CMath::is_nan(f) && !CMath::is_infinity(f)) ? 1 : 0);
-#endif // #if (HAVE_DECL_ISFINITE == 1) || defined(HAVE_ISFINITE)
-#else
 	return std::isfinite(f);
-#endif // #ifndef HAVE_STD_ISFINITE
 }

 bool CMath::strtof(const char* str, float32_t* float_result)
diff --git a/src/shogun/mathematics/Math.h b/src/shogun/mathematics/Math.h
index 1f5d3290a32..5f3492d2c54 100644
--- a/src/shogun/mathematics/Math.h
+++ b/src/shogun/mathematics/Math.h
@@ -46,26 +46,10 @@
 #include <unistd.h>
 #endif

-/// workaround for log2 being a define on cygwin
-#ifdef log2
-#define cygwin_log2 log2
-#undef log2
-#endif
-
 #ifndef M_PI
 #define M_PI 3.14159265358979323846
 #endif

-#ifdef _WIN32
-#ifndef isnan
-#define isnan _isnan
-#endif
-
-#ifndef isfinite
-#define isfinite _isfinite
-#endif
-#endif //_WIN32
-
 /* Size of RNG seed */
 #define RNG_SEED_SIZE 256

From e30d2ebdc68f1f83552f47d45d2f7981a75e4568 Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 14:43:44 +0100
Subject: [PATCH 12/21] fix abs function calls in StreamingVwFeatures

---
 src/shogun/features/streaming/StreamingVwFeatures.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/shogun/features/streaming/StreamingVwFeatures.cpp b/src/shogun/features/streaming/StreamingVwFeatures.cpp
index a67e190262d..b075782186b 100644
--- a/src/shogun/features/streaming/StreamingVwFeatures.cpp
+++ b/src/shogun/features/streaming/StreamingVwFeatures.cpp
@@ -112,8 +112,8 @@ void CStreamingVwFeatures::expand_if_required(float64_t*& vec, int32_t& len)
 float32_t CStreamingVwFeatures::real_weight(float32_t w, float32_t gravity)
 {
 	float32_t wprime = 0;
-	if (gravity < fabsf(w))
-		wprime = CMath::sign(w)*(fabsf(w) - gravity);
+	if (gravity < CMath::abs(w))
+		wprime = CMath::sign(w)*(CMath::abs(w) - gravity);
 	return wprime;
 }

@@ -351,7 +351,7 @@ void CStreamingVwFeatures::add_to_dense_vec(float32_t alpha, VwExample* &ex, flo
 		for (vw_size_t* i = ex->indices.begin; i != ex->indices.end; i++)
 		{
 			for (VwFeature* f = ex->atomics[*i].begin; f != ex->atomics[*i].end; f++)
-				vec2[f->weight_index & env->thread_mask] += alpha * abs(f->x);
+				vec2[f->weight_index & env->thread_mask] += alpha * CMath::abs(f->x);
 		}
 	}
 	else
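The fix above replaces raw fabsf/abs calls with the type-safe CMath::abs wrapper: an unqualified abs can resolve to the C library's int abs(int), silently truncating a float argument such as f->x. A minimal sketch of that failure mode (illustrative code, not from the patch):

    // Demonstrates what binding to the int-based abs does to a float weight.
    #include <cstdlib>   // C-style abs(int)
    #include <cmath>     // std::abs overloads for floating point
    #include <iostream>

    int main()
    {
        float x = -2.75f;
        int   truncated = std::abs(static_cast<int>(x)); // what abs(int) yields: 2
        float exact     = std::abs(x);                   // 2.75, the overload CMath::abs forwards to
        std::cout << truncated << ' ' << exact << '\n';
        return 0;
    }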
From c3476620e019aed4b58b466c630f0c5e9023ecb1 Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 15:03:14 +0100
Subject: [PATCH 13/21] drop some more math.h includes and use c++ limits

---
 src/shogun/classifier/mkl/MKLMulticlassGradient.cpp |  1 -
 src/shogun/kernel/TStudentKernel.cpp                |  1 -
 src/shogun/kernel/string/SpectrumRBFKernel.cpp      |  1 -
 src/shogun/lib/tfhistogram/histogram.cpp            | 13 ++++++-------
 src/shogun/preprocessor/HomogeneousKernelMap.cpp    |  9 ++++-----
 5 files changed, 10 insertions(+), 15 deletions(-)

diff --git a/src/shogun/classifier/mkl/MKLMulticlassGradient.cpp b/src/shogun/classifier/mkl/MKLMulticlassGradient.cpp
index 90811ae5e22..599b8a644e9 100644
--- a/src/shogun/classifier/mkl/MKLMulticlassGradient.cpp
+++ b/src/shogun/classifier/mkl/MKLMulticlassGradient.cpp
@@ -14,7 +14,6 @@
 #include <...>
 #include <...>
 #include <...>
-#include <math.h>
 #include <...>

 using namespace shogun;
diff --git a/src/shogun/kernel/TStudentKernel.cpp b/src/shogun/kernel/TStudentKernel.cpp
index bf56e56c1c0..c955cd4049b 100644
--- a/src/shogun/kernel/TStudentKernel.cpp
+++ b/src/shogun/kernel/TStudentKernel.cpp
@@ -8,7 +8,6 @@
  * Copyright (C) 2011 Berlin Institute of Technology and Max-Planck-Society
  */

-#include <math.h>
 #include <...>
 #include <...>

diff --git a/src/shogun/kernel/string/SpectrumRBFKernel.cpp b/src/shogun/kernel/string/SpectrumRBFKernel.cpp
index f8f420d7484..40b8fd26a07 100644
--- a/src/shogun/kernel/string/SpectrumRBFKernel.cpp
+++ b/src/shogun/kernel/string/SpectrumRBFKernel.cpp
@@ -26,7 +26,6 @@
 #include <...>
 #include <...>
 #include <...>
-#include <math.h>
 #include <...>

diff --git a/src/shogun/lib/tfhistogram/histogram.cpp b/src/shogun/lib/tfhistogram/histogram.cpp
index 1b43b77e242..2aa99ee6285 100644
--- a/src/shogun/lib/tfhistogram/histogram.cpp
+++ b/src/shogun/lib/tfhistogram/histogram.cpp
@@ -18,8 +18,7 @@ limitations under the License.
 #ifdef HAVE_TFLOGGER

 #include "histogram.h"
-#include <float.h>
-#include <math.h>
+#include <limits>
 #include <...>
 #include <...>
 #include <...>
@@ -37,8 +36,8 @@ static std::vector<double>* InitDefaultBucketsInner() {
     neg_buckets.push_back(-v);
     v *= 1.1;
   }
-  buckets.push_back(DBL_MAX);
-  neg_buckets.push_back(-DBL_MAX);
+  buckets.push_back(std::numeric_limits<double>::max());
+  neg_buckets.push_back(-std::numeric_limits<double>::max());
   std::reverse(neg_buckets.begin(), neg_buckets.end());
   std::vector<double>* result = new std::vector<double>;
   result->insert(result->end(), neg_buckets.begin(), neg_buckets.end());
@@ -85,7 +84,7 @@ bool Histogram::DecodeFromProto(const HistogramProto& proto) {

 void Histogram::Clear() {
   min_ = bucket_limits_[bucket_limits_.size() - 1];
-  max_ = -DBL_MAX;
+  max_ = -std::numeric_limits<double>::max();
   num_ = 0;
   sum_ = 0;
   sum_squares_ = 0;
@@ -178,7 +177,7 @@ std::string Histogram::ToString() const {
     if (buckets_[b] <= 0.0) continue;
     sum += buckets_[b];
     snprintf(buf, sizeof(buf), "[ %10.2g, %10.2g ) %7.0f %7.3f%% %7.3f%% ",
-             ((b == 0) ? -DBL_MAX : bucket_limits_[b - 1]),  // left
+             ((b == 0) ? -std::numeric_limits<double>::max() : bucket_limits_[b - 1]),  // left
              bucket_limits_[b],  // right
              buckets_[b],        // count
              mult * buckets_[b], // percentage
@@ -218,7 +217,7 @@ void Histogram::EncodeToProto(HistogramProto* proto,
   }
   if (proto->bucket_size() == 0.0) {
     // It's easier when we restore if we always have at least one bucket entry
-    proto->add_bucket_limit(DBL_MAX);
+    proto->add_bucket_limit(std::numeric_limits<double>::max());
     proto->add_bucket(0.0);
   }
 }
diff --git a/src/shogun/preprocessor/HomogeneousKernelMap.cpp b/src/shogun/preprocessor/HomogeneousKernelMap.cpp
index edae7ad6ce3..f1f12f8c1e5 100644
--- a/src/shogun/preprocessor/HomogeneousKernelMap.cpp
+++ b/src/shogun/preprocessor/HomogeneousKernelMap.cpp
@@ -9,10 +9,9 @@
  */

 #include <shogun/preprocessor/HomogeneousKernelMap.h>
-#include <math.h>
-#include <...>
-#include <...>
+#include <cmath>
+#include <limits>

 using namespace shogun;

@@ -144,7 +143,7 @@

     for (i = 0 ; i < m_numSubdivisions; ++i, mantissa += m_subdivision)
     {
-      x = ldexp (mantissa, exponent);
+      x = std::ldexp (mantissa, exponent);
       xgamma = CMath::pow (x, m_gamma);
       Lxgamma = L * xgamma;
       Llogx = L * CMath::log (x);
@@ -309,7 +308,7 @@ SGVector<float64_t> CHomogeneousKernelMap::apply_to_vector(const SGVector<float64_t>& in_v) const
-        float64_t mantissa = frexp (...);
+        float64_t mantissa = std::frexp (...);
         sign = (mantissa >= 0.0) ? +1.0 : -1.0;
         mantissa *= 2*sign;
         exponent -- ;
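For reference, the std::frexp/std::ldexp pair used by the homogeneous kernel map splits a double into mantissa times 2^exponent and reassembles it exactly. A self-contained sketch of the assumed semantics (not code from the patch):

    #include <cmath>
    #include <cassert>

    int main()
    {
        int exponent = 0;
        double mantissa = std::frexp(6.0, &exponent);   // 6.0 == 0.75 * 2^3
        assert(mantissa == 0.75 && exponent == 3);
        assert(std::ldexp(mantissa, exponent) == 6.0);  // exact round trip
        return 0;
    }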
From 6766f0d91ea79105dd61093e2cc8664d65027d59 Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 15:07:34 +0100
Subject: [PATCH 14/21] define M_SQRT1_2 if not defined in Statistics

---
 src/shogun/mathematics/Statistics.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/shogun/mathematics/Statistics.cpp b/src/shogun/mathematics/Statistics.cpp
index 439544e6eed..5993b4891bc 100644
--- a/src/shogun/mathematics/Statistics.cpp
+++ b/src/shogun/mathematics/Statistics.cpp
@@ -25,6 +25,10 @@
 using namespace Eigen;
 using namespace shogun;

+#ifndef M_SQRT1_2
+#define M_SQRT1_2 0.707106781186547524401
+#endif
+
 float64_t CStatistics::variance(SGVector<float64_t> values)
 {
 	REQUIRE(values.vlen>1, "Number of observations (%d) needs to be at least 1.\n",

From ff21d29316656dd35dc111880f4f5575a95f3798 Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 15:37:36 +0100
Subject: [PATCH 15/21] fix DirectorLinearMachine::get_features and
 DirectorContingencyTableEvaluation::evaluate

---
 src/shogun/evaluation/DirectorContingencyTableEvaluation.h | 2 +-
 src/shogun/machine/DirectorLinearMachine.h                 | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/shogun/evaluation/DirectorContingencyTableEvaluation.h b/src/shogun/evaluation/DirectorContingencyTableEvaluation.h
index 98cab6f4e8d..85aafe1d4d2 100644
--- a/src/shogun/evaluation/DirectorContingencyTableEvaluation.h
+++ b/src/shogun/evaluation/DirectorContingencyTableEvaluation.h
@@ -41,7 +41,7 @@ IGNORE_IN_CLASSLIST class CDirectorContingencyTableEvaluation: public CContingen
 	/** Evaluate */
 	virtual float64_t evaluate(CLabels* predicted, CLabels* ground_truth)
 	{
-		CContingencyTableEvaluation::evaluate(predicted, ground_truth);
+		return CContingencyTableEvaluation::evaluate(predicted, ground_truth);
 	}

 	/** Computes custom score, not implemented
diff --git a/src/shogun/machine/DirectorLinearMachine.h b/src/shogun/machine/DirectorLinearMachine.h
index 1d3f1a867b3..e22821c5b89 100644
--- a/src/shogun/machine/DirectorLinearMachine.h
+++ b/src/shogun/machine/DirectorLinearMachine.h
@@ -73,7 +73,7 @@ IGNORE_IN_CLASSLIST class CDirectorLinearMachine : public CLinearMachine
 	 */
 	virtual CDotFeatures* get_features()
 	{
-		CLinearMachine::get_features();
+		return CLinearMachine::get_features();
 	}

 	/** apply machine to data
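Both one-word fixes above are the same bug: the director override called the base implementation, threw the result away, and fell off the end of a non-void function, which is undefined behaviour that most compilers only warn about (-Wreturn-type). A minimal sketch with hypothetical types, where Base stands in for CLinearMachine et al.:

    struct Base
    {
        virtual int get_value() { return 42; }
        virtual ~Base() {}
    };

    struct Director : Base
    {
        int get_value() override
        {
            // Before the fix the body was just `Base::get_value();` -- the call
            // happened, the result was dropped, and control fell off the end.
            return Base::get_value();
        }
    };

    int main()
    {
        Director d;
        return d.get_value() == 42 ? 0 : 1;
    }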
From c3aee574d048c073aabf6a97941bbd951202a516 Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 15:51:57 +0100
Subject: [PATCH 16/21] fix python typemap when compiling with MSVC

---
 src/interfaces/python/swig_typemaps.i | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/interfaces/python/swig_typemaps.i b/src/interfaces/python/swig_typemaps.i
index 2100e96be16..56e92dd98ec 100644
--- a/src/interfaces/python/swig_typemaps.i
+++ b/src/interfaces/python/swig_typemaps.i
@@ -352,7 +352,11 @@ template <class type>
 static bool array_to_numpy(PyObject* &obj, SGNDArray<type> sg_array, int typecode)
 {
 	int n = 1;
+#ifdef _MSC_VER
+	npy_intp* dims = new npy_intp[sg_array.num_dims];
+#else
 	npy_intp dims[sg_array.num_dims];
+#endif
 	for (int i = 0; i < sg_array.num_dims; i++)
 	{
 		dims[i] = (npy_intp)sg_array.dims[i];
@@ -368,6 +372,9 @@
 		descr, sg_array.num_dims, dims, NULL, (void*) copy, NPY_FARRAY | NPY_WRITEABLE, NULL);
 		((PyArrayObject*) obj)->flags |= NPY_OWNDATA;
 	}
+#ifdef _MSC_VER
+	delete[] dims;
+#endif
 	return descr!=NULL;
 }
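`npy_intp dims[sg_array.num_dims]` with a runtime bound is a C99-style variable-length array, a GCC/Clang extension that MSVC does not implement, hence the _MSC_VER branch with new[]/delete[] above. A short sketch of the portable alternative (illustrative names only; the patch keeps new[]/delete[] to leave the surrounding C-style code unchanged):

    #include <cstddef>
    #include <vector>

    long shape_product(std::size_t num_dims)
    {
        // long dims[num_dims];        // VLA: compiles on GCC/Clang, rejected by MSVC
        std::vector<long> dims(num_dims, 2);  // heap-backed, freed automatically
        long product = 1;
        for (long d : dims)
            product *= d;              // 2^num_dims for this dummy shape
        return product;
    }

    int main() { return shape_product(3) == 8 ? 0 : 1; }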
From bb7d12ebfd34388519c1a417c1abd4ec067d37c1 Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 16:18:11 +0100
Subject: [PATCH 17/21] link the interface library with shogun-static on
 Windows

---
 cmake/ShogunInterfaces.cmake | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/cmake/ShogunInterfaces.cmake b/cmake/ShogunInterfaces.cmake
index cf923fc3a91..a2643028c5e 100644
--- a/cmake/ShogunInterfaces.cmake
+++ b/cmake/ShogunInterfaces.cmake
@@ -43,8 +43,11 @@ IF(DEFINED TARGET_SWIGFLAGS)
 ENDIF()
 SET(SWIG_MODULE_${INTERFACE_NAME}_EXTRA_DEPS ${INTERFACE_FILES})
 SWIG_ADD_MODULE(${INTERFACE_TARGET} ${INTERFACE_NAME} shogun.i sg_print_functions.cpp)
-SWIG_LINK_LIBRARIES(${INTERFACE_TARGET} shogun::shogun ${INTERFACE_LIBRARIES})
-
+IF (WIN32)
+  SWIG_LINK_LIBRARIES(${INTERFACE_TARGET} shogun::shogun-static ${INTERFACE_LIBRARIES})
+ELSE ()
+  SWIG_LINK_LIBRARIES(${INTERFACE_TARGET} shogun::shogun ${INTERFACE_LIBRARIES})
+ENDIF ()

 #get_cmake_property(_variableNames VARIABLES)
 #foreach (_variableName ${_variableNames})

From 4e5e37304595c51afc304990a6b4a1fe6ba39535 Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 17:39:06 +0100
Subject: [PATCH 18/21] update windows related installation manual

---
 doc/readme/INSTALL.md | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/doc/readme/INSTALL.md b/doc/readme/INSTALL.md
index 55b3cdad1e2..9a8116d1dfd 100644
--- a/doc/readme/INSTALL.md
+++ b/doc/readme/INSTALL.md
@@ -38,7 +38,7 @@ or to get just the library:

     conda install -c conda-forge shogun-cpp

-These packages include most of the optional dependencies and are currently available for Linux and MacOS; we're [working on a Windows build](https://github.com/conda-forge/shogun-cpp-feedstock/issues/1).
+These packages include most of the optional dependencies and are currently available for Linux, MacOS and Windows.

 ### Ubuntu ppa
 We are working on integrating Shogun with Debian/Ubuntu.
@@ -88,8 +88,7 @@ Install the latest stable version as
     brew install homebrew/science/shogun

 ### Windows
-Shogun natively compiles under Windows using MSVC, see the [AppVeyor CI build](https://ci.appveyor.com/project/vigsterkr/shogun) and the [Windows section](#manual-windows)
-We currently do not support a binary installer.
+Shogun natively compiles under Windows using MSVC, see the [AppVeyor CI build](https://ci.appveyor.com/project/vigsterkr/shogun) and the [Windows section](#manual-windows). We currently only support binary packages via conda.
 If you are interested in packaging, documenting, or contributing otherwise, please contact us.

 ## Docker images

From 72991f3e1a83d14cd5fa2117674c5d8cb4fb6142 Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 16:27:49 +0100
Subject: [PATCH 19/21] add changelog for 6.1.3

---
 NEWS | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/NEWS b/NEWS
index 87ed7de33c0..74a27cf3eb6 100644
--- a/NEWS
+++ b/NEWS
@@ -2,8 +2,15 @@

 	* SHOGUN Release version 6.1.3 (libshogun 18.0, data 0.11, parameter 1)

+	* Features:
+	 - Drop all <math.h> function calls [Viktor Gal]
+	 - Use c++11 std::isnan, std::isfinite, std::isinf [Viktor Gal]
 	* Bugfixes:
 	  - Port ipython notebooks to be python3 compatible [Viktor Gal]
+	  - Use the shogun-static library on Windows when linking the interface library [Viktor Gal]
+	  - Fix python typemap when compiling with MSVC [Viktor Gal]
+	  - Fix ShogunConfig.cmake paths [Viktor Gal]
+	  - Fix meta example parser bug in parallel builds [Esben Sørig]

 2017-11-29 Viktor Gal

From 5f8403d5e2ce582afd2999c83a8be53c79c1ba7f Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 18:31:58 +0100
Subject: [PATCH 20/21] fix whitespaces in NEWS

---
 NEWS | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/NEWS b/NEWS
index 74a27cf3eb6..c86b4a66636 100644
--- a/NEWS
+++ b/NEWS
@@ -3,8 +3,8 @@
 	* SHOGUN Release version 6.1.3 (libshogun 18.0, data 0.11, parameter 1)

 	* Features:
-	 - Drop all <math.h> function calls [Viktor Gal]
-	 - Use c++11 std::isnan, std::isfinite, std::isinf [Viktor Gal]
+	  - Drop all <math.h> function calls [Viktor Gal]
+	  - Use c++11 std::isnan, std::isfinite, std::isinf [Viktor Gal]
 	* Bugfixes:
 	  - Port ipython notebooks to be python3 compatible [Viktor Gal]
 	  - Use the shogun-static library on Windows when linking the interface library [Viktor Gal]

From 459b968c9fec93fe8245c9c61d9496e40c7fe6c1 Mon Sep 17 00:00:00 2001
From: Viktor Gal
Date: Thu, 7 Dec 2017 18:38:10 +0100
Subject: [PATCH 21/21] revert tfhistogram changes

sad commit

---
 src/shogun/lib/tfhistogram/histogram.cpp | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/shogun/lib/tfhistogram/histogram.cpp b/src/shogun/lib/tfhistogram/histogram.cpp
index 2aa99ee6285..1b43b77e242 100644
--- a/src/shogun/lib/tfhistogram/histogram.cpp
+++ b/src/shogun/lib/tfhistogram/histogram.cpp
@@ -18,7 +18,8 @@ limitations under the License.
 #ifdef HAVE_TFLOGGER

 #include "histogram.h"
-#include <limits>
+#include <float.h>
+#include <math.h>
 #include <...>
 #include <...>
 #include <...>
@@ -36,8 +37,8 @@ static std::vector<double>* InitDefaultBucketsInner() {
     neg_buckets.push_back(-v);
     v *= 1.1;
   }
-  buckets.push_back(std::numeric_limits<double>::max());
-  neg_buckets.push_back(-std::numeric_limits<double>::max());
+  buckets.push_back(DBL_MAX);
+  neg_buckets.push_back(-DBL_MAX);
   std::reverse(neg_buckets.begin(), neg_buckets.end());
   std::vector<double>* result = new std::vector<double>;
   result->insert(result->end(), neg_buckets.begin(), neg_buckets.end());
@@ -84,7 +85,7 @@ bool Histogram::DecodeFromProto(const HistogramProto& proto) {

 void Histogram::Clear() {
   min_ = bucket_limits_[bucket_limits_.size() - 1];
-  max_ = -std::numeric_limits<double>::max();
+  max_ = -DBL_MAX;
   num_ = 0;
   sum_ = 0;
   sum_squares_ = 0;
@@ -177,7 +178,7 @@ std::string Histogram::ToString() const {
     if (buckets_[b] <= 0.0) continue;
     sum += buckets_[b];
     snprintf(buf, sizeof(buf), "[ %10.2g, %10.2g ) %7.0f %7.3f%% %7.3f%% ",
-             ((b == 0) ? -std::numeric_limits<double>::max() : bucket_limits_[b - 1]),  // left
+             ((b == 0) ? -DBL_MAX : bucket_limits_[b - 1]),  // left
              bucket_limits_[b],  // right
              buckets_[b],        // count
              mult * buckets_[b], // percentage
@@ -218,7 +217,7 @@ void Histogram::EncodeToProto(HistogramProto* proto,
   }
   if (proto->bucket_size() == 0.0) {
     // It's easier when we restore if we always have at least one bucket entry
-    proto->add_bucket_limit(std::numeric_limits<double>::max());
+    proto->add_bucket_limit(DBL_MAX);
     proto->add_bucket(0.0);
   }
 }
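The revert is behaviour-preserving: DBL_MAX from <float.h> and std::numeric_limits<double>::max() denote the same value, so only the spelling and the header dependency change. A one-line check:

    #include <cfloat>
    #include <limits>
    #include <cassert>

    int main()
    {
        assert(DBL_MAX == std::numeric_limits<double>::max());
        return 0;
    }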