diff --git a/egs/callhome_diarization/v1/diarization/nnet3/xvector/extract_xvectors.sh b/egs/callhome_diarization/v1/diarization/nnet3/xvector/extract_xvectors.sh index d7591a6a3a8..8d579138c73 100755 --- a/egs/callhome_diarization/v1/diarization/nnet3/xvector/extract_xvectors.sh +++ b/egs/callhome_diarization/v1/diarization/nnet3/xvector/extract_xvectors.sh @@ -102,7 +102,7 @@ if [ $stage -le 0 ]; then fi utils/data/get_uniform_subsegments.py \ --max-segment-duration=$window \ - --overlap-duration=$(echo "$window-$period" | bc) \ + --overlap-duration=$(perl -e "print ($window-$period);") \ --max-remaining-duration=$min_segment \ --constant-duration=True \ $segments > $dir/subsegments diff --git a/egs/callhome_diarization/v1/run.sh b/egs/callhome_diarization/v1/run.sh index acc48bd24f9..f4652c0c0ef 100755 --- a/egs/callhome_diarization/v1/run.sh +++ b/egs/callhome_diarization/v1/run.sh @@ -188,7 +188,7 @@ if [ $stage -le 6 ]; then der=$(grep -oP 'DIARIZATION\ ERROR\ =\ \K[0-9]+([.][0-9]+)?' \ exp/tuning/${dataset}_t${threshold}) - if [ $(echo $der'<'$best_der | bc -l) -eq 1 ]; then + if [ $(perl -e "print ($der < $best_der ? 1 : 0);") -eq 1 ]; then best_der=$der best_threshold=$threshold fi diff --git a/egs/callhome_diarization/v2/run.sh b/egs/callhome_diarization/v2/run.sh index ae05dd9da1c..b79717e2348 100755 --- a/egs/callhome_diarization/v2/run.sh +++ b/egs/callhome_diarization/v2/run.sh @@ -297,7 +297,7 @@ if [ $stage -le 10 ]; then der=$(grep -oP 'DIARIZATION\ ERROR\ =\ \K[0-9]+([.][0-9]+)?' \ $nnet_dir/tuning/${dataset}_t${threshold}) - if [ $(echo $der'<'$best_der | bc -l) -eq 1 ]; then + if [ $(perl -e "print ($der < $best_der ? 1 : 0);") -eq 1 ]; then best_der=$der best_threshold=$threshold fi diff --git a/egs/dihard_2018/v1/run.sh b/egs/dihard_2018/v1/run.sh index 429a1231975..44af9f48c3f 100755 --- a/egs/dihard_2018/v1/run.sh +++ b/egs/dihard_2018/v1/run.sh @@ -186,7 +186,7 @@ if [ $stage -le 7 ]; then der=$(grep -oP 'DIARIZATION\ ERROR\ =\ \K[0-9]+([.][0-9]+)?' \ $ivec_dir/tuning/dihard_2018_dev_t${threshold}) - if [ $(echo $der'<'$best_der | bc -l) -eq 1 ]; then + if [ $(perl -e "print ($der < $best_der ? 1 : 0);") -eq 1 ]; then best_der=$der best_threshold=$threshold fi diff --git a/egs/dihard_2018/v2/run.sh b/egs/dihard_2018/v2/run.sh index 1c018dfcc55..0da1f330ea7 100755 --- a/egs/dihard_2018/v2/run.sh +++ b/egs/dihard_2018/v2/run.sh @@ -260,7 +260,7 @@ if [ $stage -le 12 ]; then der=$(grep -oP 'DIARIZATION\ ERROR\ =\ \K[0-9]+([.][0-9]+)?' \ $nnet_dir/tuning/dihard_2018_dev_t${threshold}) - if [ $(echo $der'<'$best_der | bc -l) -eq 1 ]; then + if [ $(perl -e "print ($der < $best_der ? 1 : 0);") -eq 1 ]; then best_der=$der best_threshold=$threshold fi diff --git a/egs/rm/README.txt b/egs/rm/README.txt index ed588e481c6..4fa3d7c87e8 100644 --- a/egs/rm/README.txt +++ b/egs/rm/README.txt @@ -9,7 +9,7 @@ About the Resource Management corpus: Each subdirectory of this directory contains the scripts for a sequence of experiments. -s5 is the currently recommmended setup. +s5 is the currently recommended setup. s5: This is the "new-new-style" recipe. It is now finished. All further work will be on top of this style of recipe. 
Note: diff --git a/egs/sre08/v1/local/score_sre08.sh b/egs/sre08/v1/local/score_sre08.sh index 92831502f45..c1584946735 100755 --- a/egs/sre08/v1/local/score_sre08.sh +++ b/egs/sre08/v1/local/score_sre08.sh @@ -35,11 +35,11 @@ tot_eer=0.0 printf '% 12s' 'EER:' for condition in $(seq 8); do eer=$(awk '{print $3}' $scores | paste - $trials | awk -v c=$condition '{n=4+c; if ($n == "Y") print $1, $4}' | compute-eer - 2>/dev/null) - tot_eer=$(echo "$tot_eer+$eer" | bc) + tot_eer=$(perl -e "print ($tot_eer+$eer);") eers[$condition]=$eer done -eers[0]=$(echo "$tot_eer/8" | bc -l) +eers[0]=$(perl -e "print ($tot_eer/8.0);") for i in $(seq 0 8); do printf '% 7.2f' ${eers[$i]} diff --git a/egs/swbd/s5c/local/score_sclite_conf.sh b/egs/swbd/s5c/local/score_sclite_conf.sh index 9a1fa5083bf..21da4520a4d 100755 --- a/egs/swbd/s5c/local/score_sclite_conf.sh +++ b/egs/swbd/s5c/local/score_sclite_conf.sh @@ -39,6 +39,12 @@ for f in $data/stm $data/glm $lang/words.txt $lang/phones/word_boundary.int \ [ ! -f $f ] && echo "$0: expecting file $f to exist" && exit 1; done +if [ -f $dir/../frame_subsampling_factor ]; then + factor=$(cat $dir/../frame_subsampling_factor) || exit 1 + frame_shift_opt="--frame-shift=0.0$factor" + echo "$0: $dir/../frame_subsampling_factor exists, using $frame_shift_opt" +fi + name=`basename $data`; # e.g. eval2000 mkdir -p $dir/scoring/log @@ -51,7 +57,7 @@ if [ $stage -le 0 ]; then ACWT=\`perl -e \"print 1.0/LMWT\;\"\` '&&' \ lattice-add-penalty --word-ins-penalty=$wip "ark:gunzip -c $dir/lat.*.gz|" ark:- \| \ lattice-align-words $lang/phones/word_boundary.int $model ark:- ark:- \| \ - lattice-to-ctm-conf --decode-mbr=$decode_mbr --acoustic-scale=\$ACWT ark:- - \| \ + lattice-to-ctm-conf $frame_shift_opt --decode-mbr=$decode_mbr --acoustic-scale=\$ACWT ark:- - \| \ utils/int2sym.pl -f 5 $lang/words.txt \| \ utils/convert_ctm.pl $data/segments $data/reco2file_and_channel \ '>' $dir/score_LMWT_${wip}/$name.ctm || exit 1; diff --git a/egs/wsj/s5/local/chain/tuning/run_tdnn_1g.sh b/egs/wsj/s5/local/chain/tuning/run_tdnn_1g.sh index 526059b7b90..8f566ccfe6d 100755 --- a/egs/wsj/s5/local/chain/tuning/run_tdnn_1g.sh +++ b/egs/wsj/s5/local/chain/tuning/run_tdnn_1g.sh @@ -160,7 +160,7 @@ if [ $stage -le 15 ]; then echo "$0: creating neural net configs using the xconfig parser"; num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') - learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + learning_rate_factor=$(echo "print(0.5/$xent_regularize)" | python) tdnn_opts="l2-regularize=0.01 dropout-proportion=0.0 dropout-per-dim-continuous=true" tdnnf_opts="l2-regularize=0.01 dropout-proportion=0.0 bypass-scale=0.66" linear_opts="l2-regularize=0.01 orthonormal-constraint=-1.0" diff --git a/egs/wsj/s5/steps/segmentation/internal/merge_targets.py b/egs/wsj/s5/steps/segmentation/internal/merge_targets.py index a14aef151c2..84b0c884f45 100755 --- a/egs/wsj/s5/steps/segmentation/internal/merge_targets.py +++ b/egs/wsj/s5/steps/segmentation/internal/merge_targets.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright 2017 Vimal Manohar # Apache 2.0 @@ -16,8 +16,6 @@ option. 
""" -from __future__ import print_function -from __future__ import division import argparse import logging import numpy as np @@ -111,7 +109,7 @@ def should_remove_frame(row, dim): # source[2] = [ 0 0 0 ] """ assert len(row) % dim == 0 - num_sources = len(row) / dim + num_sources = len(row) // dim max_idx = np.argmax(row) max_val = row[max_idx] diff --git a/egs/wsj/s5/utils/data/perturb_data_dir_volume.sh b/egs/wsj/s5/utils/data/perturb_data_dir_volume.sh index dae440b03a3..e357ba8cbfb 100755 --- a/egs/wsj/s5/utils/data/perturb_data_dir_volume.sh +++ b/egs/wsj/s5/utils/data/perturb_data_dir_volume.sh @@ -52,15 +52,15 @@ for line in sys.stdin.readlines(): parts = line.strip().split() if line.strip()[-1] == '|': if re.search('sox --vol', ' '.join(parts[-11:])): - print 'true' + print('true') sys.exit(0) elif re.search(':[0-9]+$', line.strip()) is not None: continue else: if ' '.join(parts[1:3]) == 'sox --vol': - print 'true' + print('true') sys.exit(0) -print 'false' +print('false') "` || exit 1 if $volume_perturb_done; then diff --git a/src/bin/compute-wer-bootci.cc b/src/bin/compute-wer-bootci.cc index b8b0697af75..ba2a4ce739c 100644 --- a/src/bin/compute-wer-bootci.cc +++ b/src/bin/compute-wer-bootci.cc @@ -162,7 +162,7 @@ int main(int argc, char *argv[]) { try { const char *usage = - "Compute a bootstrapping of WER to extract the 95\% confidence interval.\n" + "Compute a bootstrapping of WER to extract the 95% confidence interval.\n" "Take a reference and a transcription file, in integer or text format,\n" "and outputs overall WER statistics to standard output along with its\n" "confidence interval using the bootstrap method of Bisani and Ney.\n" @@ -234,12 +234,12 @@ int main(int argc, char *argv[]) { std::cout.precision(2); std::cerr.precision(2); std::cout << "Set1: %WER " << std::fixed << 100*mean_wer << - " 95\% Conf Interval [ " << 100*mean_wer-100*interval << + " 95% Conf Interval [ " << 100*mean_wer-100*interval << ", " << 100*mean_wer+100*interval << " ]" << '\n'; if(!hyp2_rspecifier.empty()) { std::cout << "Set2: %WER " << std::fixed << 100*mean_wer2 << - " 95\% Conf Interval [ " << 100*mean_wer2-100*interval2 << + " 95% Conf Interval [ " << 100*mean_wer2-100*interval2 << ", " << 100*mean_wer2+100*interval2 << " ]" << '\n'; std::cout << "Probability of Set2 improving Set1: " << std::fixed << diff --git a/src/cudamatrix/cu-device.cc b/src/cudamatrix/cu-device.cc index 140275d3b6e..85c2492c074 100644 --- a/src/cudamatrix/cu-device.cc +++ b/src/cudamatrix/cu-device.cc @@ -111,12 +111,14 @@ void CuDevice::Initialize() { CUBLAS_SAFE_CALL(cublasCreate(&cublas_handle_)); CUBLAS_SAFE_CALL(cublasSetStream(cublas_handle_, cudaStreamPerThread)); + #if CUDA_VERSION >= 9000 if (device_options_.use_tensor_cores) { // Enable tensor cores in CUBLAS // Note if the device does not support tensor cores this will fall back to normal math mode CUBLAS_SAFE_CALL(cublasSetMathMode(cublas_handle_, CUBLAS_TENSOR_OP_MATH)); } + #endif // Initialize the cuSPARSE library CUSPARSE_SAFE_CALL(cusparseCreate(&cusparse_handle_)); diff --git a/src/cudamatrix/cu-kernels.cu b/src/cudamatrix/cu-kernels.cu index 5a5307b9f87..17d56a05772 100644 --- a/src/cudamatrix/cu-kernels.cu +++ b/src/cudamatrix/cu-kernels.cu @@ -28,7 +28,7 @@ #include #include #include "cudamatrix/cu-kernels-ansi.h" - +#include /*********************************************************************** @@ -958,6 +958,7 @@ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, Real trans[TileDim][TileDim + 1]; Real sum[CU1DBLOCK]; } 
smem; + // linear thread id; const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x; const int32_cuda grid_height = gridDim.y * TileDim; @@ -1021,6 +1022,7 @@ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, if (tid == 0) { value[blockIdx.y * gridDim.x + blockIdx.x] = smem.sum[0]; } + } // _trace_mat_mat_trans reduce the partial sum to @@ -1030,6 +1032,7 @@ __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { __shared__ Real ssum[CU1DBLOCK]; + // linear thread id; const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x; const int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; @@ -1046,7 +1049,7 @@ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, } ssum[tid] = tsum; __syncthreads(); - + // Block reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { @@ -2485,6 +2488,8 @@ template __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { __shared__ Real smem[CU1DBLOCK]; + typedef cub::BlockReduce BlockReduceT; + __shared__ typename BlockReduceT::TempStorage temp_storage; const int i = blockIdx.x; const int x_start = i * src_stride; const int y_start = i * d.stride; @@ -2496,24 +2501,9 @@ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { for (int j = tid; j < d.cols; j += CU1DBLOCK) { tmax = fmax(tmax, x[x_start + j]); } - smem[tid] = tmax; - __syncthreads(); - - // reduce to 2x warpSize elements per row -# pragma unroll - for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { - if (tid < shift) { - smem[tid] = fmax(smem[tid], smem[tid + shift]); - } - __syncthreads(); - } - - // reduce to 1 element per row - if (tid < warpSize) { -# pragma unroll - for (int shift = warpSize; shift > 0; shift >>= 1) { - smem[tid] = fmax(smem[tid], smem[tid + shift]); - } + tmax = BlockReduceT(temp_storage).Reduce(tmax, cub::Max()); + if (tid == 0) { + smem[0] = tmax; } // broadcast max to all threads @@ -2526,24 +2516,9 @@ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { for (int j = tid; j < d.cols; j += CU1DBLOCK) { tsum += exp(x[x_start + j] - max); } - smem[tid] = tsum; - __syncthreads(); - - // reduce to 2x warpSize elements per row -# pragma unroll - for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { - if (tid < shift) { - smem[tid] += smem[tid + shift]; - } - __syncthreads(); - } - - // reduce to 1 element per row - if (tid < warpSize) { -# pragma unroll - for (int shift = warpSize; shift > 0; shift >>= 1) { - smem[tid] += smem[tid + shift]; - } + tsum = BlockReduceT(temp_storage).Sum(tsum); + if (tid == 0) { + smem[0] = tsum; } // broadcast sum to all threads @@ -2577,6 +2552,8 @@ static void _normalize_per_row(Real *y, int y_stride, const Real *x, const int i = blockIdx.x; const int tid = threadIdx.x; const Real* x_row = x + i * x_d.stride; + typedef cub::BlockReduce BlockReduceT; + __shared__ typename BlockReduceT::TempStorage temp_storage; __shared__ Real ssum[CU1DBLOCK]; // Reduce x_j^2 to CU1DBLOCK elements per row @@ -2584,34 +2561,14 @@ static void _normalize_per_row(Real *y, int y_stride, const Real *x, for (int j = tid; j < x_d.cols; j += CU1DBLOCK) { tsum += x_row[j] * x_row[j]; } - ssum[tid] = tsum; + tsum = BlockReduceT(temp_storage).Sum(tsum); __syncthreads(); - - // Tree reduce to 2x warpSize elements per row -# pragma unroll - for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { - if (tid < 
shift) - ssum[tid] += ssum[tid + shift]; - __syncthreads(); - } - - // Reduce last warp to 1 element per row. - // Threads implicitly synchronized within a warp. - if (tid < warpSize) { -# pragma unroll - for (int shift = warpSize; shift > 0; shift >>= 1) { - ssum[tid] += ssum[tid + shift]; - } - } + const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66 - if (tid == 0) { - ssum[0] = sqrt( - fmax(ssum[0] / (target_rms * target_rms * x_d.cols), kSquaredNormFloor)); - } + ssum[tid] = sqrt( + fmax(tsum / (target_rms * target_rms * x_d.cols), kSquaredNormFloor)); - // Broadcast floored stddev to all threads. - __syncthreads(); const Real stddev_div_target_rms = ssum[0]; const Real scale = Real(1) / stddev_div_target_rms; @@ -2626,7 +2583,6 @@ static void _normalize_per_row(Real *y, int y_stride, const Real *x, } } - template __global__ static void _diff_normalize_per_row(Real *id, int id_stride, const Real *iv, @@ -2722,6 +2678,8 @@ __global__ static void _log_softmax_reduce(Real* y, const Real* x, MatrixDim y_dim, int x_stride) { __shared__ Real smem[CU1DBLOCK]; + typedef cub::BlockReduce BlockReduceT; + __shared__ typename BlockReduceT::TempStorage temp_storage; const int i = blockIdx.x; const int x_start = i * x_stride; const int y_start = i * y_dim.stride; @@ -2733,23 +2691,9 @@ static void _log_softmax_reduce(Real* y, const Real* x, MatrixDim y_dim, for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) { tmax = fmax(tmax, x[x_start + j]); } - smem[tid] = tmax; - __syncthreads(); - - // reduce to 2x warpSize elements per row -# pragma unroll - for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { - if (tid < shift) { - smem[tid] = fmax(smem[tid], smem[tid + shift]); - } - __syncthreads(); - } - - // reduce to 1 element per row - if (tid < warpSize) { - for (int shift = warpSize; shift > 0; shift >>= 1) { - smem[tid] = fmax(smem[tid], smem[tid + shift]); - } + tmax = BlockReduceT(temp_storage).Reduce(tmax, cub::Max()); + if (tid == 0) { + smem[0] = tmax; } // broadcast max to all threads @@ -2762,23 +2706,9 @@ static void _log_softmax_reduce(Real* y, const Real* x, MatrixDim y_dim, for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) { tsum += exp(x[x_start + j] - max); } - smem[tid] = tsum; - __syncthreads(); - - // reduce to 2x warpSize elements per row -# pragma unroll - for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { - if (tid < shift) { - smem[tid] += smem[tid + shift]; - } - __syncthreads(); - } - - // reduce to 1 element per row - if (tid < warpSize) { - for (int shift = warpSize; shift > 0; shift >>= 1) { - smem[tid] += smem[tid + shift]; - } + tsum = BlockReduceT(temp_storage).Sum(tsum); + if (tid == 0) { + smem[0] = tsum; } // broadcast sum to all threads @@ -3024,6 +2954,9 @@ static void _diff_softmax(Real* x, const MatrixDim dim, const Real* value, const int value_stride, const Real* diff, const int diff_stride) { __shared__ Real ssum[CU1DBLOCK]; + typedef cub::BlockReduce BlockReduceT; + __shared__ typename BlockReduceT::TempStorage temp_storage; + const int tid = threadIdx.x; const int i = blockIdx.x; const int value_start = i * value_stride; @@ -3035,24 +2968,9 @@ static void _diff_softmax(Real* x, const MatrixDim dim, const Real* value, for (int j = tid; j < dim.cols; j += CU1DBLOCK) { tsum += value[value_start + j] * diff[diff_start + j]; } - ssum[tid] = tsum; - __syncthreads(); - - // Tree reduce to 2x warpSize elements. 
-# pragma unroll - for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { - if (tid < shift) { - ssum[tid] += ssum[tid + shift]; - } - __syncthreads(); - } - - // Warp reduce to 1 element. Threads implicitly synchronized within a warp. - if (tid < warpSize) { -# pragma unroll - for (int shift = warpSize; shift > 0; shift >>= 1) { - ssum[tid] += ssum[tid + shift]; - } + tsum = BlockReduceT(temp_storage).Sum(tsum); + if (tid == 0) { + ssum[0] = tsum; } // Broadcast result to all threads @@ -3078,6 +2996,8 @@ static void _diff_log_softmax(const MatrixDim in_deriv_dim, Real* in_deriv) { __shared__ Real ssum[CU1DBLOCK]; + typedef cub::BlockReduce BlockReduceT; + __shared__ typename BlockReduceT::TempStorage temp_storage; const int tid = threadIdx.x; const int i = blockIdx.x; const int out_value_start = i * out_value_stride; @@ -3089,24 +3009,9 @@ static void _diff_log_softmax(const MatrixDim in_deriv_dim, for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) { tsum += out_deriv[out_deriv_start + j]; } - ssum[tid] = tsum; - __syncthreads(); - - // Tree reduce to 2x warpSize elements. -# pragma unroll - for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { - if (tid < shift) { - ssum[tid] += ssum[tid + shift]; - } - __syncthreads(); - } - - // Warp reduce to 1 element. Threads implicitly synchronized within a warp. - if (tid < warpSize) { -# pragma unroll - for (int shift = warpSize; shift > 0; shift >>= 1) { - ssum[tid] += ssum[tid + shift]; - } + tsum = BlockReduceT(temp_storage).Sum(tsum); + if (tid == 0) { + ssum[0] = tsum; } // Broadcast result to all threads diff --git a/src/doc/data_prep.dox b/src/doc/data_prep.dox index d8fe1746df1..e81032537cc 100644 --- a/src/doc/data_prep.dox +++ b/src/doc/data_prep.dox @@ -191,7 +191,7 @@ the speaker identities, you can just make the speaker-ids the same as the uttera so the format of the file would be just \ \. We have made the previous sentence bold because we have encountered people creating a "global" speaker-id. This is a bad idea because it makes cepstral mean normalization -ineffective in traning (since it's applied globally), and because it will create problems +ineffective in training (since it's applied globally), and because it will create problems when you use utils/split_data_dir.sh to split your data into pieces. There is another file that exists in some setups; it is used only occasionally and diff --git a/src/doc/dependencies.dox b/src/doc/dependencies.dox index 63d2658b726..d8a5591955f 100644 --- a/src/doc/dependencies.dox +++ b/src/doc/dependencies.dox @@ -113,7 +113,7 @@ - CLAPACK, the linear algebra library (we download the headers). This is useful only on systems where you don't have ATLAS and are instead compiling with CLAPACK. - - OpenBLAS: this is an alernative to ATLAS or CLAPACK. The scripts don't + - OpenBLAS: this is an alternative to ATLAS or CLAPACK. The scripts don't use it by default but we provide installation scripts so you can install it if you want to compare it against ATLAS (it's more actively maintained than ATLAS). diff --git a/src/doc/dnn.dox b/src/doc/dnn.dox index 5b3d2b98261..bab4658e552 100644 --- a/src/doc/dnn.dox +++ b/src/doc/dnn.dox @@ -37,7 +37,7 @@ namespace kaldi { We currently have three separate codebases for deep neural nets in Kaldi. All are still active in the sense that the up-to-date recipes refer to all of them. The first one ("nnet1"( is located in code subdirectories nnet/ and - nnetbin/, and is primiarly maintained by Karel Vesely. 
The second is located + nnetbin/, and is primarily maintained by Karel Vesely. The second is located in code subdirectories nnet2/ and nnet2bin/, and is primarily maintained by Daniel Povey (this code was originally based on an earlier version of Karel's code, but it has been extensively rewritten). The third is located diff --git a/src/doc/io.dox b/src/doc/io.dox index dc958f57a6f..8f3a3cc05b6 100644 --- a/src/doc/io.dox +++ b/src/doc/io.dox @@ -383,7 +383,7 @@ namespace kaldi { std::string rspecifier2 = "ark:-"; // archive read from stdin. // write to a gzipped text archive. std::string wspecifier1 = "ark,t:| gzip -c > /some/dir/foo.ark.gz"; - std::string wspecifier2 = "ark,scp:data/my.ark,data/my.ark"; + std::string wspecifier2 = "ark,scp:data/my.ark,data/my.scp"; \endcode Usually, an rspecifier or wspecifier consists of a comma-separated, unordered @@ -401,7 +401,7 @@ namespace kaldi { \endverbatim This will write an archive, and a script file with lines like "utt_id /somedir/foo.ark:1234" that specify offsets into the - archive for more efficient random access. You can then do what you like which + archive for more efficient random access. You can then do whatever you like with the script file, including breaking it up into segments, and it will behave like any other script file. Note that although the order of options before the colon doesn't generally matter, in this particular case the "ark" must come before diff --git a/src/doc/kaldi_for_dummies.dox b/src/doc/kaldi_for_dummies.dox index d712ab87af9..b48d6dd8dac 100644 --- a/src/doc/kaldi_for_dummies.dox +++ b/src/doc/kaldi_for_dummies.dox @@ -71,7 +71,7 @@ and installation, - \c awk – programming language, used for searching and processing patterns in files and data streams, - \c bash – Unix shell and script programming language, - - \c grep – command-line utility for searching plain-text data sets for lines + - \c grep – command-line utility for searching plain-text datasets for lines matching a regular expression, - \c make – automatically builds executable programs and libraries from source code, @@ -96,7 +96,7 @@ be nice if you read any \c README files you find. \c kaldi - main Kaldi directory which contains: - \c egs – example scripts allowing you to quickly build ASR -systems for over 30 popular speech corporas (documentation is attached for each +systems for over 30 popular speech corpora (documentation is attached for each project), - \c misc – additional tools and supplies, not needed for proper Kaldi functionality, @@ -136,34 +136,34 @@ the stuff related to your project. I assume that you want to set up an ASR system, basing on your own audio data. For example - let it be a set of 100 files. File format is WAV. Each file -contains 3 spoken digits recorded in english language, one by one. Each of +contains 3 spoken digits recorded in English language, one by one. Each of these audio files is named in a recognizable way (e.g. \c 1_5_6.wav, which in my pattern means that the spoken sentence is 'one, five, six') and placed in the recognizable folder representing particular speaker during a particular recording session (there may be a situation that you have recordings of the same person but in two different quality/noise environments - put these -in separate folders). So to sum up, my exemplary data set looks like this: +in separate folders). 
So to sum up, my exemplary dataset looks like this: - 10 different speakers (ASR systems must be trained and tested on different speakers, the more speakers you have the better), - each speaker says 10 sentences, - - 100 senteces/utterances (in 100 *.wav files placed in 10 folders related to + - 100 sentences/utterances (in 100 *.wav files placed in 10 folders related to particular speakers - 10 *.wav files in each folder), - 300 words (digits from zero to nine), - each sentence/utterance consist of 3 words. -Whatever your first data set is, adjust my example to your particular case. Be -careful with big data sets and complex grammars - start with something simple. +Whatever your first dataset is, adjust my example to your particular case. Be +careful with big datasets and complex grammars - start with something simple. Sentences that contain only digits are perfect in this case.

Task

Go to \c kaldi/egs/digits directory and create \c digits_audio folder. In \c kaldi/egs/digits/digits_audio create two folders: \c train and \c test. Select one speaker -of your choice to represent testing data set. Use this speaker's 'speakerID' as +of your choice to represent testing dataset. Use this speaker's 'speakerID' as a name for an another new folder in \c kaldi/egs/digits/digits_audio/test directory. Then put there all the audio files related to that person. Put the rest (9 speakers) into \c train folder - this will be your training -data set. Also create subfolders for each speaker. +dataset. Also create subfolders for each speaker. \subsection kaldi_for_dummies_acoustic Acoustic data @@ -174,14 +174,14 @@ section as well) can be considered as a text file with some number of strings (each string in a new line). These strings need to be sorted. If you will encounter any sorting issues you can use Kaldi scripts for checking (\c utils/validate_data_dir.sh) and fixing (\c utils/fix_data_dir.sh) data order. -And for you information - \c utils directory will be attached to your project in +And for your information - \c utils directory will be attached to your project in \ref kaldi_for_dummies_tools "Tools attachment" section.

Task

In \c kaldi/egs/digits directory, create a folder \c data. Then create \c test and \c train subfolders inside. Create in each subfolder following files (so you have files named in the same way in \c test and \c train subfolders -but they relate to two different data sets that you created before): +but they relate to two different datasets that you created before): a.) \c spk2gender
This file informs about speakers gender. As we assumed, 'speakerID' is a unique @@ -252,7 +252,7 @@ four four two \subsection kaldi_for_dummies_language Language data -This section relates to language modelling files that also need to be considered +This section relates to language modeling files that also need to be considered as 'must be done'. Look for the syntax details here: \ref data_prep (each file is precisely described). Also feel free to read some examples in other \c egs scripts. Now is the perfect time. @@ -395,7 +395,7 @@ decided to use two different training methods: - TRI1 - simple triphone training (first triphone pass). These two methods are enough to show noticable differences in decoding results -using only digits lexicon and small training data set. +using only digits lexicon and small training dataset.

Task

In \c kaldi/egs/digits directory create 3 scripts: @@ -432,7 +432,7 @@ c.) \c run.sh . ./path.sh || exit 1 . ./cmd.sh || exit 1 -nj=1 # number of parallel jobs - 1 is perfect for such a small data set +nj=1 # number of parallel jobs - 1 is perfect for such a small dataset lm_order=1 # language model order (n-gram quantity) - 1 is enough for digits grammar # Safety mechanism (possible running this script with modified arguments) @@ -575,7 +575,7 @@ folder (same directory). This is just an example. The point of this short tutorial is to show you how to create 'anything' in Kaldi and to get a better understanding of how to think while using this toolkit. Personally I started with looking for tutorials made -by the Kaldi authors/developers. After succesful Kaldi installation I launched +by the Kaldi authors/developers. After successful Kaldi installation I launched some example scripts (Yesno, Voxforge, LibriSpeech - they are relatively easy and have free acoustic/language data to download - I used these three as a base for my own scripts). @@ -586,7 +586,7 @@ There are two very useful sections for beginners inside:
a.) \ref tutorial - almost 'step by step' tutorial on how to set up an ASR system; up to some point this can be done without RM dataset. It is good to read it,
-b.) \ref data_prep - very detailed explaination of how to use your own data +b.) \ref data_prep - very detailed explanation of how to use your own data in Kaldi. More useful links about Kaldi I found:
diff --git a/src/doc/tutorial_looking.dox b/src/doc/tutorial_looking.dox index 420abfc9bce..831d721c7eb 100644 --- a/src/doc/tutorial_looking.dox +++ b/src/doc/tutorial_looking.dox @@ -171,7 +171,7 @@ making sure have their normal values, begin with KALDI_. This is a precaution to avoid future conflicts with other codebases (since \#defines don't limit themselves to the kaldi namespace). Notice the style of the function names: LikeThis(). Our style is generally based on - this one , + this one , to conform with OpenFst, but there are some differences. To see other elements of the style, which will help you to understand Kaldi @@ -190,7 +190,7 @@ It prints out the usage, which should give you a generic idea of how Kaldi progr are called. Note that while there is a --config option that can be used to pass a configuration file, in general Kaldi is not as config-driven as HTK and these files are not widely used. You will see a --binary option. In general, Kaldi file -formats come in both binary and test forms, and the --binary option controls how +formats come in both binary and text forms, and the --binary option controls how they are written. However, this only controls how single objects (e.g. acoustic models) are written. For whole collections of objects (e.g. collections of feature files), there is a different mechanism that we will come to later. diff --git a/src/doc/tutorial_prereqs.dox b/src/doc/tutorial_prereqs.dox index 82079a281b9..72b1fcf8ad8 100644 --- a/src/doc/tutorial_prereqs.dox +++ b/src/doc/tutorial_prereqs.dox @@ -51,7 +51,7 @@ The most difficult part of the installation process relates to the math library ATLAS; if this is not already installed as a library on your system you will have to compile it, and this requires that CPU throttling be turned off, which - may require root priveleges. We provide scripts and detailed instructions for + may require root privileges. We provide scripts and detailed instructions for all installation steps. When scripts fail, read the output carefully because it tries to provide guidance as to how to fix problems. Please inform us if there are problems at any point, however minor; see \ref other. diff --git a/src/doc/tutorial_running.dox b/src/doc/tutorial_running.dox index f977348a3cb..d639cd4e664 100644 --- a/src/doc/tutorial_running.dox +++ b/src/doc/tutorial_running.dox @@ -115,14 +115,14 @@ Now go back to the data directory and change directory to /train. Then execute t \verbatim head text -head spk2gender.map +head spk2gender head spk2utt head utt2spk head wav.scp \endverbatim - text - This file contains mappings between utterances and utterance ids which will be used by Kaldi. This file will be turned into an integer format-- still a text file, but with the words replaced with integers. -- spk2gender.map - This file contains mappings between speakers and their gender. This also acts as a list of unique users involved in training. +- spk2gender - This file contains mappings between speakers and their gender. This also acts as a list of unique users involved in training. - spk2utt - This is a mapping between the speaker identifiers and all the utterance identifiers associated with the speaker. - utt2spk - This is a one-to-one mapping between utterance ids and the corresponding speaker identifiers. - wav.scp - This file is actually read directly by Kaldi programs when doing feature extraction. Look at the file again. It is parsed as a set of key-value pairs, where the key is the first string on each line. 
The value is a kind of "extended filename", and you can guess how it works. Since it is for reading we will refer to this type of string as an "rxfilename" (for writing we use the term wxfilename). See \ref io_sec_xfilename if you are curious. Note that although we use the extension .scp, this is not a script file in the HTK sense (i.e. it is not viewed as an extension to the command-line arguments). @@ -383,7 +383,7 @@ do copy-tree --binary=false exp/mono/tree - | less \endverbatim Note that this is a monophone "tree" so it is very trivial-- it -does not have any "splits". Although this tree format was not indended to be +does not have any "splits". Although this tree format was not intended to be very human-readable, we have received a number of queries about the tree format so we will explain it. The rest of this paragraph can be skipped over by the casual reader. After "ToPdf", the tree file contains an object of the @@ -442,7 +442,7 @@ Type \verbatim grep Overall exp/mono/log/acc.{?,??}.{?,??}.log \endverbatim -You can see the acoustic likelihods on each iteration. Next look at one of the files +You can see the acoustic likelihoods on each iteration. Next look at one of the files exp/mono/log/update.*.log to see what kind of information is in the update log. When the monophone training is finished, we can test the monophone decoding. Before decoding, we have to create the decode graph. Type: @@ -505,7 +505,7 @@ gmm-decode-faster \endverbatim to see the usage message, and match up the arguments with what you see in the log file. Recall that "rspecifier" is one of those strings that specifies how to read a table, -and "wspecifier" specifies how to write one. Look carefuly at these arguments and try +and "wspecifier" specifies how to write one. Look carefully at these arguments and try to figure out what they mean. Look at the rspecifier that corresponds to the features, and try to understand it (this one has spaces inside, so Kaldi prints it out with single quotes around it so that you could paste it into the shell and the program would run as intended). diff --git a/src/doc/versions.dox b/src/doc/versions.dox index b26978b6e4d..08e2c2bbda7 100644 --- a/src/doc/versions.dox +++ b/src/doc/versions.dox @@ -28,7 +28,7 @@ \section versions_scheme Versioning scheme - During its lifetime, Kaldi has has three different versioning methods. + During its lifetime, Kaldi has three different versioning methods. Originally Kaldi was a subversion (svn)-based project, and was hosted on Sourceforge. Then Kaldi was moved to github, and for some time the only version-number available was the git hash of the commit. @@ -121,7 +121,7 @@ - Create a nnet3-based setup for RNN language models (i.e. recurrent and neural net based language models) - Some extentions to the core of the nnet3 framework to support constant values and - scalar multiplication without dedicated compoennts. + scalar multiplication without dedicated components. Below are commits corresponding to minor version numbers 5.3.x. 
diff --git a/src/fstext/determinize-lattice-inl.h b/src/fstext/determinize-lattice-inl.h index 43ad809f70e..775228bfd21 100644 --- a/src/fstext/determinize-lattice-inl.h +++ b/src/fstext/determinize-lattice-inl.h @@ -510,7 +510,7 @@ template class LatticeDeterminizer { if (!CheckMemoryUsage()) return false; } return (determinized_ = true); - } catch (std::bad_alloc) { + } catch (const std::bad_alloc &) { int32 repo_size = repository_.MemSize(), arcs_size = num_arcs_ * sizeof(TempArc), elems_size = num_elems_ * sizeof(Element), @@ -520,7 +520,7 @@ template class LatticeDeterminizer { << " (repo,arcs,elems) = (" << repo_size << "," << arcs_size << "," << elems_size << ")"; return (determinized_ = false); - } catch (std::runtime_error) { + } catch (const std::runtime_error &) { KALDI_WARN << "Caught exception doing lattice determinization"; return (determinized_ = false); } diff --git a/src/fstext/lattice-weight.h b/src/fstext/lattice-weight.h index af4826f7bed..86bec97d4e8 100644 --- a/src/fstext/lattice-weight.h +++ b/src/fstext/lattice-weight.h @@ -179,8 +179,7 @@ class LatticeWeightTpl { } else if (s == "-Infinity") { f = -numeric_limits::infinity(); } else if (s == "BadNumber") { - f = numeric_limits::infinity(); - f -= f; // get NaN + f = numeric_limits::quiet_NaN(); } else { char *p; f = strtod(s.c_str(), &p); diff --git a/src/gmm/mle-diag-gmm.h b/src/gmm/mle-diag-gmm.h index 24194ef886a..d41d36489bf 100644 --- a/src/gmm/mle-diag-gmm.h +++ b/src/gmm/mle-diag-gmm.h @@ -85,7 +85,7 @@ struct MapDiagGmmOptions { /// Tau value for the weights-- this tau value is applied /// per state, not per Gaussian. BaseFloat weight_tau; - + MapDiagGmmOptions(): mean_tau(10.0), variance_tau(50.0), weight_tau(10.0) { } @@ -150,8 +150,8 @@ class AccumDiagGmm { const MatrixBase &data, const VectorBase &frame_weights, int32 num_threads); - - + + /// Increment the stats for this component by the specified amount /// (not all parts may be taken, depending on flags). /// Note: x_stats and x2_stats are assumed to already be multiplied by "occ" @@ -162,7 +162,7 @@ class AccumDiagGmm { /// Increment with stats from this other accumulator (times scale) void Add(double scale, const AccumDiagGmm &acc); - + /// Smooths the accumulated counts by adding 'tau' extra frames. An example /// use for this is I-smoothing for MMIE. Calls SmoothWithAccum. void SmoothStats(BaseFloat tau); @@ -179,13 +179,13 @@ class AccumDiagGmm { void SmoothWithModel(BaseFloat tau, const DiagGmm &src_gmm); // Const accessors - const GmmFlagsType Flags() const { return flags_; } + GmmFlagsType Flags() const { return flags_; } const VectorBase &occupancy() const { return occupancy_; } const MatrixBase &mean_accumulator() const { return mean_accumulator_; } const MatrixBase &variance_accumulator() const { return variance_accumulator_; } // used in testing. - void AssertEqual(const AccumDiagGmm &other); + void AssertEqual(const AccumDiagGmm &other); private: int32 dim_; int32 num_comp_; diff --git a/src/gmm/mle-full-gmm.h b/src/gmm/mle-full-gmm.h index 6e770764e1e..618714b0e9b 100644 --- a/src/gmm/mle-full-gmm.h +++ b/src/gmm/mle-full-gmm.h @@ -1,7 +1,7 @@ // gmm/mle-full-gmm.h // Copyright 2009-2011 Jan Silovsky; Saarland University; -// Microsoft Corporation; +// Microsoft Corporation; // Univ. 
Erlangen Nuremberg, Korbinian Riedhammer // See ../../COPYING for clarification regarding multiple authors @@ -91,7 +91,7 @@ class AccumFullGmm { void Resize(int32 num_components, int32 dim, GmmFlagsType flags); /// Calls Resize with arguments based on gmm_ptr_ void Resize(const FullGmm &gmm, GmmFlagsType flags); - + void ResizeVarAccumulator(int32 num_comp, int32 dim); /// Returns the number of mixture components int32 NumGauss() const { return num_comp_; } @@ -122,8 +122,8 @@ class AccumFullGmm { const VectorBase &data, BaseFloat frame_posterior); - /// Accessors - const GmmFlagsType Flags() const { return flags_; } + /// Accessors + GmmFlagsType Flags() const { return flags_; } const Vector &occupancy() const { return occupancy_; } const Matrix &mean_accumulator() const { return mean_accumulator_; } const std::vector > &covariance_accumulator() const { return covariance_accumulator_; } diff --git a/src/makefiles/cuda_64bit.mk b/src/makefiles/cuda_64bit.mk index d66ae03602f..eb8cf743ab3 100644 --- a/src/makefiles/cuda_64bit.mk +++ b/src/makefiles/cuda_64bit.mk @@ -5,7 +5,7 @@ ifndef CUDATKDIR $(error CUDATKDIR not defined.) endif -CXXFLAGS += -DHAVE_CUDA -I$(CUDATKDIR)/include -fPIC -pthread -isystem $(OPENFSTINC) -rdynamic +CXXFLAGS += -DHAVE_CUDA -I$(CUDATKDIR)/include -fPIC -pthread -isystem $(OPENFSTINC) CUDA_INCLUDE= -I$(CUDATKDIR)/include -I$(CUBROOT) CUDA_FLAGS = --machine 64 -DHAVE_CUDA \ @@ -14,4 +14,4 @@ CUDA_FLAGS = --machine 64 -DHAVE_CUDA \ --verbose -Xcompiler "$(CXXFLAGS)" CUDA_LDFLAGS += -L$(CUDATKDIR)/lib64 -Wl,-rpath,$(CUDATKDIR)/lib64 -CUDA_LDLIBS += -lcublas -lcusparse -lcudart -lcurand -lnvToolsExt #LDLIBS : The libs are loaded later than static libs in implicit rule +CUDA_LDLIBS += -lcublas -lcusparse -lcudart -lcurand -lnvToolsExt #LDLIBS : The .so libs are loaded later than static libs in implicit rule diff --git a/src/makefiles/default_rules.mk b/src/makefiles/default_rules.mk index 25dafae2f3a..fcce90f5c21 100644 --- a/src/makefiles/default_rules.mk +++ b/src/makefiles/default_rules.mk @@ -125,7 +125,7 @@ valgrind: .valgrind #buid up dependency commands CC_SRCS=$(wildcard *.cc) #check if files exist to run dependency commands on -ifneq ($(CC_SRCS),) +ifneq ($(CC_SRCS),) CC_DEP_COMMAND=$(CXX) -M $(CXXFLAGS) $(CC_SRCS) endif diff --git a/src/makefiles/linux_x86_64_mkl.mk b/src/makefiles/linux_x86_64_mkl.mk index 7a70fa51a65..d1c399d9796 100644 --- a/src/makefiles/linux_x86_64_mkl.mk +++ b/src/makefiles/linux_x86_64_mkl.mk @@ -22,7 +22,7 @@ ifndef MKLROOT $(error MKLROOT not defined.) endif -MKLLIB ?= $(MKLROOT)/lib/em64t +MKLLIB ?= $(MKLROOT)/lib/intel64 CXXFLAGS = -std=c++11 -I.. -isystem $(OPENFSTINC) -O1 $(EXTRA_CXXFLAGS) \ -Wall -Wno-sign-compare -Wno-unused-local-typedefs \ diff --git a/src/nnet3/nnet-analyze.cc b/src/nnet3/nnet-analyze.cc index 584a7c19ab8..a3696403eba 100644 --- a/src/nnet3/nnet-analyze.cc +++ b/src/nnet3/nnet-analyze.cc @@ -880,7 +880,7 @@ void ComputationChecker::CheckComputationIndexes() const { KALDI_ERR << "Backprop input needed but not supplied."; if ((properties & kBackpropNeedsOutput) && c.arg4 == 0) KALDI_ERR << "Backprop output needed but not supplied."; - if (c.arg6 == 0 && !(properties && kUpdatableComponent)) { + if (c.arg6 == 0 && !(properties & kUpdatableComponent)) { // note: we could perhaps make this just a warning, // or optimize it away somehow. 
KALDI_ERR << "Backprop is done but has no effect."; diff --git a/src/nnet3/nnet-chain-training.cc b/src/nnet3/nnet-chain-training.cc index a798cb597f5..cccb1110d3c 100644 --- a/src/nnet3/nnet-chain-training.cc +++ b/src/nnet3/nnet-chain-training.cc @@ -298,7 +298,7 @@ void NnetChainTrainer::PrintMaxChangeStats() const { (num_minibatches_processed_ * (nnet_config.backstitch_training_scale == 0.0 ? 1.0 : 1.0 + 1.0 / nnet_config.backstitch_training_interval)) - << " \% of the time."; + << " % of the time."; i++; } } @@ -308,7 +308,7 @@ void NnetChainTrainer::PrintMaxChangeStats() const { (num_minibatches_processed_ * (nnet_config.backstitch_training_scale == 0.0 ? 1.0 : 1.0 + 1.0 / nnet_config.backstitch_training_interval)) - << " \% of the time."; + << " % of the time."; } NnetChainTrainer::~NnetChainTrainer() { diff --git a/src/nnet3/nnet-simple-component.cc b/src/nnet3/nnet-simple-component.cc index e8c99494b06..32f49745c0c 100644 --- a/src/nnet3/nnet-simple-component.cc +++ b/src/nnet3/nnet-simple-component.cc @@ -4068,13 +4068,13 @@ bool CompositeComponent::IsUpdatable() const { int32 CompositeComponent::InputDim() const { KALDI_ASSERT(!components_.empty()); return components_.front()->InputDim(); -}; +} // virtual int32 CompositeComponent::OutputDim() const { KALDI_ASSERT(!components_.empty()); return components_.back()->OutputDim(); -}; +} // virtual int32 CompositeComponent::Properties() const { @@ -4096,7 +4096,7 @@ int32 CompositeComponent::Properties() const { if (last_component_properties & kStoresStats) ans |= kBackpropNeedsOutput; return ans; -}; +} MatrixStrideType CompositeComponent::GetStrideType(int32 i) const { @@ -4319,7 +4319,7 @@ void CompositeComponent::Backprop(const std::string &debug_info, // optimization; other propagates might also be skippable. int32 properties = components_[num_components - 2]->Properties(), next_properties = components_[num_components - 1]->Properties(); - if (!(properties & (kBackpropNeedsOutput || kUsesMemo)) && + if (!(properties & (kBackpropNeedsOutput | kUsesMemo)) && !(next_properties & kBackpropNeedsInput)) { num_components_to_propagate--; } diff --git a/src/nnet3/nnet-training.cc b/src/nnet3/nnet-training.cc index 0acaa5c2008..820644470c7 100644 --- a/src/nnet3/nnet-training.cc +++ b/src/nnet3/nnet-training.cc @@ -257,7 +257,7 @@ void NnetTrainer::PrintMaxChangeStats() const { (num_minibatches_processed_ * (config_.backstitch_training_scale == 0.0 ? 1.0 : 1.0 + 1.0 / config_.backstitch_training_interval)) - << " \% of the time."; + << " % of the time."; i++; } } @@ -267,7 +267,7 @@ void NnetTrainer::PrintMaxChangeStats() const { (num_minibatches_processed_ * (config_.backstitch_training_scale == 0.0 ? 1.0 : 1.0 + 1.0 / config_.backstitch_training_interval)) - << " \% of the time."; + << " % of the time."; } void ObjectiveFunctionInfo::UpdateStats( diff --git a/src/probe/README.slow_expf b/src/probe/README.slow_expf index 00c9ce5be09..c20386b8137 100644 --- a/src/probe/README.slow_expf +++ b/src/probe/README.slow_expf @@ -1,5 +1,6 @@ -On some machines, expf() turns out to be very slow: much slower than its double precision counterpart exp(). -Probably this is concerned with the version of glibc. +On some machines, expf() turns out to be very slow: much slower than its double +precision counterpart exp(). Probably this is concerned with the version of +glibc. 
Here are a couple of examples: @@ -21,5 +22,7 @@ configuration$ ./exp-test exp() time: 0.0028439 expf() time: 0.00713329 -If slow behaviour is detected, then KALDI_NO_EXPF macro will be used, and the Exp() wrapper in base/kaldi-math.h will use exp() even for single precision floats. -The behaviour of expf() is considered to be slow if it is slower than exp() by at least 10%. \ No newline at end of file +If slow behaviour is detected, then KALDI_NO_EXPF macro will be used, and the +Exp() wrapper in base/kaldi-math.h will use exp() even for single precision +floats. The behaviour of expf() is considered to be slow if it is slower than +exp() by at least 10%. diff --git a/src/probe/exp-test.cc b/src/probe/exp-test.cc index 1fd8a64c6a6..d6cc76d4ce2 100644 --- a/src/probe/exp-test.cc +++ b/src/probe/exp-test.cc @@ -17,35 +17,52 @@ // See the Apache 2 License for the specific language governing permissions and // limitations under the License. +// Read Makefile.slow_expf. This test must be compiled with -O0. + #include #include #include "base/timer.h" -#define SAMPLE 100000 +int main() { + int test_iter = 300000; + + // Make sure that the CPU bumps its clock to full speed: run the first loop + // without timing. Then increase the sample iteration count exponentially + // until the loop takes at least 10ms. We run this loop 1/4 of the number of + // actual test iterations and call both exp() and expf(), so that the overall + // test run will take 20 to 60 ms, to ensure a sensibly measurable result. + for (bool first = true; ; first=false) { + kaldi::Timer timer; + for(int i = 0; i < test_iter; i += 4) { + (void)exp((double)(i & 0x0F)); + (void)expf((double)(i & 0x0F)); + } + double time = timer.Elapsed(); + if (first) continue; + if (time > 0.01) break; + test_iter *= 3; + } -int main() { - float dummy = 0.0; kaldi::Timer exp_timer; - for(int i = 0; i < SAMPLE; ++i) { - dummy += exp((double)(i % 10)); + for(int i = 0; i < test_iter; ++i) { + (void)exp((double)(i & 0x0F)); } double exp_time = exp_timer.Elapsed(); kaldi::Timer expf_timer; - for(int i = 0; i < SAMPLE; ++i) { - dummy += expf((double)(i % 10)); + for(int i = 0; i < test_iter; ++i) { + (void)expf((double)(i & 0x0F)); } double expf_time = expf_timer.Elapsed(); - - // Often exp() and expf() perform very similarly, - // so we will replace expf() by exp() only if there is at least 10% difference - if (expf_time < exp_time * 1.1) { + + double ratio = expf_time / exp_time; + if (ratio < 1.1) { + // Often exp() and expf() perform very similarly, so we will replace expf() + // by exp() only if there is at least 10% difference. return 0; - } else { - std::cerr << "exp() time: " << exp_time << std::endl; - std::cerr << "expf() time: " << expf_time << std::endl; - return 1; } - - std::cerr << dummy << std::endl; // No complaint about the unused variable + + std::cerr << ("WARNING: slow expf() detected. 
expf() is slower than exp() " + "by the factor of ") << ratio << "\n"; + return 1; } diff --git a/src/rnnlm/rnnlm-core-training.cc b/src/rnnlm/rnnlm-core-training.cc index 5a1ae97895f..d1a01f7ef66 100644 --- a/src/rnnlm/rnnlm-core-training.cc +++ b/src/rnnlm/rnnlm-core-training.cc @@ -302,7 +302,7 @@ void RnnlmCoreTrainer::PrintMaxChangeStats() const { << ", per-component max-change was enforced " << ((100.0 * num_max_change_per_component_applied_[i]) / num_minibatches_processed_) - << "\% of the time."; + << "% of the time."; i++; } } @@ -312,7 +312,7 @@ void RnnlmCoreTrainer::PrintMaxChangeStats() const { (num_minibatches_processed_ * (config_.backstitch_training_scale == 0.0 ? 1.0 : 1.0 + 1.0 / config_.backstitch_training_interval)) - << "\% of the time."; + << "% of the time."; } void RnnlmCoreTrainer::ProcessOutput( diff --git a/src/rnnlm/rnnlm-embedding-training.cc b/src/rnnlm/rnnlm-embedding-training.cc index c4238c7356a..0b5916b6bba 100644 --- a/src/rnnlm/rnnlm-embedding-training.cc +++ b/src/rnnlm/rnnlm-embedding-training.cc @@ -117,9 +117,9 @@ void RnnlmEmbeddingTrainer::TrainBackstitch( bool is_backstitch_step1, CuMatrixBase *embedding_deriv) { - // backstitch training is incompatible with momentum > 0 + // backstitch training is incompatible with momentum > 0 KALDI_ASSERT(config_.momentum == 0.0); - + // If relevant, do the following: // "embedding_deriv += - 2 * l2_regularize * embedding_mat_" // This is an approximate to the regular l2 regularization (add l2 regularization @@ -130,7 +130,7 @@ void RnnlmEmbeddingTrainer::TrainBackstitch( embedding_deriv->AddMat(1.0 / (1.0 + config_.backstitch_training_scale) * l2_term, *embedding_mat_); } - } + } BaseFloat scale = 1.0; if (config_.use_natural_gradient) { @@ -213,7 +213,7 @@ void RnnlmEmbeddingTrainer::Train( } void RnnlmEmbeddingTrainer::TrainBackstitch( - bool is_backstitch_step1, + bool is_backstitch_step1, const CuArrayBase &active_words, CuMatrixBase *embedding_deriv) { @@ -232,7 +232,7 @@ void RnnlmEmbeddingTrainer::TrainBackstitch( embedding_deriv->AddRows(l2_term / (1.0 + config_.backstitch_training_scale), *embedding_mat_, active_words); } - } + } BaseFloat scale = 1.0; if (config_.use_natural_gradient) { if (is_backstitch_step1) preconditioner_.Freeze(true); @@ -273,7 +273,7 @@ void RnnlmEmbeddingTrainer::PrintStats() { (num_minibatches_ * (config_.backstitch_training_scale == 0.0 ? 1.0 : 1.0 + 1.0 / config_.backstitch_training_interval)) - << " \% of the time."; + << " % of the time."; Matrix delta_embedding_mat(*embedding_mat_); delta_embedding_mat.AddMat(-1.0, initial_embedding_mat_); diff --git a/src/tree/build-tree-questions.h b/src/tree/build-tree-questions.h index a6bcfdd500b..22f12d62912 100644 --- a/src/tree/build-tree-questions.h +++ b/src/tree/build-tree-questions.h @@ -52,7 +52,7 @@ struct QuestionsForKey { // Configuration class associated with a particular ke std::vector > initial_questions; RefineClustersOptions refine_opts; // if refine_opts.max_iter == 0, // we just pick from the initial questions. - + QuestionsForKey(int32 num_iters = 5): refine_opts(num_iters, 2) { // refine_cfg with 5 iters and top-n = 2 (this is no restriction because // RefineClusters called with 2 clusters; would get set to that anyway as @@ -102,7 +102,9 @@ class Questions { // careful, this is a class. 
KALDI_ASSERT(keys_out != NULL); CopyMapKeysToVector(key_idx_, keys_out); } - const bool HasQuestionsForKey(EventKeyType key) const { return (key_idx_.count(key) != 0); } + bool HasQuestionsForKey(EventKeyType key) const { + return (key_idx_.count(key) != 0); + } ~Questions() { kaldi::DeletePointers(&key_options_); } diff --git a/src/util/kaldi-pipebuf.h b/src/util/kaldi-pipebuf.h index 9b83cdccc3d..61034ac2757 100644 --- a/src/util/kaldi-pipebuf.h +++ b/src/util/kaldi-pipebuf.h @@ -82,7 +82,6 @@ class basic_pipebuf : public std::basic_filebuf { }; // class basic_pipebuf #endif // _MSC_VER -}; // namespace kaldi +} // namespace kaldi #endif // KALDI_UTIL_KALDI_PIPEBUF_H_ -
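For reference, the pattern that recurs in the shell-script hunks above is the replacement of `bc` with a `perl` one-liner for floating-point arithmetic and comparisons (perl is already required by the Kaldi scripts, whereas `bc` may not be installed). Below is a minimal standalone sketch of that pattern; the variable names mirror extract_xvectors.sh and the run.sh tuning loops, but the values are made up for illustration.

    #!/usr/bin/env bash
    # Hypothetical values; 'window', 'period', 'der' and 'best_der' only mirror
    # the variables used in the scripts changed above.
    window=1.5
    period=0.75
    der=11.02
    best_der=100

    # Difference of two floats (previously: echo "$window-$period" | bc)
    overlap=$(perl -e "print ($window-$period);")
    echo "overlap-duration=$overlap"    # prints 0.75

    # Float comparison yielding 0/1 (previously: echo $der'<'$best_der | bc -l)
    if [ $(perl -e "print ($der < $best_der ? 1 : 0);") -eq 1 ]; then
      best_der=$der
    fi
    echo "best_der=$best_der"           # prints 11.02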