"""network.py
~~~~~~~~~~

A module implementing the stochastic gradient descent learning
algorithm for a feedforward neural network, with gradients computed
by backpropagation.

Recovered from a compiled-bytecode dump of
/Users/aayushbajaj/Documents/new-site/static/code/10khrs-ai-ml-dl/learning/nielsen-dl/network.py.
Where the bytecode was illegible (feedforward, SGD, update_mini_batch)
the bodies follow the canonical network.py from Michael Nielsen's
"Neural Networks and Deep Learning", which this file otherwise matches.
"""

import random

import numpy as np


class Network(object):

    def __init__(self, sizes):
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network for input ``a``."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        """Train the network using mini-batch stochastic gradient
        descent. ``training_data`` is a list of tuples ``(x, y)`` of
        training inputs and desired outputs. If ``test_data`` is
        provided, the network is evaluated against it after each
        epoch."""
        if test_data:
            n_test = len(test_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k + mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        """Update the network's weights and biases by applying
        gradient descent using backpropagation to a single mini-batch
        of ``(x, y)`` tuples, with learning rate ``eta``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w - (eta / len(mini_batch)) * nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (eta / len(mini_batch)) * nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x. ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # Forward pass: store all activations and weighted inputs z,
        # layer by layer.
        activation = x
        activations = [x]
        zs = []
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # Backward pass: error at the output layer ...
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # ... propagated back through the hidden layers; l counts from
        # the end, so l = 2 is the second-to-last layer, and so on.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        """Return the number of test inputs for which the network
        outputs the correct result; the network's output is taken to
        be the index of the most activated output neuron."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        """Return the vector of partial derivatives partial C_x /
        partial a for the output activations."""
        return (output_activations - y)


def sigmoid(z):
    """The sigmoid function."""
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z) * (1 - sigmoid(z))


if __name__ == "__main__":
    # The layer sizes in the original could not be recovered from the
    # bytecode; [2, 3, 2] is a placeholder sized for the demo below.
    net = Network([2, 3, 2])
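    # Usage sketch (not in the recovered file): smoke-test SGD on a toy
    # two-class problem; the data, layer sizes, and hyperparameters are
    # illustrative assumptions. Shapes follow the conventions the methods
    # above expect: inputs are (n, 1) column vectors, training targets are
    # one-hot column vectors, and test targets are integer labels (see
    # evaluate()).
    def make_example():
        x = np.random.rand(2, 1)
        label = int(x.sum() > 1.0)   # class 1 iff the coordinates sum > 1
        y = np.zeros((2, 1))
        y[label] = 1.0
        return x, y, label

    examples = [make_example() for _ in range(600)]
    training_data = [(x, y) for x, y, _ in examples[:500]]
    test_data = [(x, label) for x, _, label in examples[500:]]
    net.SGD(training_data, epochs=10, mini_batch_size=10, eta=3.0,
            test_data=test_data)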
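    # Gradient check (also an illustrative addition, not in the recovered
    # file): cost_derivative above corresponds to the quadratic cost
    # C_x = 0.5 * ||a - y||^2, so backprop's gradient for one example
    # should match a centred finite difference on that cost. The helper
    # name, probe point, and eps are assumptions.
    def quadratic_cost(x, y):
        return 0.5 * np.sum((net.feedforward(x) - y) ** 2)

    x0 = np.array([[0.2], [0.7]])
    y0 = np.array([[0.0], [1.0]])
    nabla_b, nabla_w = net.backprop(x0, y0)
    eps = 1e-5
    w00 = net.weights[0][0, 0]
    net.weights[0][0, 0] = w00 + eps
    c_plus = quadratic_cost(x0, y0)
    net.weights[0][0, 0] = w00 - eps
    c_minus = quadratic_cost(x0, y0)
    net.weights[0][0, 0] = w00   # restore the perturbed weight
    print("backprop: {:.6g}  numeric: {:.6g}".format(
        nabla_w[0][0, 0], (c_plus - c_minus) / (2 * eps)))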