Python 2.7 - WebSocket Error Running Pygame
I'm running Sierra on a MacBook Pro, and I've got the code below in a cell in a Jupyter notebook. I'm trying to run the code to see part of a project for the Udacity Machine Learning Engineer class. When I hit Shift+Enter to run the cell, a pygame window pops up, then hangs and spins, and the message below appears in the terminal. Does anyone see what the issue might be? Pygame has been a little tricky to get going.
Terminal message:

Uncaught exception GET /api/kernels/6f220a14-725d-4484-8770-93a3a7f7d95d/channels?session_id=07775c8bbf074e4d8717d4135fcafded (::1)
HTTPServerRequest(protocol='http', host='localhost:8888', method='GET', uri='/api/kernels/6f220a14-725d-4484-8770-93a3a7f7d95d/channels?session_id=07775c8bbf074e4d8717d4135fcafded', version='HTTP/1.1', remote_ip='::1', headers={'Origin': 'http://localhost:8888', 'Upgrade': 'websocket', 'Sec-Websocket-Extensions': 'x-webkit-deflate-frame', 'Sec-Websocket-Version': '13', 'Connection': 'Upgrade', 'Sec-Websocket-Key': 'rrothfnf2xt1xznyhkevrg==', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8', 'Host': 'localhost:8888', 'Cookie': 'username-localhost-8888="2|1:0|10:1503172058|23:username-localhost-8888|44:yzvmzdrhowjhmjc4ndlimmjhnzg3mjyxmtfkoti3m2m=|57cc29addda75eecbfbcc042014d8e21438ee1e1bd03287e905edf0ff195be0d"; _xsrf=2|c4cb02f3|6fa896295a4e403506de1a7168fe3f8a|1501449753', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache'})
Traceback (most recent call last):
  File "/Users/myname/anaconda/lib/python2.7/site-packages/tornado/web.py", line 1425, in _stack_context_handle_exception
    raise_exc_info((type, value, traceback))
  File "/Users/myname/anaconda/lib/python2.7/site-packages/tornado/stack_context.py", line 314, in wrapped
    ret = fn(*args, **kwargs)
  File "/Users/myname/anaconda/lib/python2.7/site-packages/zmq/eventloop/zmqstream.py", line 191, in <lambda>
    self.on_recv(lambda msg: callback(self, msg), copy=copy)
  File "/Users/myname/anaconda/lib/python2.7/site-packages/notebook/services/kernels/handlers.py", line 373, in _on_zmq_reply
    super(ZMQChannelsHandler, self)._on_zmq_reply(stream, msg)
  File "/Users/myname/anaconda/lib/python2.7/site-packages/notebook/base/zmqhandlers.py", line 258, in _on_zmq_reply
    self.write_message(msg, binary=isinstance(msg, bytes))
  File "/Users/myname/anaconda/lib/python2.7/site-packages/tornado/websocket.py", line 210, in write_message
    raise WebSocketClosedError()
WebSocketClosedError

Code:

import random
import math
from environment import Agent, Environment
from planner import RoutePlanner
from simulator import Simulator


class LearningAgent(Agent):
    """ An agent that learns to drive in the Smartcab world.
        This is the object you will be modifying. """

    def __init__(self, env, learning=False, epsilon=1.0, alpha=0.5):
        super(LearningAgent, self).__init__(env)     # Set the agent in the environment
        self.planner = RoutePlanner(self.env, self)  # Create a route planner
        self.valid_actions = self.env.valid_actions  # The set of valid actions

        # Set parameters of the learning agent
        self.learning = learning  # Whether the agent is expected to learn
        self.Q = dict()           # Create a Q-table which will be a dictionary of tuples
        self.epsilon = epsilon    # Random exploration factor
        self.alpha = alpha        # Learning factor

        ###########
        ## TO DO ##
        ###########
        # Set any additional class parameters as needed

    def reset(self, destination=None, testing=False):
        """ The reset function is called at the beginning of each trial.
            'testing' is set to True if testing trials are being used
            once training trials have completed. """

        # Select the destination as the new location to route to
        self.planner.route_to(destination)

        ###########
        ## TO DO ##
        ###########
        # Update epsilon using a decay function of your choice
        # Update additional class parameters as needed
        # If 'testing' is True, set epsilon and alpha to 0

        return None

    def build_state(self):
        """ The build_state function is called when the agent requests data from the
            environment. The next waypoint, the intersection inputs, and the deadline
            are all features available to the agent. """

        # Collect data about the environment
        waypoint = self.planner.next_waypoint()  # The next waypoint
        inputs = self.env.sense(self)            # Visual input - intersection light and traffic
        deadline = self.env.get_deadline(self)   # Remaining deadline

        ###########
        ## TO DO ##
        ###########
        # Set 'state' as a tuple of relevant data for the agent
        state = None

        return state

    def get_maxQ(self, state):
        """ The get_maxQ function is called when the agent is asked to find the
            maximum Q-value of all actions based on the 'state' the smartcab is in. """

        ###########
        ## TO DO ##
        ###########
        # Calculate the maximum Q-value of all actions for a given state
        maxQ = None

        return maxQ

    def createQ(self, state):
        """ The createQ function is called when a state is generated by the agent. """

        ###########
        ## TO DO ##
        ###########
        # When learning, check if the 'state' is not in the Q-table
        # If it is not, create a new dictionary for that state
        #   Then, for each action available, set the initial Q-value to 0.0

        return

    def choose_action(self, state):
        """ The choose_action function is called when the agent is asked to choose
            which action to take, based on the 'state' the smartcab is in. """

        # Set the agent state and default action
        self.state = state
        self.next_waypoint = self.planner.next_waypoint()
        action = None

        ###########
        ## TO DO ##
        ###########
        # When not learning, choose a random action
        # When learning, choose a random action with 'epsilon' probability
        #   Otherwise, choose an action with the highest Q-value for the current state

        return action

    def learn(self, state, action, reward):
        """ The learn function is called after the agent completes an action and
            receives a reward. This function does not consider future rewards
            when conducting learning. """

        ###########
        ## TO DO ##
        ###########
        # When learning, implement the value iteration update rule
        #   Use only the learning rate 'alpha' (do not use the discount factor 'gamma')

        return

    def update(self):
        """ The update function is called when a time step is completed in the
            environment for a given trial. This function will build the agent
            state, choose an action, receive a reward, and learn if enabled. """

        state = self.build_state()           # Get current state
        self.createQ(state)                  # Create 'state' in Q-table
        action = self.choose_action(state)   # Choose an action
        reward = self.env.act(self, action)  # Receive a reward
        self.learn(state, action, reward)    # Q-learn

        return


def run():
    """ Driving function for running the simulation.
        Press ESC to close the simulation, or [SPACE] to pause the simulation. """

    ##############
    # Create the environment
    # Flags:
    #   verbose     - set to True to display additional output from the simulation
    #   num_dummies - discrete number of dummy agents in the environment, default is 100
    #   grid_size   - discrete number of intersections (columns, rows), default is (8, 6)
    env = Environment()

    ##############
    # Create the driving agent
    # Flags:
    #   learning   - set to True to force the driving agent to use Q-learning
    #    * epsilon - continuous value for the exploration factor, default is 1
    #    * alpha   - continuous value for the learning rate, default is 0.5
    agent = env.create_agent(LearningAgent)

    ##############
    # Follow the driving agent
    # Flags:
    #   enforce_deadline - set to True to enforce a deadline metric
    env.set_primary_agent(agent)

    ##############
    # Create the simulation
    # Flags:
    #   update_delay - continuous time (in seconds) between actions, default is 2.0 seconds
    #   display      - set to False to disable the GUI if pygame is enabled
    #   log_metrics  - set to True to log trial and simulation results to /logs
    #   optimized    - set to True to change the default log file name
    sim = Simulator(env)

    ##############
    # Run the simulator
    # Flags:
    #   tolerance - epsilon tolerance before beginning testing, default is 0.05
    #   n_test    - discrete number of testing trials to perform, default is 0
    sim.run()


if __name__ == '__main__':
    run()
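Update: from reading the traceback, the WebSocketClosedError is raised by the notebook server (Tornado) when it tries to push kernel output over a websocket the browser has already dropped; nothing in that stack comes from pygame itself, which fits the window hanging while the cell blocks in pygame's event loop. One workaround (assuming the usual project layout, where the agent code lives in smartcab/agent.py) is to run the simulation as a plain script from a terminal instead of from a notebook cell:

python smartcab/agent.py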