tensorflow 1.14, ValueError: No variables to optimize

90 views

As the title says, I get this error when running my code, but when I print the trainable variables I find nothing obviously wrong with them. The version I am using is `tensorflow._api.v2.compat.v1`. Because I need to use a network loaded from a `.pb` file, there are two graphs involved, so I use `with graph.as_default()`. Here is the relevant part of my `train()` function:

# --- train() body fragment -------------------------------------------------
# Loads a frozen refiner network from a .pb file and attaches a pose loss
# plus an Adam training op to it.
objects = ["1", "2", "3", "4", "5", "6", "7", "8",
           "9", "10", "11", "12", "13", "14", "15"]  # LINEMOD object ids
croppings = yaml.safe_load(open('config/croppings.yaml', 'rb'))
dataset_name = 'linemod'
net = "models\\refiner_linemod_obj_02.pb"

tf.reset_default_graph()
graph = load_frozen_graph(net)

learning_rate = 1e-4
loss = None

with graph.as_default():
    # Build ALL training ops inside the frozen graph's context so the
    # placeholders, loss, global step, and optimizer slot variables live in
    # the same tf.Graph. In the original code the optimizer and global_step
    # were created in the *default* graph while `loss` lived in `graph`,
    # so minimize() could never find matching variables.
    global_step = training_util.create_global_step()

    # Ground-truth pose placeholders: quaternion rotation and 3-D translation.
    rot_gt = tf.placeholder(tf.float32, [None, 4], "rot_gt")
    trans_gt = tf.placeholder(tf.float32, [None, 3], "trans_gt")

    # Network outputs from the frozen graph.
    p1_x = graph.get_tensor_by_name('refined_rotation:0')
    p1_q = graph.get_tensor_by_name('refined_translation:0')

    # Weighted Euclidean pose loss (rotation term * 0.3, translation * 150).
    l1_x = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(p1_x, rot_gt)))) * 0.3
    l1_q = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(p1_q, trans_gt)))) * 150
    loss = l1_x + l1_q

    # NOTE(review): a graph loaded from a frozen .pb contains only constants,
    # so tf.trainable_variables() is empty here — that is the real cause of
    # "ValueError: No variables to optimize". To fine-tune, the frozen
    # constants must first be converted back into tf.Variable weights (or the
    # network rebuilt with variables initialized from the .pb) before
    # creating the optimizer.
    print(tf.trainable_variables())

    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate, epsilon=0.1, name="myNewAdam")
    train_op = optimizer.minimize(loss, global_step=global_step)

cam_info = load_yaml(os.path.join(sixd_base, 'camera.yml'))

And the error message is as follows:

non-resource variables are not supported in the long term
2022-04-21 09:23:15.476783: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2022-04-21 09:23:15.478540: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'nvcuda.dll'; dlerror: nvcuda.dll not found
2022-04-21 09:23:15.478910: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303)
2022-04-21 09:23:15.482861: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: DESKTOP-TNEFF0A
2022-04-21 09:23:15.483167: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: DESKTOP-TNEFF0A
Traceback (most recent call last):
  File "C:\Users\YuNing Ye\PycharmProjects\6D pose refinement\train_with_bp.py", line 266, in <module>
    train()
  File "C:\Users\YuNing Ye\PycharmProjects\6D pose refinement\train_with_bp.py", line 161, in train
    optimizer.minimize(loss, global_step=global_step)
  File "D:\SoftWare\Anaconda3\envs\show_architecture\lib\site-packages\tensorflow\python\training\optimizer.py", line 477, in minimize
    grads_and_vars = self.compute_gradients(
  File "D:\SoftWare\Anaconda3\envs\show_architecture\lib\site-packages\tensorflow\python\training\optimizer.py", line 601, in compute_gradients
    raise ValueError("No variables to optimize.")
ValueError: No variables to optimize.

Process finished with exit code 1
0 votes

There are 0 answers.